upload v1673447112 model
app.py CHANGED
@@ -18,10 +18,10 @@ from huggingface_hub import from_pretrained_keras
 
 PLACEHOLDER_TOKEN="<my-cat-token>"
 
-MODEL_CKPT = "chansung/my-kitty@
+MODEL_CKPT = "chansung/my-kitty@v1673447112"
 MODEL = from_pretrained_keras(MODEL_CKPT)
 
-head_sha = "
+head_sha = "119d87285b1a7dda732e72de6028d576af17ef29"
 
 model = keras_cv.models.StableDiffusion(
     img_width=img_width, img_height=img_height, jit_compile=True
@@ -135,6 +135,7 @@ def update_compute_options(provider, region):
     )
 
 def submit(
+    hf_account_input,
     hf_token_input,
     endpoint_name_input,
     provider_selector,
@@ -157,7 +158,7 @@ def submit(
     type = compute_resources[-1].strip()
 
     payload = {
-        "accountId":
+        "accountId": hf_account_input.strip(),
         "compute": {
             "accelerator": accelerator.lower(),
             "instanceSize": size[1:],
@@ -219,7 +220,13 @@ with gr.Blocks() as hf_endpoint:
     """)
 
     gr.Markdown("""
-
+    #### Your 🤗 Account ID(Name)
+    """)
+    hf_account_input = gr.Textbox(
+        show_label=False,
+    )
+
+    gr.Markdown("""
     #### Your 🤗 Access Token
     """)
     hf_token_input = gr.Textbox(
@@ -276,7 +283,7 @@ with gr.Blocks() as hf_endpoint:
     )
 
     revision_selector = gr.Textbox(
-        value=f"
+        value=f"v1673447112/{head_sha[:7]}",
         interactive=False,
         show_label=False,
     )
@@ -360,6 +367,7 @@ with gr.Blocks() as hf_endpoint:
     submit_button.click(
         submit,
         inputs=[
+            hf_account_input,
             hf_token_input,
             endpoint_name_input,
             provider_selector,
@@ -373,6 +381,46 @@ with gr.Blocks() as hf_endpoint:
             security_selector],
         outputs=status_txt)
 
+    gr.Markdown("""
+    #### Pricing Table(CPU) - 2023/1/11
+    """)
+
+    gr.Dataframe(
+        headers=["provider", "size", "$/h", "vCPUs", "Memory", "Architecture"],
+        datatype=["str", "str", "str", "number", "str", "str"],
+        row_count=8,
+        col_count=(6, "fixed"),
+        value=[
+            ["aws", "small", "$0.06", 1, "2GB", "Intel Xeon - Ice Lake"],
+            ["aws", "medium", "$0.12", 2, "4GB", "Intel Xeon - Ice Lake"],
+            ["aws", "large", "$0.24", 4, "8GB", "Intel Xeon - Ice Lake"],
+            ["aws", "xlarge", "$0.48", 8, "16GB", "Intel Xeon - Ice Lake"],
+            ["azure", "small", "$0.06", 1, "2GB", "Intel Xeon"],
+            ["azure", "medium", "$0.12", 2, "4GB", "Intel Xeon"],
+            ["azure", "large", "$0.24", 4, "8GB", "Intel Xeon"],
+            ["azure", "xlarge", "$0.48", 8, "16GB", "Intel Xeon"],
+        ]
+    )
+
+    gr.Markdown("""
+    #### Pricing Table(GPU) - 2023/1/11
+    """)
+
+    gr.Dataframe(
+        headers=["provider", "size", "$/h", "GPUs", "Memory", "Architecture"],
+        datatype=["str", "str", "str", "number", "str", "str"],
+        row_count=6,
+        col_count=(6, "fixed"),
+        value=[
+            ["aws", "small", "$0.60", 1, "14GB", "NVIDIA T4"],
+            ["aws", "medium", "$1.30", 1, "24GB", "NVIDIA A10G"],
+            ["aws", "large", "$4.50", 4, "156B", "NVIDIA T4"],
+            ["aws", "xlarge", "$6.50", 1, "80GB", "NVIDIA A100"],
+            ["aws", "xxlarge", "$7.00", 4, "96GB", "NVIDIA A10G"],
+            ["aws", "xxxlarge", "$45.0", 8, "640GB", "NVIDIA A100"],
+        ]
+    )
+
 gr.TabbedInterface(
     [demoInterface, hf_endpoint], ["Playground", " Deploy on 🤗 Endpoint"]
 ).launch(enable_queue=True)
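Note: the commit threads the new hf_account_input through submit() and into payload["accountId"], but the request that actually creates the Inference Endpoint sits outside the hunks shown above. The following is a minimal sketch of how such a payload could be POSTed; the URL, the create_endpoint helper name, and the error handling are assumptions for illustration, not code from app.py.

# Sketch only: sending a payload that carries "accountId" to the
# Inference Endpoints API. URL layout, helper name, and error handling
# are assumptions; they are not taken from the commit.
import requests

def create_endpoint(payload: dict, hf_token: str) -> dict:
    # The account id collected from hf_account_input doubles as the
    # namespace segment of the (assumed v2) Endpoints API URL.
    url = f"https://api.endpoints.huggingface.cloud/v2/endpoint/{payload['accountId']}"
    headers = {
        "Authorization": f"Bearer {hf_token.strip()}",
        "Content-Type": "application/json",
    }
    response = requests.post(url, headers=headers, json=payload)
    response.raise_for_status()
    return response.json()

In the app, submit() would presumably call something along these lines and surface the response (or the error) in status_txt.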