Commit 744a1fc
Parent(s): 0098e32
Move all to gallery
app.py CHANGED
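
This commit routes every generator into the shared gallery: text2image_latent and text2image_diffusion now return lists of saved image paths, text2image_rudalle and text2image_vqgan wrap their single results in a list so gr.Gallery can render them, and all four click events output to the gallery while the old per-image output tab is commented out.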

@@ -29,20 +29,34 @@ def text2image_latent(text,steps,width,height,images,diversity):
         image_path = f'{temp_dir}/{url}.png'
         img.save(f'{temp_dir}/{url}.png')
         image_paths.append(image_path)
-    return(
+    return(image_paths)
 
 def text2image_rudalle(text,aspect,model):
     image = rudalle(text,aspect,model)[0]
-    return(image)
+    return([image])
 
 def text2image_vqgan(text,width,height,style,steps,flavor):
     results = vqgan(text,width,height,style,steps,flavor)
-    return(results)
+    return([results])
 
 def text2image_diffusion(steps_diff, images_diff, weight, clip):
     results = diffusion(steps_diff, images_diff, weight, clip)
-
-
+    image_paths = []
+    image_arrays = []
+    for image in results:
+        image_str = image[0]
+        image_str = image_str.replace("data:image/png;base64,","")
+        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
+        img = Image.open(io.BytesIO(decoded_bytes))
+        #image_arrays.append(numpy.asarray(img))
+        url = shortuuid.uuid()
+        temp_dir = './tmp'
+        if not os.path.exists(temp_dir):
+            os.makedirs(temp_dir, exist_ok=True)
+        image_path = f'{temp_dir}/{url}.png'
+        img.save(f'{temp_dir}/{url}.png')
+        image_paths.append(image_path)
+    return(image_paths)
 
 css_mt = {"margin-top": "1em"}
 
@@ -85,13 +99,13 @@ with gr.Blocks() as mindseye:
                 get_image_diffusion = gr.button("Generate Image",css=css_mt)
     with gr.Row():
         with gr.Tabs():
-            with gr.TabItem("Image output"):
-                image = gr.outputs.Image()
+            #with gr.TabItem("Image output"):
+                # image = gr.outputs.Image()
             with gr.TabItem("Gallery output"):
                 gallery = gr.Gallery(label="Individual images")
 
-    get_image_latent.click(text2image_latent, inputs=[text,steps,width,height,images,diversity], outputs=
-    get_image_rudalle.click(text2image_rudalle, inputs=[text,aspect,model], outputs=
-    get_image_vqgan.click(text2image_vqgan, inputs=[text,width_vq,height_vq,style,steps,flavor],outputs=
+    get_image_latent.click(text2image_latent, inputs=[text,steps,width,height,images,diversity], outputs=gallery)
+    get_image_rudalle.click(text2image_rudalle, inputs=[text,aspect,model], outputs=gallery)
+    get_image_vqgan.click(text2image_vqgan, inputs=[text,width_vq,height_vq,style,steps,flavor],outputs=gallery)
     get_image_diffusion.click(text2image_diffusion, inputs=[steps_diff, images_diff, weight, clip],outputs=gallery)
     mindseye.launch()
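
The heart of the new text2image_diffusion body is a decode-and-save loop: strip the data:image/png;base64, prefix from each result, base64-decode it, open it with PIL, and save it under a random shortuuid name in ./tmp so the gallery receives file paths. A self-contained sketch of that pattern, with save_data_urls and data_urls as illustrative names rather than anything in the app:

import base64
import io
import os

import shortuuid
from PIL import Image

def save_data_urls(data_urls):
    """Decode base64 PNG data URLs and save each to ./tmp; return the paths."""
    image_paths = []
    temp_dir = './tmp'
    # exist_ok=True already covers the existence check the diff performs
    os.makedirs(temp_dir, exist_ok=True)
    for image_str in data_urls:
        # Strip the data-URL prefix, leaving raw base64
        image_str = image_str.replace("data:image/png;base64,", "")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
        # Random short filename, as in the diff
        image_path = f'{temp_dir}/{shortuuid.uuid()}.png'
        img.save(image_path)
        image_paths.append(image_path)
    return image_paths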
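
And the wiring the second hunk converges on, reduced to a minimal runnable sketch: one button, one handler that returns a list (just as the handlers above now do), and a gr.Gallery as the single output. This assumes a current Gradio release, so it uses gr.Button where the diff's beta-era code has gr.button, and fake_generate is a hypothetical stand-in for the real rudalle/vqgan/diffusion backends:

import gradio as gr
from PIL import Image

def fake_generate(text):
    # Stand-in backend: return a *list*, as the real handlers now do,
    # so gr.Gallery can render every element
    return [Image.new("RGB", (256, 256), color) for color in ("red", "green", "blue")]

with gr.Blocks() as demo:
    text = gr.Textbox(label="Prompt")
    generate = gr.Button("Generate Image")
    gallery = gr.Gallery(label="Individual images")
    # One shared gallery output, mirroring the commit's click wiring
    generate.click(fake_generate, inputs=[text], outputs=gallery)

demo.launch()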