AkiKagura committed on
Commit
c4cbaf2
1 Parent(s): 6b04d1e

Create app.py

Files changed (1)
  1. app.py +60 -0
app.py ADDED
@@ -0,0 +1,60 @@
+ import gradio as gr
+ import torch
+ #from torch import autocast  # only needed for GPU
+
+ from PIL import Image
+ import numpy as np
+ from io import BytesIO
+ import os
+ MY_SECRET_TOKEN = os.environ.get('HF_TOKEN_SD')
+
+ #from diffusers import StableDiffusionPipeline
+ from diffusers import StableDiffusionImg2ImgPipeline
+
+ print("hello")
+
+ YOUR_TOKEN = MY_SECRET_TOKEN
+
+ device = "cpu"
+
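+ # Load the img2img pipeline once at startup; everything runs on CPU, so inference is slow.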
+ img_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("AkiKagura/mkgen-diffusion", use_auth_token=YOUR_TOKEN)
+ img_pipe.to(device)
+
+ source_img = gr.Image(source="upload", type="filepath", label="init_img | 512*512 px")
+ gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[1], height="auto")
+
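+ # Helper: open the uploaded file and force it to a square of value x value pixels.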
+ def resize(value, img):
+     #baseheight = value
+     img = Image.open(img)
+     #hpercent = (baseheight/float(img.size[1]))
+     #wsize = int((float(img.size[0])*float(hpercent)))
+     #img = img.resize((wsize,baseheight), Image.Resampling.LANCZOS)
+     img = img.resize((value, value), Image.Resampling.LANCZOS)
+     return img
+
+
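+ # Gradio callback: seed the RNG, resize the upload to 512x512, and run img2img with the chosen settings.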
+ def infer(source_img, prompt, guide, steps, seed, strength):
+     generator = torch.Generator('cpu').manual_seed(seed)
+
+     source_image = resize(512, source_img)
+     source_image.save('source.png')
+
+     # Pass the seeded generator so the Seed slider actually affects the output.
+     images_list = img_pipe([prompt] * 1, init_image=source_image, strength=strength, guidance_scale=guide, num_inference_steps=steps, generator=generator)
+     images = []
+
+     for i, image in enumerate(images_list["images"]):
+         images.append(image)
+     return images
+
+ print("done")
+
+ title = "Marco Generation Img2img"
+ description = "<p style='text-align: center;'>Upload your image and enter 'mkmk woman' as the prompt to get a Marco image. <br />Warning: slow process... about 10 min inference time.</p>"
+
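+ # Wire up the UI: init image, prompt, and sliders in; a gallery of generated images out.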
+ gr.Interface(fn=infer, inputs=[source_img,
+     "text",
+     gr.Slider(2, 15, value=7, label='Guidance Scale'),
+     gr.Slider(10, 50, value=25, step=1, label='Number of Iterations'),
+     gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True),
+     gr.Slider(label='Strength', minimum=0, maximum=1, step=.05, value=.75)],
+     outputs=gallery, title=title, description=description, allow_flagging="manual", flagging_dir="flagged").queue(max_size=100).launch(enable_queue=True)