kottu committed on
Commit
265bfb8
•
1 Parent(s): 39bb3ab

Create app_base.py

Files changed (1)
  1. app_base.py +274 -0
app_base.py ADDED
@@ -0,0 +1,274 @@
import os

import gradio as gr
import PIL.Image
from diffusers.utils import load_image

from model import ADAPTER_NAMES, Model
from utils import (
    DEFAULT_STYLE_NAME,
    MAX_SEED,
    STYLE_NAMES,
    apply_style,
    randomize_seed_fn,
)

# Cache Gradio example outputs only when explicitly opted in via env var.
CACHE_EXAMPLES = os.environ.get("CACHE_EXAMPLES") == "1"


def create_demo(model: Model) -> gr.Blocks:
    def run(
        image: PIL.Image.Image,
        prompt: str,
        negative_prompt: str,
        adapter_name: str,
        style_name: str = DEFAULT_STYLE_NAME,
        num_inference_steps: int = 30,
        guidance_scale: float = 5.0,
        adapter_conditioning_scale: float = 1.0,
        adapter_conditioning_factor: float = 1.0,
        seed: int = 0,
        apply_preprocess: bool = True,
        progress=gr.Progress(track_tqdm=True),
    ) -> list[PIL.Image.Image]:
        # Expand the prompt pair with the selected style template, then
        # delegate generation to the model.
        prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)

        return model.run(
            image=image,
            prompt=prompt,
            negative_prompt=negative_prompt,
            adapter_name=adapter_name,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            adapter_conditioning_scale=adapter_conditioning_scale,
            adapter_conditioning_factor=adapter_conditioning_factor,
            seed=seed,
            apply_preprocess=apply_preprocess,
        )

    def process_example(
        image_url: str,
        prompt: str,
        adapter_name: str,
        guidance_scale: float,
        adapter_conditioning_scale: float,
        seed: int,
        apply_preprocess: bool,
    ) -> list[PIL.Image.Image]:
        # Examples supply an image path/URL rather than a PIL image, and use
        # a fixed negative prompt with no style template.
        image = load_image(image_url)
        return run(
            image=image,
            prompt=prompt,
            negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured",
            adapter_name=adapter_name,
            style_name="(No style)",
            guidance_scale=guidance_scale,
            adapter_conditioning_scale=adapter_conditioning_scale,
            seed=seed,
            apply_preprocess=apply_preprocess,
        )

    # Each example row: [image, prompt, adapter name, guidance scale,
    # adapter conditioning scale, seed, apply_preprocess].
    examples = [
        [
            "assets/org_canny.jpg",
            "Mystical fairy in real, magic, 4k picture, high quality",
            "canny",
            7.5,
            0.75,
            42,
            True,
        ],
        [
            "assets/org_sketch.png",
            "a robot, mount fuji in the background, 4k photo, highly detailed",
            "sketch",
            7.5,
            1.0,
            42,
            True,
        ],
        [
            "assets/org_lin.jpg",
            "Ice dragon roar, 4k photo",
            "lineart",
            7.5,
            0.8,
            42,
            True,
        ],
        [
            "assets/org_mid.jpg",
            "A photo of a room, 4k photo, highly detailed",
            "depth-midas",
            7.5,
            1.0,
            42,
            True,
        ],
        [
            "assets/org_zoe.jpg",
            "A photo of an orchid, 4k photo, highly detailed",
            "depth-zoe",
            5.0,
            1.0,
            42,
            True,
        ],
        [
            "assets/people.jpg",
            "A couple, 4k photo, highly detailed",
            "openpose",
            5.0,
            1.0,
            42,
            True,
        ],
        [
            "assets/depth-midas-image.png",
            "stormtrooper lecture, 4k photo, highly detailed",
            "depth-midas",
            7.5,
            1.0,
            42,
            False,
        ],
        [
            "assets/openpose-image.png",
            "spiderman, 4k photo, highly detailed",
            "openpose",
            5.0,
            1.0,
            42,
            False,
        ],
    ]

    with gr.Blocks() as demo:
        with gr.Row():
            # Left column: inputs; right column: result gallery.
            with gr.Column():
                with gr.Group():
                    image = gr.Image(label="Input image", type="pil", height=600)
                    prompt = gr.Textbox(label="Prompt")
                    with gr.Row():
                        adapter_name = gr.Dropdown(label="Adapter name", choices=ADAPTER_NAMES, value=ADAPTER_NAMES[0])
                        style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
                    run_button = gr.Button("Run")
                with gr.Accordion("Advanced options", open=False):
                    apply_preprocess = gr.Checkbox(label="Apply preprocess", value=True)
                    negative_prompt = gr.Textbox(
                        label="Negative prompt",
                        value="extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured",
                    )
                    num_inference_steps = gr.Slider(
                        label="Number of steps",
                        minimum=1,
                        maximum=Model.MAX_NUM_INFERENCE_STEPS,
                        step=1,
                        value=25,
                    )
                    guidance_scale = gr.Slider(
                        label="Guidance scale",
                        minimum=0.1,
                        maximum=30.0,
                        step=0.1,
                        value=5.0,
                    )
                    adapter_conditioning_scale = gr.Slider(
                        label="Adapter conditioning scale",
                        minimum=0.5,
                        maximum=1.0,
                        step=0.1,
                        value=1.0,
                    )
                    adapter_conditioning_factor = gr.Slider(
                        label="Adapter conditioning factor",
                        info="Fraction of timesteps for which adapter should be applied",
                        minimum=0.5,
                        maximum=1.0,
                        step=0.1,
                        value=1.0,
                    )
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=42,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=False)
            with gr.Column():
                result = gr.Gallery(label="Result", columns=2, height=600, object_fit="scale-down", show_label=False)

        gr.Examples(
            examples=examples,
            inputs=[
                image,
                prompt,
                adapter_name,
                guidance_scale,
                adapter_conditioning_scale,
                seed,
                apply_preprocess,
            ],
            outputs=result,
            fn=process_example,
            cache_examples=CACHE_EXAMPLES,
        )

        inputs = [
            image,
            prompt,
            negative_prompt,
            adapter_name,
            style,
            num_inference_steps,
            guidance_scale,
            adapter_conditioning_scale,
            adapter_conditioning_factor,
            seed,
            apply_preprocess,
        ]
        # Each trigger first resolves the seed (optionally randomizing it),
        # then runs generation with the updated seed value.
        prompt.submit(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
            api_name=False,
        ).then(
            fn=run,
            inputs=inputs,
            outputs=result,
            api_name=False,
        )
        negative_prompt.submit(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
            api_name=False,
        ).then(
            fn=run,
            inputs=inputs,
            outputs=result,
            api_name=False,
        )
        run_button.click(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
            api_name=False,
        ).then(
            fn=run,
            inputs=inputs,
            outputs=result,
            api_name="run",  # only the button exposes an API endpoint
        )

    return demo


if __name__ == "__main__":
    model = Model(ADAPTER_NAMES[0])
    demo = create_demo(model)
    demo.queue(max_size=20).launch()
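
This commit adds only app_base.py; the `model` module it imports is not part of the change. For orientation, here is a hypothetical sketch of the interface app_base.py assumes from model.py, inferred solely from the call sites above (the constructor, the `MAX_NUM_INFERENCE_STEPS` class attribute, and `run()`). The adapter list and the step cap are placeholders, and the two-image return shape is a guess based on the two-column result gallery.

import PIL.Image

ADAPTER_NAMES = [  # adapters referenced by the examples; the real list may differ
    "canny", "sketch", "lineart", "depth-midas", "depth-zoe", "openpose",
]


class Model:
    MAX_NUM_INFERENCE_STEPS = 50  # assumed cap; app_base.py uses it as the slider maximum

    def __init__(self, adapter_name: str) -> None:
        # Presumably loads the SDXL pipeline plus the chosen T2I-Adapter.
        self.adapter_name = adapter_name

    def run(
        self,
        image: PIL.Image.Image,
        prompt: str,
        negative_prompt: str,
        adapter_name: str,
        num_inference_steps: int = 30,
        guidance_scale: float = 5.0,
        adapter_conditioning_scale: float = 1.0,
        adapter_conditioning_factor: float = 1.0,
        seed: int = 0,
        apply_preprocess: bool = True,
    ) -> list[PIL.Image.Image]:
        # Expected to return the images shown in the gallery, presumably
        # [preprocessed control image, generated image].
        raise NotImplementedError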
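The `utils` module is likewise absent from this commit. A minimal sketch of the helpers app_base.py imports from it, again inferred only from how they are called; the seed bound and style values below are assumptions, not the space's actual definitions.

import random

MAX_SEED = 2**32 - 1  # assumed upper bound for the seed slider

STYLE_NAMES = ["(No style)", "Cinematic"]  # placeholder style list
DEFAULT_STYLE_NAME = "(No style)"


def apply_style(style_name: str, prompt: str, negative_prompt: str) -> tuple[str, str]:
    # Merge the selected style template into the prompt pair; a real
    # implementation would substitute the prompt into a named template.
    return prompt, negative_prompt


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Replace the seed with a random one when the checkbox is ticked;
    # wired to run before run() so generation sees the updated seed.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed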