ujin-song committed on
Commit
12b9fab
1 Parent(s): 9e3e754

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -12
app.py CHANGED
@@ -53,8 +53,9 @@ def generate(region1_concept,
53
  region1 = pose_image['region1']
54
  region2 = pose_image['region2']
55
 
56
- region1_prompt = f'[<{region1_concept}1> <{region1_concept}2>, {region1_prompt}]'
57
- region2_prompt = f'[<{region2_concept}1> <{region2_concept}2>, {region2_prompt}]'
 
58
  prompt_rewrite=f"{region1_prompt}-*-{region_neg_prompt}-*-{region1}|{region2_prompt}-*-{region_neg_prompt}-*-{region2}"
59
  print(prompt_rewrite)
60
 
@@ -117,6 +118,7 @@ def merge(concept1, concept2):
117
  modelbase_path = os.path.join(save_path,'combined_model_base')
118
  assert os.path.isdir(modelbase_path)
119
 
 
120
  return modelbase_path
121
 
122
  def infer(pretrained_model,
@@ -180,19 +182,18 @@ def infer(pretrained_model,
180
 
181
  return image[0]
182
 
183
-
184
  def on_select(evt: gr.SelectData): # SelectData is a subclass of EventData
185
  return evt.value['image']['orig_name']
186
 
187
  examples_context = [
188
  'walking at Stanford university campus',
189
- 'in a castle',
190
  'in the forest',
191
- 'in front of Eiffel tower'
192
  ]
193
 
194
- examples_region1 = ['wearing red hat, high resolution, best quality']
195
- examples_region2 = ['smilling, wearing blue shirt, high resolution, best quality']
196
 
197
  with open('multi-concept/pose_data/pose.json') as f:
198
  d = json.load(f)
@@ -231,15 +232,15 @@ with gr.Blocks(css=css) as demo:
231
 
232
 
233
  with gr.Row():
234
-
235
- character_list = ["Elsa", "Moana", "Woody", "Rapunzel"]
236
  region1_concept = gr.Dropdown(
237
- character_list,
238
  label="Character 1",
239
  info="Will add more characters later!"
240
  )
241
  region2_concept = gr.Dropdown(
242
- character_list,
243
  label="Character 2",
244
  info="Will add more characters later!"
245
  )
@@ -337,7 +338,7 @@ with gr.Blocks(css=css) as demo:
337
 
338
  keypose_adaptor_weight = gr.Slider(
339
  label="Keypose Adapter Weight",
340
- minimum = 0,
341
  maximum = 1,
342
  step= 0.01,
343
  value=1.0,
 
53
  region1 = pose_image['region1']
54
  region2 = pose_image['region2']
55
 
56
+ region_pos_prompt = "high resolution, best quality, highly detailed, sharp focus, expressive, 8k uhd, detailed, sophisticated"
57
+ region1_prompt = f'<{region1_concept}1> <{region1_concept}2>, {region1_prompt}, {region_pos_prompt}'
58
+ region2_prompt = f'<{region2_concept}1> <{region2_concept}2>, {region2_prompt}, {region_pos_prompt}'
59
  prompt_rewrite=f"{region1_prompt}-*-{region_neg_prompt}-*-{region1}|{region2_prompt}-*-{region_neg_prompt}-*-{region2}"
60
  print(prompt_rewrite)
61
 
 
118
  modelbase_path = os.path.join(save_path,'combined_model_base')
119
  assert os.path.isdir(modelbase_path)
120
 
121
+ # save_path = 'experiments/multi-concept/elsa_moana_weight18/combined_model_base'
122
  return modelbase_path
123
 
124
  def infer(pretrained_model,
 
182
 
183
  return image[0]
184
 
 
185
  def on_select(evt: gr.SelectData): # SelectData is a subclass of EventData
186
  return evt.value['image']['orig_name']
187
 
188
  examples_context = [
189
  'walking at Stanford university campus',
190
+ 'in front of a castle',
191
  'in the forest',
192
+ 'in the style of cyberpunk'
193
  ]
194
 
195
+ examples_region1 = ['wearing a red hat']
196
+ examples_region2 = ['smiling, wearing a blue shirt']
197
 
198
  with open('multi-concept/pose_data/pose.json') as f:
199
  d = json.load(f)
 
232
 
233
 
234
  with gr.Row():
235
+
236
+ concept_list = ["Elsa", "Moana", "Woody", "Rapunzel", "Elastigirl"]
237
  region1_concept = gr.Dropdown(
238
+ concept_list,
239
  label="Character 1",
240
  info="Will add more characters later!"
241
  )
242
  region2_concept = gr.Dropdown(
243
+ concept_list,
244
  label="Character 2",
245
  info="Will add more characters later!"
246
  )
 
338
 
339
  keypose_adaptor_weight = gr.Slider(
340
  label="Keypose Adapter Weight",
341
+ minimum = 0.1,
342
  maximum = 1,
343
  step= 0.01,
344
  value=1.0,