Yw22 committed on
Commit
39598c2
1 Parent(s): 0a08da9
Files changed (2) hide show
  1. app.py +21 -3
  2. requirements.txt +1 -1
app.py CHANGED
@@ -181,10 +181,28 @@ image_examples = [
181
  json.load(open("__asset__/trajs/camera/painting-1.json")),
182
  "__asset__/images/camera/painting-1.jpg",
183
  ],
184
-
185
  ]
186
 
187
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188
 
189
  DREAM_BOOTH = {
190
  'HelloObject': 'models/personalized/helloobjects_V12c.safetensors',
@@ -587,13 +605,13 @@ with block as demo:
587
  with gr.Row():
588
  def process_example(input_image, prompt, drag_mode, seed, personalized, tracking_points, first_frame_path):
589
 
590
- return input_image, prompt, drag_mode, seed, personalized
591
 
592
  example = gr.Examples(
593
  label="Input Example",
594
  examples=image_examples,
595
  inputs=[input_image, prompt, drag_mode, seed, personalized, tracking_points, first_frame_path],
596
- outputs=[input_image, prompt, drag_mode, seed, personalized],
597
  fn=process_example,
598
  run_on_click=True,
599
  examples_per_page=10,
 
181
  json.load(open("__asset__/trajs/camera/painting-1.json")),
182
  "__asset__/images/camera/painting-1.jpg",
183
  ],
 
184
  ]
185
 
186
 
187
+ # POINTS = {
188
+ # 'turtle': "__asset__/trajs/object/turtle-1.json",
189
+ # 'rose': "__asset__/trajs/object/rose-1.json",
190
+ # 'jellyfish': "__asset__/trajs/object/jellyfish-1.json",
191
+ # 'lsuh': "__asset__/trajs/camera/lush-1.json",
192
+ # 'tusun': "__asset__/trajs/camera/tusun-1.json",
193
+ # 'painting': "__asset__/trajs/camera/painting-1.json",
194
+ # }
195
+
196
+ # IMAGE_PATH = {
197
+ # 'turtle': "__asset__/images/object/turtle-1.jpg",
198
+ # 'rose': "__asset__/images/object/rose-1.jpg",
199
+ # 'jellyfish': "__asset__/images/object/jellyfish-1.jpg",
200
+ # 'lsuh': "__asset__/images/camera/lush-1.jpg",
201
+ # 'tusun': "__asset__/images/camera/tusun-1.jpg",
202
+ # 'painting': "__asset__/images/camera/painting-1.jpg",
203
+ # }
204
+
205
+
206
 
207
  DREAM_BOOTH = {
208
  'HelloObject': 'models/personalized/helloobjects_V12c.safetensors',
 
605
  with gr.Row():
606
  def process_example(input_image, prompt, drag_mode, seed, personalized, tracking_points, first_frame_path):
607
 
608
+ return input_image, prompt, drag_mode, seed, personalized, tracking_points, first_frame_path
609
 
610
  example = gr.Examples(
611
  label="Input Example",
612
  examples=image_examples,
613
  inputs=[input_image, prompt, drag_mode, seed, personalized, tracking_points, first_frame_path],
614
+ outputs=[input_image, prompt, drag_mode, seed, personalized, tracking_points, first_frame_path],
615
  fn=process_example,
616
  run_on_click=True,
617
  examples_per_page=10,
requirements.txt CHANGED
@@ -2,7 +2,7 @@ torch
2
  torchvision
3
  torchaudio
4
  transformers==4.32.1
5
- gradio==4.37.2
6
  ftfy
7
  tensorboard
8
  datasets
 
2
  torchvision
3
  torchaudio
4
  transformers==4.32.1
5
+ gradio==4.0.0
6
  ftfy
7
  tensorboard
8
  datasets