sagar007 committed on
Commit
1d7ab4c
1 Parent(s): 9452c41

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -26
app.py CHANGED
@@ -13,10 +13,6 @@ import yaml
13
  with open('config.yaml', 'r') as file:
14
  config = yaml.safe_load(file)
15
 
16
- # Debug: Print the structure of the config
17
- print("Config structure:")
18
- print(yaml.dump(config, default_flow_style=False))
19
-
20
  # Authenticate using the token stored in Hugging Face Spaces secrets
21
  if 'HF_TOKEN' in os.environ:
22
  login(token=os.environ['HF_TOKEN'])
@@ -24,22 +20,12 @@ else:
24
  raise ValueError("HF_TOKEN not found in environment variables. Please add it to your Space's secrets.")
25
 
26
  # Correctly access the config values
27
- try:
28
- base_model = config['config']['model']['name_or_path']
29
- except KeyError:
30
- print("Could not find 'config' -> 'model' -> 'name_or_path' in the config file.")
31
- print("Available keys in config:", list(config.keys()))
32
- if 'config' in config:
33
- print("Available keys in config['config']:", list(config['config'].keys()))
34
- base_model = "FLUX.1-dev" # Default value
35
 
 
36
  lora_model = "sagar007/sagar_flux" # This isn't in the config, so we're keeping it as is
 
37
 
38
- try:
39
- trigger_word = config['config']['trigger_word']
40
- except KeyError:
41
- print("Could not find 'config' -> 'trigger_word' in the config file.")
42
- trigger_word = "sagar"
43
  # Global variables
44
  pipe = None
45
  cache = {}
@@ -143,9 +129,9 @@ load_cache()
143
  # Pre-generate and cache example images
144
  def cache_example_images():
145
  for prompt in example_prompts:
146
- run_lora(prompt, config['config']['sample']['guidance_scale'], config['config']['sample']['sample_steps'],
147
- config['config']['sample']['walk_seed'], config['config']['sample']['seed'],
148
- config['config']['sample']['width'], config['config']['sample']['height'], 0.75)
149
 
150
  # Gradio interface setup
151
  with gr.Blocks() as app:
@@ -158,14 +144,14 @@ with gr.Blocks() as app:
158
  with gr.Column():
159
  result = gr.Image(label="Result")
160
  with gr.Row():
161
- cfg_scale = gr.Slider(minimum=1, maximum=20, value=config['config']['sample']['guidance_scale'], step=0.1, label="CFG Scale")
162
- steps = gr.Slider(minimum=1, maximum=100, value=config['config']['sample']['sample_steps'], step=1, label="Steps")
163
  with gr.Row():
164
- width = gr.Slider(minimum=128, maximum=1024, value=config['config']['sample']['width'], step=64, label="Width")
165
- height = gr.Slider(minimum=128, maximum=1024, value=config['config']['sample']['height'], step=64, label="Height")
166
  with gr.Row():
167
- seed = gr.Number(label="Seed", value=config['config']['sample']['seed'], precision=0)
168
- randomize_seed = gr.Checkbox(label="Randomize seed", value=config['config']['sample']['walk_seed'])
169
  lora_scale = gr.Slider(minimum=0, maximum=1, value=0.75, step=0.01, label="LoRA Scale")
170
 
171
  example_dropdown.change(update_prompt, inputs=[example_dropdown], outputs=[prompt])
 
13
  with open('config.yaml', 'r') as file:
14
  config = yaml.safe_load(file)
15
 
 
 
 
 
16
  # Authenticate using the token stored in Hugging Face Spaces secrets
17
  if 'HF_TOKEN' in os.environ:
18
  login(token=os.environ['HF_TOKEN'])
 
20
  raise ValueError("HF_TOKEN not found in environment variables. Please add it to your Space's secrets.")
21
 
22
  # Correctly access the config values
23
+ process_config = config['config']['process'][0] # Assuming the first process is the one we want
 
 
 
 
 
 
 
24
 
25
+ base_model = process_config['model']['name_or_path']
26
  lora_model = "sagar007/sagar_flux" # This isn't in the config, so we're keeping it as is
27
+ trigger_word = process_config['trigger_word']
28
 
 
 
 
 
 
29
  # Global variables
30
  pipe = None
31
  cache = {}
 
129
  # Pre-generate and cache example images
130
  def cache_example_images():
131
  for prompt in example_prompts:
132
+ run_lora(prompt, process_config['sample']['guidance_scale'], process_config['sample']['sample_steps'],
133
+ process_config['sample']['walk_seed'], process_config['sample']['seed'],
134
+ process_config['sample']['width'], process_config['sample']['height'], 0.75)
135
 
136
  # Gradio interface setup
137
  with gr.Blocks() as app:
 
144
  with gr.Column():
145
  result = gr.Image(label="Result")
146
  with gr.Row():
147
+ cfg_scale = gr.Slider(minimum=1, maximum=20, value=process_config['sample']['guidance_scale'], step=0.1, label="CFG Scale")
148
+ steps = gr.Slider(minimum=1, maximum=100, value=process_config['sample']['sample_steps'], step=1, label="Steps")
149
  with gr.Row():
150
+ width = gr.Slider(minimum=128, maximum=1024, value=process_config['sample']['width'], step=64, label="Width")
151
+ height = gr.Slider(minimum=128, maximum=1024, value=process_config['sample']['height'], step=64, label="Height")
152
  with gr.Row():
153
+ seed = gr.Number(label="Seed", value=process_config['sample']['seed'], precision=0)
154
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=process_config['sample']['walk_seed'])
155
  lora_scale = gr.Slider(minimum=0, maximum=1, value=0.75, step=0.01, label="LoRA Scale")
156
 
157
  example_dropdown.change(update_prompt, inputs=[example_dropdown], outputs=[prompt])