sagar007 committed on
Commit
3cabadc
1 Parent(s): 1d7ab4c

Update app.py

Files changed (1)
  1. app.py +30 -23
app.py CHANGED
@@ -8,6 +8,10 @@ from diffusers import DiffusionPipeline
 import hashlib
 import pickle
 import yaml
+import logging
+
+# Set up logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
 # Load config file
 with open('config.yaml', 'r') as file:
@@ -16,16 +20,21 @@ with open('config.yaml', 'r') as file:
 # Authenticate using the token stored in Hugging Face Spaces secrets
 if 'HF_TOKEN' in os.environ:
     login(token=os.environ['HF_TOKEN'])
+    logging.info("Successfully logged in with HF_TOKEN")
 else:
-    raise ValueError("HF_TOKEN not found in environment variables. Please add it to your Space's secrets.")
+    logging.warning("HF_TOKEN not found in environment variables. Some functionality may be limited.")
 
 # Correctly access the config values
 process_config = config['config']['process'][0] # Assuming the first process is the one we want
 
-base_model = process_config['model']['name_or_path']
+base_model = "black-forest-labs/FLUX.1-dev"
 lora_model = "sagar007/sagar_flux" # This isn't in the config, so we're keeping it as is
 trigger_word = process_config['trigger_word']
 
+logging.info(f"Base model: {base_model}")
+logging.info(f"LoRA model: {lora_model}")
+logging.info(f"Trigger word: {trigger_word}")
+
 # Global variables
 pipe = None
 cache = {}
@@ -46,15 +55,13 @@ def initialize_model():
     global pipe
     if pipe is None:
         try:
-            print("Loading base model...")
-            pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.float16)
-            print("Moving model to CUDA...")
+            logging.info(f"Attempting to load model: {base_model}")
+            pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.float16, use_safetensors=True)
+            logging.info("Moving model to CUDA...")
             pipe = pipe.to("cuda")
-            print(f"Successfully loaded base model: {base_model}")
+            logging.info(f"Successfully loaded model: {base_model}")
         except Exception as e:
-            print(f"Error initializing model: {str(e)}")
-            import traceback
-            print(traceback.format_exc())
+            logging.error(f"Error loading model {base_model}: {str(e)}")
             raise
 
 def load_cache():
@@ -62,12 +69,12 @@ def load_cache():
     if os.path.exists(CACHE_FILE):
         with open(CACHE_FILE, 'rb') as f:
             cache = pickle.load(f)
-        print(f"Loaded {len(cache)} cached images")
+        logging.info(f"Loaded {len(cache)} cached images")
 
 def save_cache():
     with open(CACHE_FILE, 'wb') as f:
         pickle.dump(cache, f)
-    print(f"Saved {len(cache)} cached images")
+    logging.info(f"Saved {len(cache)} cached images")
 
 def get_cache_key(prompt, cfg_scale, steps, seed, width, height, lora_scale):
     return hashlib.md5(f"{prompt}{cfg_scale}{steps}{seed}{width}{height}{lora_scale}".encode()).hexdigest()
@@ -82,23 +89,23 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
     cache_key = get_cache_key(prompt, cfg_scale, steps, seed, width, height, lora_scale)
 
     if cache_key in cache:
-        print("Using cached image")
+        logging.info("Using cached image")
        return cache[cache_key], seed
 
     try:
-        print(f"Starting run_lora with prompt: {prompt}")
+        logging.info(f"Starting run_lora with prompt: {prompt}")
         if pipe is None:
-            print("Initializing model...")
+            logging.info("Initializing model...")
             initialize_model()
 
-        print(f"Using seed: {seed}")
+        logging.info(f"Using seed: {seed}")
 
         generator = torch.Generator(device="cuda").manual_seed(seed)
 
         full_prompt = f"{prompt} {trigger_word}"
-        print(f"Full prompt: {full_prompt}")
+        logging.info(f"Full prompt: {full_prompt}")
 
-        print("Starting image generation...")
+        logging.info("Starting image generation...")
         image = pipe(
             prompt=full_prompt,
             num_inference_steps=steps,
@@ -107,7 +114,7 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
             height=height,
             generator=generator,
         ).images[0]
-        print("Image generation completed successfully")
+        logging.info("Image generation completed successfully")
 
         # Cache the generated image
         cache[cache_key] = image
@@ -115,9 +122,9 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
 
         return image, seed
     except Exception as e:
-        print(f"Error during generation: {str(e)}")
+        logging.error(f"Error during generation: {str(e)}")
         import traceback
-        print(traceback.format_exc())
+        logging.error(traceback.format_exc())
         return None, seed
 
 def update_prompt(example):
@@ -164,8 +171,8 @@ with gr.Blocks() as app:
 
 # Launch the app
 if __name__ == "__main__":
-    print("Starting the Gradio app...")
-    print("Pre-generating example images...")
+    logging.info("Starting the Gradio app...")
+    logging.info("Pre-generating example images...")
     cache_example_images()
     app.launch(share=True)
-    print("Gradio app launched successfully")
+    logging.info("Gradio app launched successfully")
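
Note: the diff defines lora_model and run_lora() accepts a lora_scale argument, but the code that actually attaches the LoRA weights sits outside the changed hunks. A minimal sketch of how it could be wired up with diffusers' LoRA loader, assuming the pipeline exposes load_lora_weights() and fuse_lora(); the helper name apply_lora is hypothetical and not part of this commit.

def apply_lora(lora_scale=1.0):
    # Hypothetical helper: load the adapter and bake it in at the requested scale.
    pipe.load_lora_weights(lora_model)
    pipe.fuse_lora(lora_scale=lora_scale)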
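
Note: a quick illustration of the caching path this commit keeps (not part of the diff). get_cache_key() hashes every generation parameter with MD5, so calling run_lora() twice with identical settings returns the pickled image from the cache instead of re-running the pipeline. The argument values below are made up for illustration.

# Illustrative only: identical parameter sets yield identical cache keys.
k1 = get_cache_key("a portrait photo", 7.0, 28, 42, 1024, 1024, 0.8)
k2 = get_cache_key("a portrait photo", 7.0, 28, 42, 1024, 1024, 0.8)
assert k1 == k2  # a second run_lora() call with these settings would hit the cache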