macadeliccc committed
Commit 933bc05 • 1 Parent(s): 7f45d73

switch to LCM demo

Files changed (3)
  1. README.md +2 -2
  2. app.py +132 -48
  3. requirements.txt +2 -2
README.md CHANGED
@@ -1,8 +1,8 @@
 ---
-title: SSD-1B + SDXL 1.0 & Refiner
+title: LCM-LoRa-SDXL Demo + Papercut
 emoji: 🏆
 colorFrom: indigo
-colorTo: pink
+colorTo: purple
 sdk: gradio
 sdk_version: 4.1.2
 app_file: app.py
app.py CHANGED
@@ -1,68 +1,152 @@
1
  import spaces
2
- from diffusers import StableDiffusionXLPipeline
3
- from diffusers import DiffusionPipeline
4
- from pydantic import BaseModel
5
- from PIL import Image
6
  import gradio as gr
7
  import torch
8
- import uuid
 
 
 
9
  import io
10
- import os
11
 
 
 
 
 
12
 
13
- # Load your model
14
- pipe = StableDiffusionXLPipeline.from_pretrained(
15
- "segmind/SSD-1B",
16
- torch_dtype=torch.float16,
17
- use_safetensors=True,
18
- variant="fp16"
19
- )
20
 
21
- pipe.to("cuda:0")
 
 
22
 
 
 
 
 
23
 
 
 
 
 
 
 
 
 
 
24
 
25
- @spaces.GPU
26
- def generate_and_save_image(prompt, negative_prompt=''):
27
- # Generate image using the provided prompts
28
- image = pipe(prompt=prompt, negative_prompt=negative_prompt).images[0]
 
29
 
30
- # Generate a unique UUID for the filename
31
- unique_id = str(uuid.uuid4())
32
- image_path = f"generated_images/{unique_id}.jpeg"
 
 
33
 
34
- # Save generated image locally
35
- os.makedirs('generated_images', exist_ok=True)
36
- image.save(image_path, format='JPEG')
 
 
 
 
 
 
 
37
 
38
- # Return the path of the saved image to display in Gradio interface
39
- return image_path
40
 
 
 
 
 
 
 
41
 
42
- # Start of the Gradio Blocks interface
43
- with gr.Blocks() as demo:
44
- with gr.Column():
45
- gr.Markdown("# Image Generation with SSD-1B")
46
- gr.Markdown("Enter a prompt and (optionally) a negative prompt to generate an image.")
47
-
48
- # Input fields for positive and negative prompts
49
- with gr.Row():
50
- prompt1 = gr.Textbox(label="Enter prompt")
51
- negative_prompt = gr.Textbox(label="Enter negative prompt (optional)")
52
-
53
- # Button for generating the image
54
- generate_button1 = gr.Button("Generate Image")
55
 
56
- # Output image display, set to a larger default size
57
- output_image1 = gr.Image(label="Generated Image")
 
58
 
59
- # Click event for the generate button
60
- generate_button1.click(
61
- generate_and_save_image,
62
- inputs=[prompt1, negative_prompt],
63
- outputs=output_image1
64
- )
 
 
65
 
66
 
67
- # Launch the combined Gradio app
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  demo.launch()
 
1
  import spaces
 
 
 
 
2
  import gradio as gr
3
  import torch
4
+ from diffusers import LCMScheduler, AutoPipelineForText2Image
5
+ from diffusers import AutoPipelineForInpainting, LCMScheduler
6
+ from diffusers import DiffusionPipeline, LCMScheduler
7
+ from PIL import Image, ImageEnhance
8
  import io
 
9
 
10
+ @spaces.GPU
11
+ def generate_image(prompt, num_inference_steps, guidance_scale):
12
+ model_id = "stabilityai/stable-diffusion-xl-base-1.0"
13
+ adapter_id = "latent-consistency/lcm-lora-sdxl"
14
 
15
+ pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16")
16
+ pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
17
+ pipe.to("cuda")
 
 
 
 
18
 
19
+ # Load and fuse lcm lora
20
+ pipe.load_lora_weights(adapter_id)
21
+ pipe.fuse_lora()
22
 
23
+ # Generate the image
24
+ image = pipe(prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
25
+
26
+ return image
27
 
28
+ def inpaint_image(prompt, init_image, mask_image, num_inference_steps, guidance_scale):
29
+ pipe = AutoPipelineForInpainting.from_pretrained(
30
+ "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
31
+ torch_dtype=torch.float16,
32
+ variant="fp16",
33
+ ).to("cuda")
34
+ pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
35
+ pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
36
+ pipe.fuse_lora()
37
 
38
+ if init_image is not None:
39
+ init_image_path = init_image.name # Get the file path
40
+ init_image = Image.open(init_image_path).resize((1024, 1024))
41
+ else:
42
+ raise ValueError("Initial image not provided or invalid")
43
 
44
+ if mask_image is not None:
45
+ mask_image_path = mask_image.name # Get the file path
46
+ mask_image = Image.open(mask_image_path).resize((1024, 1024))
47
+ else:
48
+ raise ValueError("Mask image not provided or invalid")
49
 
50
+ # Generate the inpainted image
51
+ generator = torch.manual_seed(42)
52
+ image = pipe(
53
+ prompt=prompt,
54
+ image=init_image,
55
+ mask_image=mask_image,
56
+ generator=generator,
57
+ num_inference_steps=num_inference_steps,
58
+ guidance_scale=guidance_scale,
59
+ ).images[0]
60
 
61
+ return image
 
62
 
63
+ def generate_image_with_adapter(prompt, num_inference_steps, guidance_scale):
64
+ pipe = DiffusionPipeline.from_pretrained(
65
+ "stabilityai/stable-diffusion-xl-base-1.0",
66
+ variant="fp16",
67
+ torch_dtype=torch.float16
68
+ ).to("cuda")
69
 
70
+ # set scheduler
71
+ pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
 
 
 
 
 
 
 
 
 
 
 
72
 
73
+ # Load and fuse lcm lora
74
+ pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm")
75
+ pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut")
76
 
77
+ # Combine LoRAs
78
+ pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8])
79
+ pipe.fuse_lora()
80
+ generator = torch.manual_seed(0)
81
+ # Generate the image
82
+ image = pipe(prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=generator).images[0]
83
+ pipe.unfuse_lora()
84
+ return image
85
 
86
 
87
+ def modify_image(image, brightness, contrast):
88
+ # Function to modify brightness and contrast
89
+ image = Image.open(io.BytesIO(image))
90
+ enhancer = ImageEnhance.Brightness(image)
91
+ image = enhancer.enhance(brightness)
92
+ enhancer = ImageEnhance.Contrast(image)
93
+ image = enhancer.enhance(contrast)
94
+ return image
95
+
96
+ with gr.Blocks(gr.themes.Soft()) as demo:
97
+ with gr.Row():
98
+ image_output = gr.Image(label="Generated Image")
99
+
100
+ with gr.Row():
101
+ with gr.Accordion(label="Configuration Options"):
102
+ prompt_input = gr.Textbox(label="Prompt", placeholder="Self-portrait oil painting, a beautiful cyborg with golden hair, 8k")
103
+ steps_input = gr.Slider(minimum=1, maximum=10, label="Inference Steps", value=4)
104
+ guidance_input = gr.Slider(minimum=0, maximum=2, label="Guidance Scale", value=1)
105
+ generate_button = gr.Button("Generate Image")
106
+ with gr.Row():
107
+ with gr.Accordion(label="Papercut Image Generation"):
108
+ adapter_prompt_input = gr.Textbox(label="Prompt", placeholder="papercut, a cute fox")
109
+ adapter_steps_input = gr.Slider(minimum=1, maximum=10, label="Inference Steps", value=4)
110
+ adapter_guidance_input = gr.Slider(minimum=0, maximum=2, label="Guidance Scale", value=1)
111
+ adapter_generate_button = gr.Button("Generate Image with Adapter")
112
+
113
+ with gr.Row():
114
+ with gr.Accordion(label="Inpainting"):
115
+ inpaint_prompt_input = gr.Textbox(label="Prompt for Inpainting", placeholder="a castle on top of a mountain, highly detailed, 8k")
116
+ init_image_input = gr.File(label="Initial Image")
117
+ mask_image_input = gr.File(label="Mask Image")
118
+ inpaint_steps_input = gr.Slider(minimum=1, maximum=10, label="Inference Steps", value=4)
119
+ inpaint_guidance_input = gr.Slider(minimum=0, maximum=2, label="Guidance Scale", value=1)
120
+ inpaint_button = gr.Button("Inpaint Image")
121
+
122
+ with gr.Row():
123
+ with gr.Accordion(label="Image Modification (Experimental)"):
124
+ brightness_slider = gr.Slider(minimum=0.5, maximum=1.5, step=1, label="Brightness")
125
+ contrast_slider = gr.Slider(minimum=0.5, maximum=1.5, step=1, label="Contrast")
126
+ modify_button = gr.Button("Modify Image")
127
+
128
+
129
+
130
+ generate_button.click(
131
+ generate_image,
132
+ inputs=[prompt_input, steps_input, guidance_input],
133
+ outputs=image_output
134
+ )
135
+
136
+ modify_button.click(
137
+ modify_image,
138
+ inputs=[image_output, brightness_slider, contrast_slider],
139
+ outputs=image_output
140
+ )
141
+ inpaint_button.click(
142
+ inpaint_image,
143
+ inputs=[inpaint_prompt_input, init_image_input, mask_image_input, inpaint_steps_input, inpaint_guidance_input],
144
+ outputs=image_output
145
+ )
146
+ adapter_generate_button.click(
147
+ generate_image_with_adapter,
148
+ inputs=[adapter_prompt_input, adapter_steps_input, adapter_guidance_input],
149
+ outputs=image_output
150
+ )
151
+
152
  demo.launch()
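
Note on the new structure: every handler above calls from_pretrained inside its click callback, so each button press reloads SDXL from disk. A minimal sketch of the alternative, not part of this commit, that keeps the pipeline at module scope the way the previous app.py kept its global pipe; it reuses the same model and adapter IDs as generate_image above:

import spaces
import torch
from diffusers import AutoPipelineForText2Image, LCMScheduler

# Sketch: build the pipeline once at import time so repeated clicks
# skip the slow from_pretrained call (this commit reloads per call).
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
pipe.fuse_lora()
pipe.to("cuda")

@spaces.GPU
def generate_image(prompt, num_inference_steps, guidance_scale):
    # LCM-LoRA is tuned for few steps (~2-8) and low guidance (~1-2),
    # which is why the UI sliders above default to 4 steps and scale 1
    return pipe(
        prompt=prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
    ).images[0]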
requirements.txt CHANGED
@@ -2,9 +2,9 @@ git+https://github.com/huggingface/diffusers.git
 git+https://github.com/huggingface/transformers.git
 --extra-index-url https://download.pytorch.org/whl/cu113
 torch
-uuid
+peft
 pydantic
 Pillow
 accelerate
 spaces
-invisible_watermark
+invisible_watermark
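
The dependency swap tracks the code change: uuid is dropped (it is a Python standard-library module that never needed to be listed, and the new app.py no longer uses it), while peft is added because diffusers delegates multi-adapter LoRA handling (load_lora_weights with adapter_name, set_adapters) to the PEFT library. A quick environment sanity check, as a hypothetical sketch rather than anything shipped in this commit:

import importlib.util

# Verify the third-party packages app.py imports are installed.
for pkg in ("torch", "gradio", "diffusers", "peft", "spaces"):
    found = importlib.util.find_spec(pkg) is not None
    print(f"{pkg}: {'ok' if found else 'MISSING'}")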