fahad11182 commited on
Commit
47b4f02
1 Parent(s): a03bee1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +130 -106
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import torch
2
  from diffusers import StableDiffusionInstructPix2PixPipeline
3
  import gradio as gr
@@ -9,151 +10,174 @@ model_id = "timbrooks/instruct-pix2pix"
9
  pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
10
  pipe = pipe.to("cpu")
11
 
 
 
 
 
 
 
12
  # Initialize a random seed
13
  seed = random.randint(0, 10000)
14
 
15
- # Function to reset the seed
16
  def change_style():
17
  global seed
18
- seed = random.randint(0, 10000)
19
- return f"Seed changed to: {seed}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
- # Changign the walls' color function
22
- def change_color(image,color):
23
- # Construct the color prompt
24
- prompt = f"paint the walls with {color} color "
25
 
26
- # Text CFG (guidance_scale) controls how strongly the model follows the prompt
27
- text_cfg = 7.5
28
 
29
- # Image CFG: Although not explicitly part of InstructPix2Pix, you can "simulate" image preservation
30
- # by lowering the impact of the guidance. Here, we assume lower guidance impacts image preservation.
31
- image_cfg = 1.5
32
 
33
- # Apply the edit using InstructPix2Pix, with text CFG and image CFG influencing the guidance scale
34
- edited_image = pipe(
35
- prompt=prompt,
36
- image=image,
37
- num_inference_steps=70, # Number of diffusion steps
38
- guidance_scale=text_cfg, # Text CFG for following the prompt
39
- image_guidance_scale=image_cfg, # Simulated Image CFG to preserve image content
40
- generator=torch.manual_seed(seed) # Random seed for consistency
41
- ).images[0]
42
 
43
- return edited_image
44
 
 
 
 
 
45
 
46
- # General image editing function
47
- def edit_image(image, instruction):
48
- # Text CFG (guidance_scale) controls how strongly the model follows the prompt
49
- text_cfg = 12.0
50
-
51
- # Image CFG: Simulated value for preserving the original image content
52
- image_cfg = 1.5
53
-
54
- # Apply the edit using InstructPix2Pix, with text CFG and simulated image CFG
55
- edited_image = pipe(
56
- prompt=instruction,
57
- image=image,
58
- num_inference_steps=70, # Number of diffusion steps
59
- guidance_scale=text_cfg, # Text CFG for following the prompt
60
- image_guidance_scale=image_cfg, # Simulated Image CFG to preserve image content
61
- generator=torch.manual_seed(seed) # Random seed for consistency
62
- ).images[0]
63
-
64
- return edited_image
65
 
 
 
 
 
 
 
66
 
67
- # Gradio interface for image editing
68
- def image_interface():
69
- with gr.Blocks() as demo_color:
70
- gr.Markdown("## Painting Color Changing App")
71
-
72
- # Image upload
73
- image_input = gr.Image(type="pil", label="Upload Room Image")
74
-
75
- # List of common painting colors
76
- common_colors = [
77
- "Alabaster", # Off-white
78
- "Agreeable Gray", # Warm gray
79
- "Sea Salt", # Soft greenish-blue
80
- "Pure White", # Bright white
81
- "Accessible Beige", # Warm beige
82
- "Mindful Gray", # Cool gray
83
- "Peppercorn", # Dark charcoal gray
84
- "Hale Navy", # Dark navy blue
85
- "Tricorn Black", # Pure black
86
- "Pale Oak", # Soft taupe
87
- "Silver Strand", # Soft blue-gray
88
- "Rainwashed", # Light aqua
89
- "Orange Burst", # Bright orange
90
- "Sunny Yellow", # Bright yellow
91
- "Sage Green", # Muted green
92
- "Firebrick Red", # Deep red
93
- "Lavender", # Soft purple
94
- "Sky Blue", # Light blue
95
- "Coral", # Vibrant coral
96
 
97
- ]
 
98
 
 
 
99
 
100
- # Dropdown for wall color
101
- color_input = gr.Dropdown(common_colors, label="Choose Wall Color")
102
 
103
- # Display output image
104
- result_image = gr.Image(label="Edited Image")
105
 
106
- # Button to apply the wall color transformation
107
- submit_button = gr.Button("Paint the walls")
108
 
109
- # Define action on button click
110
- submit_button.click(fn=change_color, inputs=[image_input, color_input], outputs=result_image)
 
 
 
 
111
 
112
- return demo_color
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
 
114
 
115
- # Gradio interface for general image editing
116
  def general_editing_interface():
117
- with gr.Blocks() as demo_general:
118
- gr.Markdown("## General Image Editing App")
119
 
120
- # Image upload
121
- image_input = gr.Image(type="pil", label="Upload an Image")
122
 
123
- # Textbox for instruction
124
- instruction_input = gr.Textbox(label="Enter the Instruction", placeholder="Describe the changes (e.g., 'Make it snowy')")
125
 
126
- # Display output image
127
- result_image = gr.Image(label="Edited Image")
128
 
129
- # Button to apply the transformation
130
- submit_button = gr.Button("Apply Edit")
131
 
132
- # Button to change the seed (style)
133
- change_style_button = gr.Button("Change the Style")
134
 
135
- # Output for seed change message
136
- seed_output = gr.Textbox(label="Seed Info", interactive=False)
137
 
138
- # Define action on button click
139
- submit_button.click(fn=edit_image, inputs=[image_input, instruction_input], outputs=result_image)
140
- change_style_button.click(fn=change_style, outputs=seed_output)
 
 
141
 
142
- return demo_general
143
 
144
-
145
  # Launch both Gradio apps
146
  color_app = image_interface()
147
  general_editing_app = general_editing_interface()
148
 
149
- with gr.Blocks() as combined_demo:
150
- gr.Markdown("## Select the Application")
151
 
152
- with gr.Tab("General Image Editing App"):
153
- general_editing_app.render()
154
 
155
- with gr.Tab("Changing The Paint App"):
156
- color_app.render()
157
 
158
  # Launch the combined Gradio app
159
  combined_demo.launch()
 
1
+ from transformers import MarianMTModel, MarianTokenizer
2
  import torch
3
  from diffusers import StableDiffusionInstructPix2PixPipeline
4
  import gradio as gr
 
10
  pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
11
  pipe = pipe.to("cpu")
12
 
13
+ # Load the translation model (from Arabic to English)
14
+ translation_model_name = 'Helsinki-NLP/opus-mt-ar-en'
15
+ translation_tokenizer = MarianTokenizer.from_pretrained(translation_model_name)
16
+ translation_model = MarianMTModel.from_pretrained(translation_model_name)
17
+
18
+
19
  # Initialize a random seed
20
  seed = random.randint(0, 10000)
21
 
22
# Function to reset the seed (style change)
def change_style():
    """Re-randomize the global seed and report the new value (in Arabic).

    Returns:
        str: Arabic status message containing the new integer seed.
    """
    global seed
    # Keep the seed a plain int: it is later passed to torch.manual_seed()
    # inside the editing functions, which requires an integer. The previous
    # code stored the torch.Generator returned by torch.manual_seed() here,
    # which broke every subsequent pipeline call and printed a Generator
    # repr in the message instead of the seed number.
    seed = random.randint(0, 10000)
    return f"تم تغيير النمط. المعرف الجديد: {seed}"
27
+
28
# Arabic -> English color lookup, used to build an English prompt for the
# InstructPix2Pix pipeline from the Arabic dropdown value.
arabic_to_english_colors = dict([
    ("أبيض", "White"),
    ("أسود", "Black"),
    ("أزرق", "Blue"),
    ("أخضر", "Green"),
    ("أحمر", "Red"),
    ("أصفر", "Yellow"),
    ("رمادي", "Gray"),
    ("برتقالي", "Orange"),
    ("بنفسجي", "Purple"),
    ("وردي", "Pink"),
    ("بني", "Brown"),
    ("كحلي", "Navy"),
    ("زهري", "Coral"),
    ("فيروزي", "Teal"),
    ("بيج", "Beige"),
])


# Translate the chosen Arabic color and repaint the walls of the image.
def change_color(image, color):
    """Repaint the walls of *image* in the selected color.

    Args:
        image: PIL image of the room.
        color: Arabic color name chosen from the dropdown.

    Returns:
        The edited PIL image, or an Arabic error string when the color is
        not in the lookup table.
    """
    # Map the Arabic name to English; None when the color is unknown.
    english_color = arabic_to_english_colors.get(color)
    if not english_color:
        # Unknown color: report the problem back to the user (in Arabic).
        return f"اللون '{color}' غير موجود في القائمة. يرجى إدخال لون صحيح."

    # English edit instruction for InstructPix2Pix.
    prompt = f"paint the walls with {english_color} color"

    # guidance_scale (text CFG) = 7.5 controls how strongly the prompt is
    # followed; image_guidance_scale = 1.5 biases toward preserving the
    # original image content.
    result = pipe(
        prompt=prompt,
        image=image,
        num_inference_steps=70,
        guidance_scale=7.5,
        image_guidance_scale=1.5,
        generator=torch.manual_seed(seed),  # fixed seed for reproducibility
    )
    return result.images[0]
76
 
77
# Gradio UI (RTL layout, Arabic labels) for the wall-repainting feature.
def image_interface():
    """Build and return the wall-color-changing Gradio app."""
    # Arabic color names offered in the dropdown; every entry must exist in
    # arabic_to_english_colors so change_color can translate it.
    palette = [
        "أبيض", "أسود", "أزرق", "أخضر", "أحمر", "أصفر",
        "رمادي", "برتقالي", "بنفسجي", "وردي", "بني",
        "كحلي", "زهري", "فيروزي", "بيج",
    ]

    with gr.Blocks(css=".gradio-container {direction: rtl}") as demo:
        gr.Markdown("## تطبيق لتغيير لون الجدران")

        # Inputs: room photo and desired wall color.
        uploaded = gr.Image(type="pil", label="قم برفع صورة للغرفة")
        chosen_color = gr.Dropdown(palette, label="اختر لون الجدران")

        # Output: the repainted image.
        output_image = gr.Image(label="الصورة المعدلة")

        # Run change_color when the button is pressed.
        paint_button = gr.Button("قم بتغيير لون الجدران")
        paint_button.click(
            fn=change_color,
            inputs=[uploaded, chosen_color],
            outputs=output_image,
        )

    return demo
 
105
 
 
 
106
 
107
# Translate an Arabic prompt into English with the MarianMT model.
def translate_prompt(prompt_ar):
    """Return the English translation of the Arabic string *prompt_ar*."""
    # Tokenize, generate the translation, then decode back to plain text.
    tokens = translation_tokenizer(prompt_ar, return_tensors="pt", truncation=True)
    generated = translation_model.generate(**tokens)
    return translation_tokenizer.decode(generated[0], skip_special_tokens=True)
113
 
114
+
115
# Apply a free-form edit, described in Arabic, to the uploaded image.
def edit_image(image, instruction_ar):
    """Translate *instruction_ar* to English and run InstructPix2Pix with it.

    Args:
        image: PIL image to edit.
        instruction_ar: Arabic edit instruction from the textbox.

    Returns:
        The edited PIL image.
    """
    # The pipeline expects English prompts, so translate first.
    english_instruction = translate_prompt(instruction_ar)

    # guidance_scale (text CFG) = 12.0 controls how strongly the prompt is
    # followed; image_guidance_scale = 1.5 biases toward preserving the
    # original image content.
    output = pipe(
        prompt=english_instruction,
        image=image,
        num_inference_steps=70,
        guidance_scale=12.0,
        image_guidance_scale=1.5,
        generator=torch.manual_seed(seed),  # fixed seed for reproducibility
    )
    return output.images[0]
137
 
138
 
139
# Gradio UI (RTL layout, Arabic labels) for free-form image editing.
def general_editing_interface():
    """Build and return the general image-editing Gradio app."""
    with gr.Blocks(css=".gradio-container {direction: rtl}") as demo:
        gr.Markdown("## تطبيق تحرير الصور العام")

        # Inputs: image plus a free-form Arabic instruction.
        uploaded = gr.Image(type="pil", label="قم بتحميل صورة")
        instruction = gr.Textbox(label="أدخل التعليمات", placeholder="وصف التعديلات (مثل: 'اجعل الجو مثلج')")

        # Output: the edited image.
        output_image = gr.Image(label="الصورة المعدلة")

        # Apply the edit / re-randomize the seed ("style").
        apply_button = gr.Button("تطبيق التعديلات")
        restyle_button = gr.Button("تغيير النمط")

        # Read-only textbox showing the seed-change message.
        seed_info = gr.Textbox(label="معلومات النمط", interactive=False)

        apply_button.click(
            fn=edit_image,
            inputs=[uploaded, instruction],
            outputs=output_image,
        )
        restyle_button.click(fn=change_style, outputs=seed_info)

    return demo
167
 
 
168
 
 
169
# Launch both Gradio apps inside a single tabbed, RTL container.
color_app = image_interface()
general_editing_app = general_editing_interface()

with gr.Blocks(css=".gradio-container {direction: rtl}") as combined_demo:
    gr.Markdown("## اختر التطبيق")

    # Tab order matters: the general editor is presented first.
    # NOTE: the first tab label keeps its original trailing space.
    for tab_title, app in (
        ("تطبيق تحرير الصور ", general_editing_app),
        ("تطبيق تغيير لون الطلاء", color_app),
    ):
        with gr.Tab(tab_title):
            app.render()

# Launch the combined Gradio app
combined_demo.launch()