michaelj committed
Commit 44a51c4
Parent: 30a6d7b

Upload folder using huggingface_hub

Dockerfile ADDED
@@ -0,0 +1,20 @@
+ FROM python:3.9
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ RUN useradd -m -u 1000 user
+
+ USER user
+
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ WORKDIR $HOME/app
+
+ COPY --chown=user . $HOME/app
+
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--reload"]
README.md CHANGED
@@ -1,12 +1,13 @@
  ---
- title: Testlcm
- emoji: 🏢
- colorFrom: pink
- colorTo: purple
+ title: Fastlcm
+ emoji: 🏆
+ colorFrom: gray
+ colorTo: yellow
  sdk: gradio
- sdk_version: 4.7.1
+ sdk_version: 4.8.0
  app_file: app.py
  pinned: false
+ license: apache-2.0
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -7,6 +7,7 @@ from constants import APP_VERSION, LCM_DEFAULT_MODEL_OPENVINO
  from models.interface_types import InterfaceType
  from constants import DEVICE
  from state import get_settings
+ import traceback

  from fastapi import FastAPI,Body
@@ -22,15 +23,12 @@ import base64
  import io
  from datetime import datetime

- app = FastAPI(name="mutilParam")
-
  from typing import Any
  from backend.models.lcmdiffusion_setting import DiffusionTask

  from frontend.utils import is_reshape_required
  from concurrent.futures import ThreadPoolExecutor

- app_settings = get_settings()

  context = Context(InterfaceType.WEBUI)
  previous_width = 0
@@ -38,136 +36,136 @@ previous_height = 0
  previous_model_id = ""
  previous_num_of_images = 0

- parser = ArgumentParser(description=f"FAST SD CPU {constants.APP_VERSION}")
- parser.add_argument(
-     "-s",
-     "--share",
-     action="store_true",
-     help="Create sharable link(Web UI)",
-     required=False,
- )
- group = parser.add_mutually_exclusive_group(required=False)
- group.add_argument(
-     "-g",
-     "--gui",
-     action="store_true",
-     help="Start desktop GUI",
- )
- group.add_argument(
-     "-w",
-     "--webui",
-     action="store_true",
-     help="Start Web UI",
- )
- group.add_argument(
-     "-r",
-     "--realtime",
-     action="store_true",
-     help="Start realtime inference UI(experimental)",
- )
- group.add_argument(
-     "-v",
-     "--version",
-     action="store_true",
-     help="Version",
- )
- parser.add_argument(
-     "--lcm_model_id",
-     type=str,
-     help="Model ID or path,Default SimianLuo/LCM_Dreamshaper_v7",
-     default="SimianLuo/LCM_Dreamshaper_v7",
- )
- parser.add_argument(
-     "--prompt",
-     type=str,
-     help="Describe the image you want to generate",
- )
- parser.add_argument(
-     "--image_height",
-     type=int,
-     help="Height of the image",
-     default=512,
- )
- parser.add_argument(
-     "--image_width",
-     type=int,
-     help="Width of the image",
-     default=512,
- )
- parser.add_argument(
-     "--inference_steps",
-     type=int,
-     help="Number of steps,default : 4",
-     default=4,
- )
- parser.add_argument(
-     "--guidance_scale",
-     type=int,
-     help="Guidance scale,default : 1.0",
-     default=1.0,
- )
+ # parser = ArgumentParser(description=f"FAST SD CPU {constants.APP_VERSION}")
+ # parser.add_argument(
+ #     "-s",
+ #     "--share",
+ #     action="store_true",
+ #     help="Create sharable link(Web UI)",
+ #     required=False,
+ # )
+ # group = parser.add_mutually_exclusive_group(required=False)
+ # group.add_argument(
+ #     "-g",
+ #     "--gui",
+ #     action="store_true",
+ #     help="Start desktop GUI",
+ # )
+ # group.add_argument(
+ #     "-w",
+ #     "--webui",
+ #     action="store_true",
+ #     help="Start Web UI",
+ # )
+ # group.add_argument(
+ #     "-r",
+ #     "--realtime",
+ #     action="store_true",
+ #     help="Start realtime inference UI(experimental)",
+ # )
+ # group.add_argument(
+ #     "-v",
+ #     "--version",
+ #     action="store_true",
+ #     help="Version",
+ # )
+ # parser.add_argument(
+ #     "--lcm_model_id",
+ #     type=str,
+ #     help="Model ID or path,Default SimianLuo/LCM_Dreamshaper_v7",
+ #     default="SimianLuo/LCM_Dreamshaper_v7",
+ # )
+ # parser.add_argument(
+ #     "--prompt",
+ #     type=str,
+ #     help="Describe the image you want to generate",
+ # )
+ # parser.add_argument(
+ #     "--image_height",
+ #     type=int,
+ #     help="Height of the image",
+ #     default=512,
+ # )
+ # parser.add_argument(
+ #     "--image_width",
+ #     type=int,
+ #     help="Width of the image",
+ #     default=512,
+ # )
+ # parser.add_argument(
+ #     "--inference_steps",
+ #     type=int,
+ #     help="Number of steps,default : 4",
+ #     default=4,
+ # )
+ # parser.add_argument(
+ #     "--guidance_scale",
+ #     type=int,
+ #     help="Guidance scale,default : 1.0",
+ #     default=1.0,
+ # )

- parser.add_argument(
-     "--number_of_images",
-     type=int,
-     help="Number of images to generate ,default : 1",
-     default=1,
- )
- parser.add_argument(
-     "--seed",
-     type=int,
-     help="Seed,default : -1 (disabled) ",
-     default=-1,
- )
- parser.add_argument(
-     "--use_openvino",
-     action="store_true",
-     help="Use OpenVINO model",
- )
+ # parser.add_argument(
+ #     "--number_of_images",
+ #     type=int,
+ #     help="Number of images to generate ,default : 1",
+ #     default=1,
+ # )
+ # parser.add_argument(
+ #     "--seed",
+ #     type=int,
+ #     help="Seed,default : -1 (disabled) ",
+ #     default=-1,
+ # )
+ # parser.add_argument(
+ #     "--use_openvino",
+ #     action="store_true",
+ #     help="Use OpenVINO model",
+ # )

- parser.add_argument(
-     "--use_offline_model",
-     action="store_true",
-     help="Use offline model",
- )
- parser.add_argument(
-     "--use_safety_checker",
-     action="store_false",
-     help="Use safety checker",
- )
- parser.add_argument(
-     "--use_lcm_lora",
-     action="store_true",
-     help="Use LCM-LoRA",
- )
- parser.add_argument(
-     "--base_model_id",
-     type=str,
-     help="LCM LoRA base model ID,Default Lykon/dreamshaper-8",
-     default="Lykon/dreamshaper-8",
- )
- parser.add_argument(
-     "--lcm_lora_id",
-     type=str,
-     help="LCM LoRA model ID,Default latent-consistency/lcm-lora-sdv1-5",
-     default="latent-consistency/lcm-lora-sdv1-5",
- )
- parser.add_argument(
-     "-i",
-     "--interactive",
-     action="store_true",
-     help="Interactive CLI mode",
- )
- parser.add_argument(
-     "--use_tiny_auto_encoder",
-     action="store_true",
-     help="Use tiny auto encoder for SD (TAESD)",
- )
- args = parser.parse_args()
+ # parser.add_argument(
+ #     "--use_offline_model",
+ #     action="store_true",
+ #     help="Use offline model",
+ # )
+ # parser.add_argument(
+ #     "--use_safety_checker",
+ #     action="store_false",
+ #     help="Use safety checker",
+ # )
+ # parser.add_argument(
+ #     "--use_lcm_lora",
+ #     action="store_true",
+ #     help="Use LCM-LoRA",
+ # )
+ # parser.add_argument(
+ #     "--base_model_id",
+ #     type=str,
+ #     help="LCM LoRA base model ID,Default Lykon/dreamshaper-8",
+ #     default="Lykon/dreamshaper-8",
+ # )
+ # parser.add_argument(
+ #     "--lcm_lora_id",
+ #     type=str,
+ #     help="LCM LoRA model ID,Default latent-consistency/lcm-lora-sdv1-5",
+ #     default="latent-consistency/lcm-lora-sdv1-5",
+ # )
+ # parser.add_argument(
+ #     "-i",
+ #     "--interactive",
+ #     action="store_true",
+ #     help="Interactive CLI mode",
+ # )
+ # parser.add_argument(
+ #     "--use_tiny_auto_encoder",
+ #     action="store_true",
+ #     help="Use tiny auto encoder for SD (TAESD)",
+ # )
+ # args = parser.parse_args()

- if args.version:
-     print(APP_VERSION)
-     exit()
+ # if args.version:
+ #     print(APP_VERSION)
+ #     exit()

  # parser.print_help()
  show_system_info()
@@ -184,7 +182,7 @@ print(
  print(
      f"Found {len(app_settings.openvino_lcm_models)} OpenVINO LCM models in config/openvino-lcm-models.txt"
  )
-
+ app_settings.settings.lcm_diffusion_setting.use_openvino = True
  from frontend.webui.ui import start_webui

  print("Starting web UI mode")
@@ -192,91 +190,85 @@ start_webui(
      args.share,
  )

- app.get("/")
- def root():
-     return {"API": "hello"}
-
- @app.post("/img2img")
- async def predict(prompt=Body(...),imgbase64data=Body(...),negative_prompt=Body(None),userId=Body(None)):
-     pipeline = get_pipeline()
-     MAX_QUEUE_SIZE = 4
-     start = time.time()
-     print("params", imgbase64data, prompt)
-     image_data = base64.b64decode(imgbase64data)
-     image1 = Image.open(io.BytesIO(image_data))
-     w, h = image1.size
-     newW = 512
-     newH = int(h * newW / w)
-     img = image1.resize((newW, newH))
-     end1 = time.time()
-     now = datetime.now()
-     print(now)
-     print("image:", img.size)
-     print("pipeline load:", end1 - start)
-     global previous_height, previous_width, previous_model_id, previous_num_of_images, app_settings
+ # app = FastAPI(name="mutilParam")
+ # print("executed")
+ # @app.get("/")
+ # def root():
+ #     return {"API": "hello"}
+
+ # @app.post("/img2img")
+ # async def predict(prompt=Body(...),imgbase64data=Body(...),negative_prompt=Body(None),userId=Body(None)):
+ #     MAX_QUEUE_SIZE = 4
+ #     start = time.time()
+ #     print("params", imgbase64data, prompt)
+ #     image_data = base64.b64decode(imgbase64data)
+ #     image1 = Image.open(io.BytesIO(image_data))
+ #     w, h = image1.size
+ #     newW = 512
+ #     newH = int(h * newW / w)
+ #     img = image1.resize((newW, newH))
+ #     end1 = time.time()
+ #     now = datetime.now()
+ #     print(now)
+ #     print("image:", img.size)
+ #     print("pipeline load:", end1 - start)
+ #     global previous_height, previous_width, previous_model_id, previous_num_of_images, app_settings

-     app_settings.settings.lcm_diffusion_setting.prompt = prompt
-     app_settings.settings.lcm_diffusion_setting.negative_prompt = negative_prompt
-     app_settings.settings.lcm_diffusion_setting.init_image = img
-     app_settings.settings.lcm_diffusion_setting.strength = 0.6
+ #     app_settings.settings.lcm_diffusion_setting.prompt = prompt
+ #     app_settings.settings.lcm_diffusion_setting.negative_prompt = negative_prompt
+ #     app_settings.settings.lcm_diffusion_setting.init_image = image1
+ #     app_settings.settings.lcm_diffusion_setting.strength = 0.6

-     app_settings.settings.lcm_diffusion_setting.diffusion_task = (
-         DiffusionTask.image_to_image.value
-     )
-     model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
-     reshape = False
-     app_settings.settings.lcm_diffusion_setting.image_height=newH
-     image_width = app_settings.settings.lcm_diffusion_setting.image_width
-     image_height = app_settings.settings.lcm_diffusion_setting.image_height
-     num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
-     reshape = is_reshape_required(
-         previous_width,
-         image_width,
-         previous_height,
-         image_height,
-         previous_model_id,
-         model_id,
-         previous_num_of_images,
-         num_images,
-     )
+ #     app_settings.settings.lcm_diffusion_setting.diffusion_task = (
+ #         DiffusionTask.image_to_image.value
+ #     )
+ #     model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
+ #     reshape = False
+ #     app_settings.settings.lcm_diffusion_setting.image_height=newH
+ #     image_width = app_settings.settings.lcm_diffusion_setting.image_width
+ #     image_height = app_settings.settings.lcm_diffusion_setting.image_height
+ #     num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
+ #     reshape = is_reshape_required(
+ #         previous_width,
+ #         image_width,
+ #         previous_height,
+ #         image_height,
+ #         previous_model_id,
+ #         model_id,
+ #         previous_num_of_images,
+ #         num_images,
+ #     )


-     with ThreadPoolExecutor(max_workers=1) as executor:
-         future = executor.submit(
-             context.generate_text_to_image,
-             app_settings.settings,
-             reshape,
-             DEVICE,
-         )
-         images = future.result()
-     # images = context.generate_text_to_image(
-     #     app_settings.settings,
-     #     reshape,
-     #     DEVICE,
-     # )
-     previous_width = image_width
-     previous_height = image_height
-     previous_model_id = model_id
-     previous_num_of_images = num_images
-     output_image = images[0]
-     end2 = time.time()
-     print("test", output_image)
-     print("generation finished:", end2 - end1)
-     # convert the image object to bytes
-     image_data = io.BytesIO()
+ #     with ThreadPoolExecutor(max_workers=1) as executor:
+ #         future = executor.submit(
+ #             context.generate_text_to_image,
+ #             app_settings.settings,
+ #             reshape,
+ #             DEVICE,
+ #         )
+ #         images = future.result()
+ #     previous_width = image_width
+ #     previous_height = image_height
+ #     previous_model_id = model_id
+ #     previous_num_of_images = num_images
+ #     output_image = images[0]
+ #     end2 = time.time()
+ #     print("test", output_image)
+ #     print("generation finished:", end2 - end1)
+ #     # convert the image object to bytes
+ #     image_data = io.BytesIO()

-     # save the image into the BytesIO object as JPEG
-     output_image.save(image_data, format='JPEG')
+ #     # save the image into the BytesIO object as JPEG
+ #     output_image.save(image_data, format='JPEG')

-     # convert the BytesIO contents to a byte string
-     image_data_bytes = image_data.getvalue()
-     output_image_base64 = base64.b64encode(image_data_bytes).decode('utf-8')
-     print("finished image:", output_image_base64)
-     logger = logging.getLogger('')
-     logger.info(output_image_base64)
-     return output_image_base64
+ #     # convert the BytesIO contents to a byte string
+ #     image_data_bytes = image_data.getvalue()
+ #     output_image_base64 = base64.b64encode(image_data_bytes).decode('utf-8')
+ #     print("finished image:", output_image_base64)
+ #     return output_image_base64


- @app.post("/predict")
- async def predict(prompt=Body(...)):
-     return f"Hello, {prompt}"
+ # @app.post("/predict")
+ # async def predict(prompt=Body(...)):
+ #     return f"Hello, {prompt}"
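Note: this commit disables the ad-hoc FastAPI routes and hands generation entirely to the Gradio web UI. For reference, a minimal client sketch for the /img2img route if it were re-enabled; the localhost:7860 URL (taken from the Dockerfile CMD) and the file names are assumptions for illustration, not part of the commit:

    import base64
    import requests

    def img2img(path, prompt, url="http://localhost:7860/img2img"):
        # The route expects a JSON body with the Body(...) parameter names
        with open(path, "rb") as f:
            img_b64 = base64.b64encode(f.read()).decode("utf-8")
        resp = requests.post(url, json={"prompt": prompt, "imgbase64data": img_b64})
        resp.raise_for_status()
        # The route returns the generated JPEG as a base64 string
        return base64.b64decode(resp.json())

    if __name__ == "__main__":
        with open("output.jpg", "wb") as f:
            f.write(img2img("input.jpg", "a watercolor landscape"))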
app_settings.py CHANGED
@@ -15,9 +15,7 @@ from copy import deepcopy
  class AppSettings:
      def __init__(self):
          self.config_path = FastStableDiffusionPaths().get_app_settings_path()
-         print("model config path", self.config_path)
-         base_url='/app/'
-         self._stable_diffsuion_models = get_models_from_text_file (
+         self._stable_diffsuion_models = get_models_from_text_file(
              FastStableDiffusionPaths().get_models_config_path(SD_MODELS_FILE)
          )
          self._lcm_lora_models = get_models_from_text_file(
backend/lcm_text_to_image.py CHANGED
@@ -38,6 +38,7 @@ class LCMTextToImage:
          self.previous_use_openvino = False
          self.img_to_img_pipeline = None
          self.is_openvino_init = False
+         self.task_type = DiffusionTask.text_to_image
          self.torch_data_type = (
              torch.float32 if is_openvino_device() or DEVICE == "mps" else torch.float16
          )
@@ -93,12 +94,18 @@
          lcm_lora: LCMLora = lcm_diffusion_setting.lcm_lora
          ov_model_id = lcm_diffusion_setting.openvino_lcm_model_id

-         # if lcm_diffusion_setting.diffusion_task == DiffusionTask.image_to_image.value:
-         #     lcm_diffusion_setting.init_image = resize_pil_image(
-         #         lcm_diffusion_setting.init_image,
-         #         lcm_diffusion_setting.image_width,
-         #         lcm_diffusion_setting.image_height,
-         #     )
+         if lcm_diffusion_setting.diffusion_task == DiffusionTask.image_to_image.value:
+             w, h = lcm_diffusion_setting.init_image.size
+             newW = lcm_diffusion_setting.image_width
+             newH = int(h * newW / w)
+             img = lcm_diffusion_setting.init_image.resize((newW, newH))
+             print("new image", newH, newW, lcm_diffusion_setting.image_height)
+             lcm_diffusion_setting.init_image = resize_pil_image(
+                 img,
+                 lcm_diffusion_setting.image_width,
+                 lcm_diffusion_setting.image_height,
+             )
+             print("image size", lcm_diffusion_setting.init_image)

          if (
              self.pipeline is None
@@ -110,6 +117,7 @@
              or self.previous_ov_model_id != ov_model_id
              or self.previous_safety_checker != lcm_diffusion_setting.use_safety_checker
              or self.previous_use_openvino != lcm_diffusion_setting.use_openvino
+             or self.previous_task_type != lcm_diffusion_setting.diffusion_task
          ):
              if self.use_openvino and is_openvino_device():
                  if self.pipeline:
@@ -218,6 +226,7 @@
          self.previous_use_lcm_lora = use_lora
          self.previous_safety_checker = lcm_diffusion_setting.use_safety_checker
          self.previous_use_openvino = lcm_diffusion_setting.use_openvino
+         self.previous_task_type = lcm_diffusion_setting.diffusion_task
          if (
              lcm_diffusion_setting.diffusion_task
              == DiffusionTask.text_to_image.value
@@ -238,12 +247,6 @@
          reshape: bool = False,
      ) -> Any:
          guidance_scale = lcm_diffusion_setting.guidance_scale
-         # w, h = lcm_diffusion_setting.init_image.size
-         # newW = lcm_diffusion_setting.image_width
-         # newH = int(h * newW / w)
-         # lcm_diffusion_setting.image_height=newH
-         # lcm_diffusion_setting.init_image = lcm_diffusion_setting.init_image.resize((newW, newH))
-         print("changed image size", lcm_diffusion_setting.image_height, lcm_diffusion_setting.image_width)
          img_to_img_inference_steps = lcm_diffusion_setting.inference_steps
          check_step_value = int(
              lcm_diffusion_setting.inference_steps * lcm_diffusion_setting.strength
@@ -268,7 +271,7 @@
          if is_openvino_pipe:
              print("Using OpenVINO")
              if reshape and not self.is_openvino_init:
-                 print("Reshape and compile, resize")
+                 print("Reshape and compile")
                  self.pipeline.reshape(
                      batch_size=-1,
                      height=lcm_diffusion_setting.image_height,
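Note: the init-image handling added in this file first scales the input to the configured width while keeping its aspect ratio, then resize_pil_image forces the final configured width and height, so the intermediate aspect-preserving resize only matters when image_height is set to the computed value elsewhere. A standalone sketch of that arithmetic, with a hypothetical 640x480 input:

    from PIL import Image

    def prepare_init_image(img, target_w, target_h):
        # step 1: match the target width, preserving aspect ratio
        w, h = img.size
        new_h = int(h * target_w / w)
        img = img.resize((target_w, new_h))
        # step 2: force the configured size, mirroring resize_pil_image
        return img.convert("RGB").resize((target_w, target_h))

    img = Image.new("RGB", (640, 480))
    print(prepare_init_image(img, 512, 512).size)  # step 1 gives (512, 384); step 2 gives (512, 512)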
configs/lcm-models.txt CHANGED
@@ -1,5 +1,5 @@
+ SimianLuo/LCM_Dreamshaper_v7
  stabilityai/sd-turbo
  stabilityai/sdxl-turbo
- SimianLuo/LCM_Dreamshaper_v7
  latent-consistency/lcm-sdxl
  latent-consistency/lcm-ssd-1b
configs/openvino-lcm-models.txt CHANGED
@@ -1,4 +1,4 @@
+ rupeshs/LCM-dreamshaper-v7-openvino
  rupeshs/sd-turbo-openvino
  rupeshs/sdxl-turbo-openvino-int8
- rupeshs/LCM-dreamshaper-v7-openvino
  Disty0/LCM_SoteMix
constants.py CHANGED
@@ -1,6 +1,6 @@
  from os import environ

- APP_VERSION = "v1.0.0 beta 22"
+ APP_VERSION = "v1.0.0 beta 23"
  LCM_DEFAULT_MODEL = "SimianLuo/LCM_Dreamshaper_v7"
  LCM_DEFAULT_MODEL_OPENVINO = "rupeshs/LCM-dreamshaper-v7-openvino"
  APP_NAME = "FastSD CPU"
context.py CHANGED
@@ -5,7 +5,6 @@ from backend.lcm_text_to_image import LCMTextToImage
  from time import perf_counter
  from backend.image_saver import ImageSaver
  from pprint import pprint
- from state import get_settings


  class Context:
@@ -23,8 +22,10 @@
          reshape: bool = False,
          device: str = "cpu",
      ) -> Any:
-         get_settings().save()
          tick = perf_counter()
+         from state import get_settings
+
+         get_settings().save()
          pprint(settings.lcm_diffusion_setting.model_dump())
          if not settings.lcm_diffusion_setting.lcm_lora:
              return None
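Note: state.py now imports Context (see state.py below), so a top-level `from state import get_settings` here would create an import cycle; moving the import into the method body defers it to call time, after both modules have finished loading. A generic sketch of the pattern, with hypothetical module names:

    # module_a.py (hypothetical, for illustration only)
    def run():
        # deferred import: module_b may import module_a at load time without a
        # cycle, because this line only executes when run() is actually called
        from module_b import helper
        return helper()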
frontend/webui/generation_settings_ui.py CHANGED
@@ -137,4 +137,4 @@ def get_generation_settings_ui() -> None:
      tiny_auto_encoder_checkbox.change(
          on_change_tiny_auto_encoder_checkbox, tiny_auto_encoder_checkbox
      )
-     offline_checkbox.change(on_change_tiny_auto_encoder_checkbox, offline_checkbox)
+     offline_checkbox.change(on_offline_checkbox, offline_checkbox)
frontend/webui/image_to_image_ui.py CHANGED
@@ -1,16 +1,15 @@
  from typing import Any
  import gradio as gr
  from backend.models.lcmdiffusion_setting import DiffusionTask
- from context import Context
  from models.interface_types import InterfaceType
  from frontend.utils import is_reshape_required
  from constants import DEVICE
- from state import get_settings
+ from state import get_settings, get_context
  from concurrent.futures import ThreadPoolExecutor

  app_settings = get_settings()

- context = Context(InterfaceType.WEBUI)
+ context = get_context(InterfaceType.WEBUI)
  previous_width = 0
  previous_height = 0
  previous_model_id = ""
@@ -24,22 +23,17 @@ def generate_image_to_image(
      strength,
  ) -> Any:
      global previous_height, previous_width, previous_model_id, previous_num_of_images, app_settings
-     w, h = init_image.size
-     newW = app_settings.settings.lcm_diffusion_setting.image_width
-     newH = int(h * newW / w)
-     img = init_image.resize((newW, newH))
-     print("new image", newH, newW, app_settings.settings.lcm_diffusion_setting.image_height)
+
      app_settings.settings.lcm_diffusion_setting.prompt = prompt
      app_settings.settings.lcm_diffusion_setting.negative_prompt = negative_prompt
-     app_settings.settings.lcm_diffusion_setting.init_image = img
+     app_settings.settings.lcm_diffusion_setting.init_image = init_image
      app_settings.settings.lcm_diffusion_setting.strength = strength
-
+
      app_settings.settings.lcm_diffusion_setting.diffusion_task = (
          DiffusionTask.image_to_image.value
      )
      model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
      reshape = False
-     # app_settings.settings.lcm_diffusion_setting.image_height=newH
      image_width = app_settings.settings.lcm_diffusion_setting.image_width
      image_height = app_settings.settings.lcm_diffusion_setting.image_height
      num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
@@ -55,7 +49,6 @@ def generate_image_to_image(
          num_images,
      )

-
      with ThreadPoolExecutor(max_workers=1) as executor:
          future = executor.submit(
              context.generate_text_to_image,
frontend/webui/image_variations_ui.py ADDED
@@ -0,0 +1,105 @@
+ from typing import Any
+ import gradio as gr
+ from backend.models.lcmdiffusion_setting import DiffusionTask
+ from context import Context
+ from models.interface_types import InterfaceType
+ from frontend.utils import is_reshape_required
+ from constants import DEVICE
+ from state import get_settings, get_context
+ from concurrent.futures import ThreadPoolExecutor
+
+ app_settings = get_settings()
+
+ context = get_context(InterfaceType.WEBUI)
+ previous_width = 0
+ previous_height = 0
+ previous_model_id = ""
+ previous_num_of_images = 0
+
+
+ def generate_image_variations(
+     init_image,
+     variation_strength,
+ ) -> Any:
+     global previous_height, previous_width, previous_model_id, previous_num_of_images, app_settings
+
+     app_settings.settings.lcm_diffusion_setting.init_image = init_image
+     app_settings.settings.lcm_diffusion_setting.strength = variation_strength
+     app_settings.settings.lcm_diffusion_setting.prompt = ""
+     app_settings.settings.lcm_diffusion_setting.negative_prompt = ""
+
+     app_settings.settings.lcm_diffusion_setting.diffusion_task = (
+         DiffusionTask.image_to_image.value
+     )
+     model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
+     reshape = False
+     image_width = app_settings.settings.lcm_diffusion_setting.image_width
+     image_height = app_settings.settings.lcm_diffusion_setting.image_height
+     num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
+     if app_settings.settings.lcm_diffusion_setting.use_openvino:
+         reshape = is_reshape_required(
+             previous_width,
+             image_width,
+             previous_height,
+             image_height,
+             previous_model_id,
+             model_id,
+             previous_num_of_images,
+             num_images,
+         )
+
+     with ThreadPoolExecutor(max_workers=1) as executor:
+         future = executor.submit(
+             context.generate_text_to_image,
+             app_settings.settings,
+             reshape,
+             DEVICE,
+         )
+         images = future.result()
+
+     previous_width = image_width
+     previous_height = image_height
+     previous_model_id = model_id
+     previous_num_of_images = num_images
+     return images
+
+
+ def get_image_variations_ui() -> None:
+     with gr.Blocks():
+         with gr.Row():
+             with gr.Column():
+                 input_image = gr.Image(label="Init image", type="pil")
+                 with gr.Row():
+                     generate_btn = gr.Button(
+                         "Generate",
+                         elem_id="generate_button",
+                         scale=0,
+                     )
+
+                 variation_strength = gr.Slider(
+                     0.1,
+                     1,
+                     value=0.4,
+                     step=0.01,
+                     label="Variations Strength",
+                 )
+
+                 input_params = [
+                     input_image,
+                     variation_strength,
+                 ]
+
+             with gr.Column():
+                 output = gr.Gallery(
+                     label="Generated images",
+                     show_label=True,
+                     elem_id="gallery",
+                     columns=2,
+                     height=512,
+                 )
+
+         generate_btn.click(
+             fn=generate_image_variations,
+             inputs=input_params,
+             outputs=output,
+         )
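Note: the new tab reuses the image-to-image path with an empty prompt, so the output is driven only by the init image and the strength slider. A sketch of calling the callback outside Gradio; the photo.jpg path is an assumption for illustration:

    from PIL import Image
    from frontend.webui.image_variations_ui import generate_image_variations

    init = Image.open("photo.jpg")
    images = generate_image_variations(init, 0.4)  # returns a list of PIL images
    images[0].save("variation_0.jpg")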
frontend/webui/models_ui.py CHANGED
@@ -71,7 +71,6 @@ def get_models_ui() -> None:
          value=get_valid_model_id(
              app_settings.openvino_lcm_models,
              app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id,
-             LCM_DEFAULT_MODEL_OPENVINO
          ),
          interactive=True,
      )
frontend/webui/text_to_image_ui.py CHANGED
@@ -1,16 +1,15 @@
  import gradio as gr
  from typing import Any
  from backend.models.lcmdiffusion_setting import DiffusionTask
- from context import Context
  from models.interface_types import InterfaceType
  from constants import DEVICE
- from state import get_settings
+ from state import get_settings, get_context
  from frontend.utils import is_reshape_required
  from concurrent.futures import ThreadPoolExecutor
  from pprint import pprint

  app_settings = get_settings()
- context = Context(InterfaceType.WEBUI)
+ context = get_context(InterfaceType.WEBUI)
  previous_width = 0
  previous_height = 0
  previous_model_id = ""
frontend/webui/ui.py CHANGED
@@ -4,6 +4,7 @@ from frontend.webui.text_to_image_ui import get_text_to_image_ui
  from frontend.webui.image_to_image_ui import get_image_to_image_ui
  from frontend.webui.generation_settings_ui import get_generation_settings_ui
  from frontend.webui.models_ui import get_models_ui
+ from frontend.webui.image_variations_ui import get_image_variations_ui
  from paths import FastStableDiffusionPaths
  from state import get_settings

@@ -53,6 +54,8 @@ def get_web_ui() -> gr.Blocks:
              get_text_to_image_ui()
          with gr.TabItem("Image to Image"):
              get_image_to_image_ui()
+         with gr.TabItem("Image Variations"):
+             get_image_variations_ui()
          with gr.TabItem("Generation Settings"):
              get_generation_settings_ui()
          with gr.TabItem("Models"):
image_ops.py CHANGED
@@ -6,9 +6,6 @@ def resize_pil_image(
      image_width,
      image_height,
  ):
-     w, h = pil_image.size
-     newW = image_width
-     newH = int(h * newW / w)
      return pil_image.convert("RGB").resize(
          (
              image_width,
@@ -11,8 +11,7 @@ def join_paths(
11
 
12
  def get_app_path() -> str:
13
  app_dir = os.path.dirname(__file__)
14
- work_dir = os.path.dirname(app_dir)+'/app'
15
- print("基础地址",work_dir)
16
  return work_dir
17
 
18
 
@@ -51,7 +50,6 @@ class FastStableDiffusionPaths:
51
  @staticmethod
52
  def get_models_config_path(model_config_file: str) -> str:
53
  configs_path = get_configs_path()
54
-
55
  models_path = join_paths(
56
  configs_path,
57
  model_config_file,
 
11
 
12
  def get_app_path() -> str:
13
  app_dir = os.path.dirname(__file__)
14
+ work_dir = os.path.dirname(app_dir)+'/app/'
 
15
  return work_dir
16
 
17
 
 
50
  @staticmethod
51
  def get_models_config_path(model_config_file: str) -> str:
52
  configs_path = get_configs_path()
 
53
  models_path = join_paths(
54
  configs_path,
55
  model_config_file,
state.py CHANGED
@@ -1,10 +1,14 @@
1
  from app_settings import AppSettings
2
  from typing import Optional
3
 
 
 
 
4
 
5
  class _AppState:
6
  _instance: Optional["_AppState"] = None
7
  settings: Optional[AppSettings] = None
 
8
 
9
 
10
  def get_state() -> _AppState:
@@ -19,3 +23,10 @@ def get_settings(skip_file: bool = False) -> AppSettings:
19
  state.settings = AppSettings()
20
  state.settings.load(skip_file)
21
  return state.settings
 
 
 
 
 
 
 
 
1
  from app_settings import AppSettings
2
  from typing import Optional
3
 
4
+ from context import Context
5
+ from models.interface_types import InterfaceType
6
+
7
 
8
  class _AppState:
9
  _instance: Optional["_AppState"] = None
10
  settings: Optional[AppSettings] = None
11
+ context: Optional[Context] = None
12
 
13
 
14
  def get_state() -> _AppState:
 
23
  state.settings = AppSettings()
24
  state.settings.load(skip_file)
25
  return state.settings
26
+
27
+
28
+ def get_context(interface_type: InterfaceType) -> Context:
29
+ state = get_state()
30
+ if state.context is None:
31
+ state.context = Context(interface_type)
32
+ return state.context
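Note: get_context mirrors get_settings: it lazily creates a single Context and caches it on the shared _AppState, so every web UI tab now reuses one pipeline instead of constructing its own. A quick sketch of the singleton behavior, assuming the repo modules are importable:

    from models.interface_types import InterfaceType
    from state import get_context

    a = get_context(InterfaceType.WEBUI)
    b = get_context(InterfaceType.WEBUI)
    assert a is b  # one shared Context; pipeline state is reused across tabs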