Blane187 committed
Commit 1ecc2a8
1 Parent(s): f7a3616

Update app.py

Files changed (1)
  1. app.py +16 -76
app.py CHANGED
@@ -1,88 +1,28 @@
 import gradio as gr
 import requests
-import io
-import random
-import os
-import time
 from PIL import Image
-from deep_translator import GoogleTranslator
-import json
-
-
+import io
 
-API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3-medium-diffusers"
+API_URL = "https://api-inference.huggingface.co/models/Blane187/kana-arima-s1-ponyxl-lora-nochekaiser"
 API_TOKEN = os.getenv("HF_READ_TOKEN")
-headers = {"Authorization": f"Bearer {API_TOKEN}"}
-timeout = 100
 
-def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7):
-    if prompt == "" or prompt == None:
-        return None
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
 
-    key = random.randint(0, 999)
-
-    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN")])
-    headers = {"Authorization": f"Bearer {API_TOKEN}"}
-
-    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
-    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
+def query(inputs):
+    response = requests.post(API_URL, headers=headers, json={"inputs": inputs})
+    image_bytes = response.content
+    image = Image.open(io.BytesIO(image_bytes))
+    return image
 
-    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
-    print(f'\033[1mGeneration {key}:\033[0m {prompt}')
+with gr.Blocks() as demo:
+    gr.Markdown("## Generate an Image using Hugging Face Model")
 
-    payload = {
-        "inputs": prompt,
-        "is_negative": is_negative,
-        "steps": steps,
-        "cfg_scale": cfg_scale,
-        "seed": seed if seed != -1 else random.randint(1, 1000000000),
-        "strength": strength
-    }
-
-    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
-    if response.status_code != 200:
-        print(f"Error: Failed to get image. Response status: {response.status_code}")
-        print(f"Response content: {response.text}")
-        if response.status_code == 503:
-            raise gr.Error(f"{response.status_code} : The model is being loaded")
-        raise gr.Error(f"{response.status_code}")
+    with gr.Row():
+        prompt_input = gr.Textbox(label="Enter a prompt", placeholder="Astronaut riding a horse")
+        generate_btn = gr.Button("Generate Image")
 
-    try:
-        image_bytes = response.content
-        image = Image.open(io.BytesIO(image_bytes))
-        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
-        return image
-    except Exception as e:
-        print(f"Error when trying to open the image: {e}")
-        return None
-
-css = """
-#app-container {
-    max-width: 600px;
-    margin-left: auto;
-    margin-right: auto;
-}
-"""
+    output_image = gr.Image(label="Generated Image")
 
-with gr.Blocks(theme='Hev832/niceandsimple', css=css) as app:
-    gr.HTML("<center><h1>Stable Diffusion 3 Medium</h1></center>")
-    with gr.Column(elem_id="app-container"):
-        with gr.Row():
-            with gr.Column(elem_id="prompt-container"):
-                with gr.Row():
-                    text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=2, elem_id="prompt-text-input")
-                    text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
-        with gr.Row():
-            with gr.Accordion("Advanced Settings", open=False):
-                negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
-                steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
-                cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
-                method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
-                strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
-                seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
-        with gr.Row():
-            image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
-
-    text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength], outputs=image_output)
+    generate_btn.click(fn=query, inputs=prompt_input, outputs=output_image)
 
-app.launch(show_api=False, share=False)
+demo.launch()
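
Note: the committed app.py still reads API_TOKEN = os.getenv("HF_READ_TOKEN"), but the commit removes import os, so the Space would raise a NameError at startup. Below is a minimal sketch of the new file with that import restored; the model URL, secret name, and UI are taken from the commit itself, while the response.raise_for_status() guard is an added safeguard, not part of the committed code.

import io
import os

import gradio as gr
import requests
from PIL import Image

# Model endpoint and token exactly as in the commit.
API_URL = "https://api-inference.huggingface.co/models/Blane187/kana-arima-s1-ponyxl-lora-nochekaiser"
API_TOKEN = os.getenv("HF_READ_TOKEN")  # requires `import os`, which the commit drops

headers = {"Authorization": f"Bearer {API_TOKEN}"}

def query(inputs):
    # Send the prompt to the Hugging Face Inference API and decode the returned image bytes.
    response = requests.post(API_URL, headers=headers, json={"inputs": inputs})
    response.raise_for_status()  # added safeguard: surface non-200 responses instead of passing error bytes to PIL
    return Image.open(io.BytesIO(response.content))

with gr.Blocks() as demo:
    gr.Markdown("## Generate an Image using Hugging Face Model")

    with gr.Row():
        prompt_input = gr.Textbox(label="Enter a prompt", placeholder="Astronaut riding a horse")
        generate_btn = gr.Button("Generate Image")

    output_image = gr.Image(label="Generated Image")

    generate_btn.click(fn=query, inputs=prompt_input, outputs=output_image)

demo.launch()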