# Gradio demo: text-to-image generation with pixray (CLIP-guided drawing).
import os
import sys

# Fetch the repositories pixray depends on (OpenAI CLIP and CompVis
# taming-transformers) along with pixray itself.
os.system("git clone https://github.com/openai/CLIP")
os.system("git clone https://github.com/CompVis/taming-transformers.git")
os.system("git clone https://github.com/dribnet/pixray")

# Working directories for intermediate frames and downloaded model weights.
os.makedirs("steps", exist_ok=True)
os.makedirs("models", exist_ok=True)

import gradio as gr
import torch

# Make the freshly cloned pixray package importable.
sys.path.append("pixray")
import pixray



# Main function: run one pixray generation for the prompt and return the output image path.
def generate(prompt, quality, aspect):
    # Release any GPU memory held over from the previous request.
    torch.cuda.empty_cache()
    pixray.reset_settings()

    # A style/drawer selector is currently disabled:
    # use_pixeldraw = (style == 'pixel art')
    # use_clipdraw = (style == 'painting')
    pixray.add_settings(prompts=prompt,
                        aspect=aspect,
                        iterations=150,
                        quality=quality,
                        make_video=False)

    settings = pixray.apply_settings()
    pixray.do_init(settings)
    pixray.do_run(settings)

    # pixray saves the final image as output.png in the working directory.
    return 'output.png'

# Create the UI controls
prompt = gr.inputs.Textbox(default="Underwater city", label="Text Prompt")
quality = gr.inputs.Radio(choices=['draft', 'normal', 'better'], label="Quality")
# style = gr.inputs.Radio(choices=['image', 'painting', 'pixel art'], label="Type")
aspect = gr.inputs.Radio(choices=['square', 'widescreen', 'portrait'], label="Aspect Ratio")


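# Wire the controls into a Gradio Interface; enable_queue processes requests
# through a queue so long pixray runs do not hit request timeouts.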
iface = gr.Interface(generate, inputs=[prompt, quality, aspect], outputs=['image'], enable_queue=True, live=False)
iface.launch(debug=False)