|
"""Bootstrap: fetch pixray and its dependencies, prepare output dirs, import pixray."""

import os
import subprocess
import sys

# Clone the repos pixray depends on. Best-effort, matching the original
# os.system() behaviour: a failed clone (e.g. directory already present on a
# re-run) is ignored rather than aborting the script. Argument lists avoid
# shell string interpolation.
for _repo in (
    "https://github.com/openai/CLIP",
    "https://github.com/CompVis/taming-transformers.git",
    "https://github.com/dribnet/pixray",
):
    subprocess.run(["git", "clone", _repo])

# exist_ok avoids FileExistsError when the script is run more than once
# (the original os.mkdir crashed on re-runs).
os.makedirs("steps", exist_ok=True)
os.makedirs("models", exist_ok=True)

import gradio as gr
import torch

# Make the freshly cloned pixray checkout importable as a module.
sys.path.append("pixray")

import pixray
|
|
|
|
|
|
|
|
|
def generate(prompt, quality, aspect, iterations=150):
    """Render an image from a text prompt using pixray.

    Args:
        prompt: Text prompt describing the desired image.
        quality: Render quality preset; the UI offers 'draft', 'normal',
            or 'better'.
        aspect: Output aspect preset; the UI offers 'square', 'widescreen',
            or 'portrait'.
        iterations: Number of optimisation iterations (default 150, the
            previously hard-coded value).

    Returns:
        Path to the rendered image file. pixray writes its result to
        'output.png' in the working directory — TODO confirm against the
        pixray outdir setting.
    """
    # Release any VRAM held over from a previous request before starting.
    torch.cuda.empty_cache()
    pixray.reset_settings()

    pixray.add_settings(
        prompts=prompt,
        aspect=aspect,
        iterations=iterations,
        quality=quality,
        make_video=False,
    )

    settings = pixray.apply_settings()
    pixray.do_init(settings)
    pixray.do_run(settings)

    return 'output.png'
|
|
|
|
|
# --- Web UI wiring (legacy gr.inputs API) ---

# Input widgets, in the order generate() expects its arguments.
prompt = gr.inputs.Textbox(default="Underwater city", label="Text Prompt")
quality = gr.inputs.Radio(choices=['draft', 'normal', 'better'], label="Quality")
aspect = gr.inputs.Radio(choices=['square', 'widescreen', 'portrait'], label="Size")
widgets = [prompt, quality, aspect]

# Build the interface and serve it with request queueing enabled.
iface = gr.Interface(generate, inputs=widgets, outputs=['image'], live=False)
iface.launch(debug=False, enable_queue=True)