# hairfastgan / app.py
import subprocess
import shutil
import os
import gradio as gr
import torchvision.transforms as T
import sys
import spaces
from PIL import Image
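
# Clone the HairFastGAN code from GitHub, then clone the weights repository of the same
# name from the Hugging Face Hub inside it and pull its LFS-tracked pretrained models.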
subprocess.run(["git", "clone", "https://github.com/AIRI-Institute/HairFastGAN"], check=True)
os.chdir("HairFastGAN")
subprocess.run(["git", "clone", "https://huggingface.co/AIRI-Institute/HairFastGAN"], check=True)
os.chdir("HairFastGAN")
subprocess.run(["git", "lfs", "pull"], check=True)
os.chdir("..")
shutil.move("HairFastGAN/pretrained_models", "pretrained_models")
shutil.move("HairFastGAN/input", "input")
shutil.rmtree("HairFastGAN")
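
# Flatten the code checkout: move its contents up into the Space's root directory and remove
# the now-empty HairFastGAN folder, so hair_swap can be imported from the top level.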
items = os.listdir()
for item in items:
    print(item)
    shutil.move(item, os.path.join('..', item))
os.chdir("..")
shutil.rmtree("HairFastGAN")
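
# With the repository contents at the top level, build the HairFast pipeline with its default arguments.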
from hair_swap import HairFast, get_parser
hair_fast = HairFast(get_parser().parse_args([]))
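
# Center-crop uploaded images to the 1024x1024 square resolution the model expects.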
def resize(image_path):
    img = Image.open(image_path)
    square_size = 1024
    left = (img.width - square_size) / 2
    top = (img.height - square_size) / 2
    right = (img.width + square_size) / 2
    bottom = (img.height + square_size) / 2
    img_cropped = img.crop((left, top, right, bottom))
    return img_cropped
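
# Run the hair transfer on GPU: target_1 supplies the hairstyle shape, target_2 the color
# (falling back to target_1 if no separate color reference is given).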
@spaces.GPU
def swap_hair(source, target_1, target_2, progress=gr.Progress(track_tqdm=True)):
    target_2 = target_2 if target_2 else target_1
    final_image = hair_fast.swap(source, target_1, target_2)
    return T.functional.to_pil_image(final_image)
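
# Gradio UI: a face photo, a hairstyle reference, an optional color reference, and the result.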
with gr.Blocks() as demo:
    gr.Markdown("## HairFastGAN")
    with gr.Row():
        source = gr.Image(label="Photo whose hair you want to replace", type="filepath")
        target_1 = gr.Image(label="Reference hairstyle you want to get", type="filepath")
        target_2 = gr.Image(label="Reference hair color you want to get (optional)", type="filepath")
    btn = gr.Button("Get the haircut")
    output = gr.Image(label="Your result")
    gr.Examples(
        examples=[["michael_cera-min.png", "leo_square-min.png", "pink_hair_celeb-min.png"]],
        inputs=[source, target_1, target_2],
    )
    source.upload(fn=resize, inputs=source, outputs=source)
    target_1.upload(fn=resize, inputs=target_1, outputs=target_1)
    target_2.upload(fn=resize, inputs=target_2, outputs=target_2)
    btn.click(fn=swap_hair, inputs=[source, target_1, target_2], outputs=[output])
demo.launch()