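# Hugging Face Space: Stable Diffusion img2img demo serving the
# AkiKagura/mkgen-diffusion model through a Gradio interface (CPU only).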
import gradio as gr
import torch
# from torch import autocast  # only needed on GPU

from PIL import Image
import numpy as np
from io import BytesIO
import os
MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD')

#from diffusers import StableDiffusionPipeline
from diffusers import StableDiffusionImg2ImgPipeline

# Stub safety checker: return the images unchanged and report no flagged content
def empty_checker(images, **kwargs):
    return images, False

print("hello")

YOUR_TOKEN=MY_SECRET_TOKEN

device="cpu"

# img2img pipeline
img_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("AkiKagura/mkgen-diffusion", use_auth_token=YOUR_TOKEN)
img_pipe.safety_checker = empty_checker
img_pipe.to(device)

source_img = gr.Image(source="upload", type="filepath", label="init_img")
gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[1], height="auto")

def resize(img):
    # Open the uploaded image and snap it to one of three SD-friendly buckets
    # (512x768 portrait, 768x512 landscape, 512x512 square) based on aspect ratio.
    img = Image.open(img)
    wsize, hsize = img.size
    if 6 * wsize <= 5 * hsize:      # clearly portrait (h/w >= 6/5)
        wsize, hsize = 512, 768
    elif 4 * wsize >= 5 * hsize:    # clearly landscape (w/h >= 5/4)
        wsize, hsize = 768, 512
    else:                           # roughly square
        wsize, hsize = 512, 512
    img = img.resize((wsize, hsize), Image.Resampling.LANCZOS)
    return img, wsize, hsize
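
# For illustration: a 1000x1500 upload becomes 512x768, a 1920x1080 upload
# becomes 768x512, and an 800x800 upload falls through to 512x512.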


def infer(source_img, prompt, guide, steps, seed, strength):
    # Seed a CPU generator so a given seed value reproduces the same image
    generator = torch.Generator('cpu').manual_seed(seed)

    source_image, img_w, img_h = resize(source_img)
    source_image.save('source.png')
    images_list = img_pipe([prompt], init_image=source_image, strength=strength,
                           guidance_scale=guide, num_inference_steps=steps,
                           width=img_w, height=img_h, generator=generator)
    images = []
    for image in images_list["images"]:
        images.append(image)
    return images

print("done")

title="Marco Generation Img2img"
description="<p style='text-align: center;'>Upload your image and use 'mkmk woman' as the prompt to get a Marco image. <br />Warning: slow process, about 10 minutes of inference time on CPU.</p>"

gr.Interface(fn=infer, inputs=[source_img,
    "text",
    gr.Slider(2, 15, value = 7, label = 'Guidance Scale'),
    gr.Slider(10, 50, value = 25, step = 1, label = 'Number of Iterations'),
    gr.Slider(label = "Seed", minimum = 0, maximum = 2147483647, step = 1, randomize = True),
    gr.Slider(label='Strength', minimum = 0, maximum = 1, step = .05, value = .75)],
    outputs=gallery,title=title,description=description, allow_flagging="manual", flagging_dir="flagged").queue(max_size=100).launch(enable_queue=True)
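
# Environment notes: HF_TOKEN_SD is read above and passed as use_auth_token, so it
# should be set as a Space secret (or exported locally) before launch. The calls used
# here (init_image, gr.Image(source=...), .style(), enable_queue) follow the older
# Gradio 3.x / early diffusers interfaces and are assumed to match the pinned versions;
# newer library releases may require small adjustments.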