File size: 8,468 Bytes
9ed4ad4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
import os
import cohere as co
import random
import io
import os
import warnings
from IPython.display import display
from PIL import Image
from stability_sdk import client
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
from PIL import Image
import re
import matplotlib.pyplot as plt
import torch
from diffusers import StableDiffusionPipeline
import gradio as gr

# Connection/auth configuration for the Stability and Cohere APIs.
# SECURITY NOTE(review): these API keys were previously committed in plain text
# and must be considered leaked — rotate them with the providers. Keys are now
# taken from the environment first; the literal fallbacks only preserve the
# previous behavior until rotation is done, then they should be removed.
STABILITY_HOST = os.environ.get("STABILITY_HOST", 'grpc.stability.ai:443')
STABILITY_KEY = os.environ.get("STABILITY_KEY", 'sk-LMwG6TPz8NHVPDHw4LAXdgLiSH37XB12r5wTGx9mD2b0oivS')
cohere_key = os.environ.get("cohere_key", 'EZbtz80P7j0R4ZyoyTNz13CPuZdyWPKtVlBU7dzr')


def generate_caption_keywords(promptz, model='command-xlarge-20221108', max_tokens=25,
                              temperature=0.6, k=0, p=1, frequency_penalty=0,
                              presence_penalty=0, stop_sequences=None):
    """Generate a design prompt for *promptz* via the Cohere API and distill it
    into keywords.

    Fixes over the previous revision: ``stop_sequences`` no longer uses a shared
    mutable ``[]`` default, ``temperature`` is no longer drawn once at import
    time by ``random.uniform``, and the generation parameters are actually
    forwarded to ``co.generate`` instead of being silently ignored (the new
    defaults equal the values that were previously hard-coded, so default-call
    behavior is unchanged).

    Parameters
    ----------
    promptz : str
        Design request, e.g. ``"interior design of kitchen"``; spliced into a
        few-shot prompt of example design descriptions.
    model, max_tokens, temperature, k, p, frequency_penalty, presence_penalty :
        Passed through to ``co.generate``.
    stop_sequences : list[str] | None
        Stop sequences for generation; defaults to ``["##"]`` (the few-shot
        example separator) when ``None``.

    Returns
    -------
    tuple[str, str]
        ``(caption, keywords_string)`` — the raw generated caption and a
        comma-separated, order-preserving list of its deduplicated words with
        punctuation and the stop words the/and/of removed.
    """
    if stop_sequences is None:
        stop_sequences = ["##"]

    response = co.generate(
      model=model,
      prompt=f'write prompts for interior designs which does not exist\n\nexterior design of house = Peaceful wooden mansion, unreal engine 5 tech demo, zillow interior, living room, cozy, Frank Lloyd Wright\n##\nexterior design of house = Double story suburban house,it is raining, night time , peaceful atmosphere, moody lighting , digital art , highly detailed , high contrast, beautiful lighting, award winning , trending on art station, photorealistic, 8k \n##\nexterior design of house =cute chubby house, 3 d render, minimalistic, octane, 1 0 0 mm, depth of field, diorama, blue background \n##\nexterior design of house =elevation of a modern luxurious villa in a rich urban area, photorealist, 4 k \n##\ninterior design  of  of kitchen room = Full modern kitchen, purple furniture, star wallpaper, digital Painting, ultradetailed, artstation, oil Painting, ultradetailed, artstation\n##\ninterior design of  of bathroom =bathroom design archi-viz biopunck grean schem, by Stanley Artgerm Lau, WLOP, Rossdraws, James Jean, Andrei Riabovitchevy, Marc Simonetti, and Sakimichan, trending on artstation\n##\ninterior design  of    bathroom = modern bathroom, gloomy and foggy atmosphere, octane render, artstation trending, horror scene, highly detailed \n##\n{promptz} =',
      max_tokens=max_tokens,
      temperature=temperature,
      k=k,
      p=p,
      frequency_penalty=frequency_penalty,
      presence_penalty=presence_penalty,
      stop_sequences=stop_sequences,
      return_likelihoods='NONE')

    def highlight_keywords(text):
        """Return the unique words of *text*, lowercased, in first-seen order,
        with punctuation and the stop words the/and/of stripped."""
        keywords = []
        text = text.lower()
        text = re.sub(r'[^a-z\s]', '', text)  # remove punctuation/digits
        text = re.sub(r'\b(the|and|of)\b', '', text)  # remove stop words
        for word in text.split():
            if word not in keywords:  # dedupe, preserving first occurrence
                keywords.append(word)
        return keywords

    caption = response.generations[0].text
    keywords_string = ', '.join(highlight_keywords(caption))

    return caption, keywords_string

# def g_search(image):
  

def img2img(design, is_prompt, is_GPU, custom_prompt, cfg_scale, steps):
  """Generate a design image from a prompt, locally on GPU or via the
  Stability API.

  Parameters
  ----------
  design : str
      Design category selected in the UI; expanded with Cohere-generated
      keywords when possible.
  is_prompt : bool
      When true, ignore *design* and use *custom_prompt* verbatim.
  is_GPU : bool
      When true, run CompVis/stable-diffusion-v1-4 locally via diffusers on
      CUDA; otherwise call the hosted Stability API.
  custom_prompt : str
      User-supplied prompt, used only when *is_prompt* is set.
  cfg_scale : float
      Classifier-free guidance scale for the Stability API path.
  steps : int
      Number of inference steps for the Stability API path.

  Returns
  -------
  tuple[PIL.Image.Image | None, str]
      The generated image (None if the API returned no image artifact, e.g.
      when the safety filter triggered) and a reference string recording the
      prompt and generation settings.
  """
  try:
    caption, keywords = generate_caption_keywords(design)
    prompt = design + ', ' + keywords  # separator was missing previously
  except Exception:  # best-effort enrichment; fall back to the bare design text
    prompt = design

  if is_prompt:
    prompt = custom_prompt

  img = None  # guard against no image artifact being returned

  # random.seed() returns None, so the previous code passed seed=None and
  # embedded the string 'None' in filenames; draw an explicit seed instead.
  seed_value = random.randint(0, 2**32 - 1)

  if is_GPU:
    model_id = "CompVis/stable-diffusion-v1-4"
    device = "cuda"

    # NOTE(review): the pipeline is re-downloaded/re-built on every call;
    # consider caching it at module level if this path is used in production.
    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16")
    pipe = pipe.to(device)

    img = pipe(prompt).images[0]

  else:
    engine = "stable-diffusion-v1-5"
    # Set up our connection to the API (key comes from the module-level
    # STABILITY_KEY constant instead of a duplicated hard-coded literal).
    stability_api = client.StabilityInference(
        key=STABILITY_KEY,
        verbose=True,  # Print debug messages.
        engine=engine,  # Set the engine to use for generation.
        # Available engines: stable-diffusion-v1 stable-diffusion-v1-5 stable-diffusion-512-v2-0  stable-inpainting-v1-0 stable-inpainting-512-v2-0
    )

    # Set up our initial generation parameters.
    answers = stability_api.generate(
        prompt=prompt,
        seed=seed_value,  # Same parameters + same seed => same image.
        steps=steps,  # Amount of inference steps performed on image generation.
        cfg_scale=cfg_scale,  # How strongly generation is guided to match the prompt; higher = stronger.
        width=512,  # Generation width, defaults to 512 if not included.
        height=512,  # Generation height, defaults to 512 if not included.
        samples=1,  # Number of images to generate.
        sampler=generation.SAMPLER_K_DPMPP_2M  # Sampler used to denoise the generation.
    )

    for resp in answers:
        for artifact in resp.artifacts:
            if artifact.finish_reason == generation.FILTER:
                warnings.warn(
                    "Your request activated the API's safety filters and could not be processed."
                    "Please modify the prompt and try again.")
            if artifact.type == generation.ARTIFACT_IMAGE:
                img = Image.open(io.BytesIO(artifact.binary))
                img.save(f"new_image{prompt}{cfg_scale}{steps}{seed_value}.jpg")
                print(prompt)

  return img, prompt + 'steps' + str(steps) + 'strength' + str(cfg_scale) + str(seed_value)

# Gradio UI wiring: design-type dropdown, custom-prompt controls and diffusion
# parameters in; the generated image plus the prompt actually used out.
_design_choices = [
    'exterior design of home',
    'interior design of living room',
    'interior design of kitchen',
    'interior design of bathroom',
]

_inputs = [
    gr.Dropdown(_design_choices,
                label="Click here to select your design by Cohere command Langauge model",
                value='interior design'),
    gr.Checkbox(label="Check Custom design if you already have prompt refer  https://lexica.art/?q=interior+design for inpiration",
                value=False),
    gr.Checkbox(label="click to use GPU (not working in Huggingface space )",
                value=False),
    gr.Textbox(label=' Input custom Prompt Text'),
    gr.Slider(2, 15, value=7, label='Guidence Scale'),
    gr.Slider(10, 50, value=50, step=1, label='Number of steps / Iterations'),
]

_outputs = [
    gr.Image(),
    gr.Textbox(label="Prompt for future reference", lines=2),
]

demo = gr.Interface(
    img2img,
    _inputs,
    _outputs,
    title='Baith ul Ahsan🏡(the best house),Create Aesthetic exteriors and interiors of house with power of COhere Language model and Stable Diffusion ',
    description="Baith ul Ahsan(the best house) 🏡  is a web app that uses the power of artificial intelligence to create Aesthetic exteriors and interiors . With the Cohere language Command model, it can generate descriptions of exterior or interior of your home, and the Stable Diffusion algorithm creates relevant appealing images to bring your vision to your thoughts. Give Baith al ahsan a try and see how it can elevate your interior design.--if you want to scale / reaserch / build mobile app on this space konnect me   @[here](https://www.linkedin.com/in/sallu-mandya/)",
)
demo.launch(debug=True)