Spaces:
Running
Running
aakashch0179
committed on
Commit
•
9b75707
1
Parent(s):
6eeb50b
Update app.py
Browse files
app.py
CHANGED
@@ -430,36 +430,55 @@
|
|
430 |
|
431 |
# text to Image
|
432 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
433 |
import streamlit as st
|
434 |
-
import
|
435 |
-
|
436 |
-
|
437 |
-
|
438 |
-
|
439 |
-
|
440 |
-
|
441 |
-
|
442 |
-
|
443 |
-
|
444 |
-
|
445 |
-
|
446 |
-
|
447 |
-
|
448 |
-
|
449 |
-
pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
|
450 |
-
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
|
451 |
-
return pipe
|
452 |
-
|
453 |
-
# Streamlit UI
|
454 |
-
st.title("Image Generation")
|
455 |
-
prompt = st.text_input("Enter your image prompt:")
|
456 |
-
|
457 |
-
if st.button("Generate Image"):
|
458 |
-
if not prompt:
|
459 |
-
st.warning("Please enter a prompt.")
|
460 |
-
else:
|
461 |
-
pipe = load_sdxl_pipeline() # Load the pipeline from cache
|
462 |
-
with torch.no_grad():
|
463 |
-
image = pipe(prompt).images[0]
|
464 |
-
|
465 |
-
st.image(image)
|
|
|
430 |
|
431 |
# text to Image
|
432 |
|
433 |
+
# import streamlit as st
|
434 |
+
# import torch
|
435 |
+
# from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
|
436 |
+
# from huggingface_hub import hf_hub_download
|
437 |
+
# from safetensors.torch import load_file
|
438 |
+
|
439 |
+
# # Model Path/Repo Information
|
440 |
+
# base = "stabilityai/stable-diffusion-xl-base-1.0"
|
441 |
+
# repo = "ByteDance/SDXL-Lightning"
|
442 |
+
# ckpt = "sdxl_lightning_4step_unet.safetensors"
|
443 |
+
|
444 |
+
# # Load model (Executed only once for efficiency)
|
445 |
+
# @st.cache_resource
|
446 |
+
# def load_sdxl_pipeline():
|
447 |
+
# unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
|
448 |
+
# unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
|
449 |
+
# pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
|
450 |
+
# pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
|
451 |
+
# return pipe
|
452 |
+
|
453 |
+
# # Streamlit UI
|
454 |
+
# st.title("Image Generation")
|
455 |
+
# prompt = st.text_input("Enter your image prompt:")
|
456 |
+
|
457 |
+
# if st.button("Generate Image"):
|
458 |
+
# if not prompt:
|
459 |
+
# st.warning("Please enter a prompt.")
|
460 |
+
# else:
|
461 |
+
# pipe = load_sdxl_pipeline() # Load the pipeline from cache
|
462 |
+
# with torch.no_grad():
|
463 |
+
# image = pipe(prompt).images[0]
|
464 |
+
|
465 |
+
# st.image(image)
|
466 |
+
|
467 |
+
|
468 |
+
# text generation
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

st.title("Text Generation with Bloom")


# NOTE(review): "bigscience/bloom" is the full 176B-parameter checkpoint;
# it is unlikely to fit in memory on a typical Space — confirm whether a
# smaller variant (e.g. "bigscience/bloom-560m") was intended.
@st.cache_resource
def load_bloom():
    """Load and cache the Bloom tokenizer and model once per server process.

    Streamlit re-executes the entire script on every widget interaction;
    without st.cache_resource the model would be re-loaded on each click.
    Returns a (tokenizer, model) tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom")
    model = AutoModelForCausalLM.from_pretrained("bigscience/bloom")
    return tokenizer, model


tokenizer, model = load_bloom()

user_input = st.text_area("Enter your prompt:", height=100)

if st.button('Generate Text'):
    if not user_input:
        # Mirror the (commented-out) image-generation section's empty-input
        # handling instead of generating from an empty prompt.
        st.warning("Please enter a prompt.")
    else:
        inputs = tokenizer(user_input, return_tensors="pt")
        outputs = model.generate(**inputs, max_length=100)  # Adjust max_length as needed
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        st.write("Generated Text:")
        st.write(generated_text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|