"""Streamlit demo: text generation with a Hugging Face GPT-2 model.

Reads an optional HF_TOKEN from the environment, loads a text-generation
pipeline, and generates continuations for user-supplied text with a
user-chosen seed and maximum length.
"""
import streamlit as st
import os
from transformers import pipeline, set_seed
from huggingface_hub import HfFolder
import transformers
import torch

# Persist the Hugging Face token (if provided) so gated/private model
# downloads work; otherwise warn and continue anonymously.
HF_TOKEN = os.getenv('HF_TOKEN')
if HF_TOKEN:
    HfFolder.save_token(HF_TOKEN)
else:
    st.warning("HF_TOKEN is not set. Proceeding without a token.")

# Use a valid model identifier.
generator = pipeline('text-generation', model='gpt2-large')

st.title("Text Generation")
st.write("Enter your text below.")
text = st.text_area("Your input")
st.write("Enter seed.")
seed_input = st.text_area("Set seed")
st.write("Enter max length.")
max_length_input = st.text_area("max length")

# Convert the free-text widget values to integers; None marks
# missing or non-numeric input.
try:
    seed = int(seed_input)
    max_length = int(max_length_input)
except ValueError:
    seed = None
    max_length = None

# BUG FIX: the original called set_seed(seed) unconditionally, so invalid
# input crashed with set_seed(None); it also used truthiness checks, which
# rejected a perfectly valid seed of 0. Seed only after validation and use
# explicit `is not None` tests.
if text and seed is not None and max_length is not None:
    set_seed(seed)
    out = generator(text, max_length=max_length, num_return_sequences=5)
    st.json(out)
    st.write(f"Reply: {out[0]['generated_text']}")