"""Streamlit app: free-form text generation via a Hugging Face transformers pipeline."""

import os

import streamlit as st
import torch
from huggingface_hub import HfFolder
from transformers import pipeline

# Persist the Hugging Face token only when it is actually set:
# HfFolder.save_token(None) raises, which crashed the app when the
# HF_TOKEN environment variable was missing.
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN:
    HfFolder.save_token(HF_TOKEN)

# Use the GPU when available; -1 selects CPU in the pipeline device API.
device = 0 if torch.cuda.is_available() else -1


@st.cache_resource
def load_generator():
    """Load the text-generation pipeline once and reuse it across Streamlit reruns.

    BUG FIX: the original called pipeline(..., model="gpt-3"), but "gpt-3"
    is not a model id on the Hugging Face Hub, so loading failed at startup.
    Restored the openly available model from the commented-out code, along
    with GPU device selection.
    """
    return pipeline("text-generation", model="EleutherAI/gpt-neo-2.7B", device=device)


generator = load_generator()

st.title("text class")
st.write("your text.")

text = st.text_area("your input")
if text:
    # Greedy decoding (do_sample=False) keeps output deterministic for a given input.
    out = generator(text, do_sample=False)
    st.json(out)
    st.write(f"reply: {out}")