import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
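# The meta-llama checkpoints are gated on the Hugging Face Hub, so downloading
# them requires an access token. A minimal sketch, assuming the token is
# exposed to the Space as an HF_TOKEN secret (the variable name is an
# assumption, not part of the original app):
import os
from huggingface_hub import login

if os.environ.get("HF_TOKEN"):
    login(token=os.environ["HF_TOKEN"])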
# Model to load from the Hugging Face Hub
model_id = "meta-llama/Llama-3.2-1B"

# Load the tokenizer and model, then wrap them in a text-generation pipeline
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
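# Optional: run generation on a GPU when one is available. The pipeline above
# defaults to CPU; passing a CUDA device index moves the model. A sketch only,
# since whether this Space has a GPU is an assumption:
#
#     import torch
#     llm_pipeline = pipeline(
#         "text-generation",
#         model=model,
#         tokenizer=tokenizer,
#         device=0 if torch.cuda.is_available() else -1,
#     )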
# Define the function to generate text based on an input prompt
def generate_text(prompt):
    if llm_pipeline is None:
        return "Error: Model not loaded."
    result = llm_pipeline(prompt, max_length=100, num_return_sequences=1)
    return result[0]['generated_text']
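# Quick sanity check (a sketch; the prompt is an arbitrary example, not from
# the original app):
#
#     print(generate_text("Once upon a time"))
#
# The pipeline returns a list with one dict per requested sequence; each
# dict's "generated_text" field contains the prompt followed by the
# continuation.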
# Create the Gradio interface
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=7, label="Input Prompt"),
    outputs="text",
    title="Large Language Model Text Generation",
    description="Enter a prompt to generate text using a large language model.",
)
print("Launching the Gradio interface...")
# Launch the interface
interface.launch()
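# If generations are slow and requests pile up, Gradio's built-in queue can
# serialize them (a sketch, not part of the original app):
#
#     interface.queue().launch()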