hgdgng committed on
Commit 66b1da0
1 Parent(s): e9aa98b

Update app.py

Files changed (1)
  1. app.py +19 -24
app.py CHANGED
@@ -14,28 +14,23 @@ model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")
  # Load the processor
  processor = AutoProcessor.from_pretrained(model_id)
 
- # Define an image URL
- url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"
 
- # Fetch the image using requests
- image = Image.open(requests.get(url, stream=True).raw)
-
- # Define the messages in a format the model understands (adjust as needed)
- messages = [
-     {"role": "user", "content": [
-         {"type": "image"},  # This indicates that the input contains an image
-         {"type": "text", "text": "Can you please describe this image in one sentence?"}
-     ]}
- ]
-
- # Generate input text with the processor
- input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
-
- # Process the image and input text, prepare them for the model
- inputs = processor(image, input_text, return_tensors="pt").to(model.device)
-
- # Run the model to generate a response
- output = model.generate(**inputs, max_new_tokens=70)
-
- # Decode and print the output
- print(processor.decode(output[0][inputs["input_ids"].shape[-1]:]))
+ # Define the function to generate text based on input prompt
+ def generate_text(prompt):
+     if llm_pipeline is None:
+         return "Error: Model not loaded."
+     result = llm_pipeline(prompt, max_length=100, num_return_sequences=1)
+     return result[0]['generated_text']
+
+ # Create the Gradio interface
+ interface = gr.Interface(
+     fn=generate_text,
+     inputs=gr.Textbox(lines=7, label="Input Prompt"),
+     outputs="text",
+     title="Large Language Model Text Generation",
+     description="Enter a prompt to generate text using a large language model."
+ )
+
+ print("Launching the Gradio interface...")
+ # Launch the interface
+ interface.launch()
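
Note on the new code: `generate_text` guards on `llm_pipeline is None`, but `llm_pipeline` is not created anywhere in this hunk, so it has to be defined earlier in app.py or the Space will raise a NameError at request time. Below is a minimal sketch of how that pipeline could be wired up from the causal LM loaded at the top of the file; the tokenizer load, the try/except, and the `pipeline(...)` call are assumptions for illustration, not code from this commit.

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "meta-llama/Llama-3.2-1B"  # assumed to match the existing from_pretrained call

try:
    # Pair the causal LM with a tokenizer and expose it as a text-generation pipeline
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
except Exception as e:
    # Leave the pipeline unset so generate_text can report the error in the UI
    print(f"Model loading failed: {e}")
    llm_pipeline = None

One further note on the pipeline call in the diff: `max_length=100` counts the prompt tokens as well as the generated ones, so a long prompt leaves little room for new text; `max_new_tokens` is the usual way to bound only the generated continuation.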