hgdgng commited on
Commit
e9aa98b
1 Parent(s): be51f78

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -9
app.py CHANGED
@@ -1,17 +1,15 @@
1
  import requests
2
  import torch
3
  from PIL import Image
4
- from transformers import LlamaForConditionalGeneration, AutoProcessor
5
 
6
- # Define the model ID, replace with the correct ID if needed
7
- model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
8
 
9
- # Load the model in bfloat16 or float16 if needed
10
- model = LlamaForConditionalGeneration.from_pretrained(
11
- model_id,
12
- torch_dtype=torch.bfloat16, # Change to torch.float16 if hardware doesn't support bfloat16
13
- device_map="auto", # Automatically selects the appropriate device
14
- )
15
 
16
  # Load the processor
17
  processor = AutoProcessor.from_pretrained(model_id)
 
import requests
import torch
from PIL import Image
from transformers import (
    AutoModelForCausalLM,
    AutoProcessor,
    AutoTokenizer,
    pipeline,
)

# Single source of truth for the checkpoint so the pipeline, tokenizer,
# model, and processor can never drift out of sync.
model_id = "meta-llama/Llama-3.2-1B"

# High-level pipeline for quick text generation.
pipe = pipeline("text-generation", model=model_id)

# Load tokenizer and model directly for finer-grained control.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Load the processor
# NOTE(review): Llama-3.2-1B is a text-only checkpoint; AutoProcessor may
# simply resolve to the tokenizer here — confirm this line is still needed.
processor = AutoProcessor.from_pretrained(model_id)