ashisdeveloper committed on
Commit 9ff61cd
1 Parent(s): 3a187ef

Create app.py

Files changed (1)
  app.py +63 -0
app.py ADDED
@@ -0,0 +1,63 @@
+ from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
+ from PIL import Image
+ import torch
+
+
+ USE_GPU = True
+
+ device = torch.device("cuda" if USE_GPU and torch.cuda.is_available() else "cpu")
+
+ # The Molmo processor handles both image preprocessing and tokenization.
+ processor = AutoProcessor.from_pretrained(
+     'allenai/MolmoE-1B-0924',
+     trust_remote_code=True,
+     torch_dtype='auto',
+     device_map='auto' if USE_GPU else None,
+     cache_dir="./models/molmo1"
+ )
+
+ # device_map='auto' already loads the weights efficiently, so the call is
+ # not wrapped in accelerate's init_empty_weights(); that wrapper would leave
+ # the parameters on the meta device when device_map is None.
+ model = AutoModelForCausalLM.from_pretrained(
+     'allenai/MolmoE-1B-0924',
+     trust_remote_code=True,
+     torch_dtype='auto',
+     device_map='auto' if USE_GPU else None,
+     cache_dir="./models/molmo1",
+     attn_implementation="eager"
+ )
+
+ # On the CPU path (device_map=None) the model must be moved explicitly.
+ if not USE_GPU:
+     model.to(device)
+
+ image_path = "./public/image.jpg"  # Replace with your image file path
+ image = Image.open(image_path).convert("RGB")
+
+ # Preprocess the image and prompt into model inputs.
+ inputs = processor.process(
+     images=[image],
+     text="Extract text"
+ )
+
+ # Move inputs to the model's device and add a batch dimension.
+ inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}
+
+ print('STARTED')
+ output = model.generate_from_batch(
+     inputs,
+     GenerationConfig(
+         max_new_tokens=2000,
+         # temperature=0.1,
+         stop_strings="<|endoftext|>"
+     ),
+     tokenizer=processor.tokenizer
+ )
+
+ # Keep only the newly generated tokens; decode them to text.
+ generated_tokens = output[0, inputs['input_ids'].size(1):]
+ generated_text = processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)
+
+ print(generated_text)
+
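For reuse beyond the single hard-coded image, the generation steps above could be wrapped in a helper. A minimal sketch, assuming the `processor` and `model` objects from app.py are already loaded at module level; the `extract_text` name and its default arguments are illustrative, not part of this commit:

from transformers import GenerationConfig
from PIL import Image

def extract_text(image_path, prompt="Extract text", max_new_tokens=2000):
    # Hypothetical helper: reuses the globally loaded Molmo processor/model.
    image = Image.open(image_path).convert("RGB")
    inputs = processor.process(images=[image], text=prompt)
    # Batch of 1 on the model's device, matching the script above.
    inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}
    output = model.generate_from_batch(
        inputs,
        GenerationConfig(max_new_tokens=max_new_tokens, stop_strings="<|endoftext|>"),
        tokenizer=processor.tokenizer,
    )
    # Decode only the tokens generated after the prompt.
    new_tokens = output[0, inputs["input_ids"].size(1):]
    return processor.tokenizer.decode(new_tokens, skip_special_tokens=True)

print(extract_text("./public/image.jpg"))

Keeping model loading at module level and generation inside a function avoids reloading the checkpoint for every image.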