praysimanjuntak committed on
Commit
7c344d6
1 Parent(s): 15b0eda

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -2
app.py CHANGED
@@ -9,8 +9,18 @@ from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
9
  from llava.model.builder import load_pretrained_model
10
  from llava.mm_utils import tokenizer_image_token
11
  from transformers.generation.streamers import TextIteratorStreamer
 
12
 
13
- # import spaces
 
 
 
 
 
 
 
 
 
14
 
15
  device = "cuda:0"
16
  tokenizer, model, image_processor, context_len = load_pretrained_model(model_path="./checkpoints/llava-phi3-3.8b-lora", model_name="llava-phi3-3.8b-lora", model_base="microsoft/Phi-3-mini-128k-instruct", load_8bit=False, load_4bit=False, device=device)
@@ -24,7 +34,7 @@ def load_image(image_file):
24
  image = Image.open(image_file).convert('RGB')
25
  return image
26
 
27
- # @spaces.GPU
28
  def bot_streaming(message, history):
29
  print(message)
30
  if message["files"]:
 
9
  from llava.model.builder import load_pretrained_model
10
  from llava.mm_utils import tokenizer_image_token
11
  from transformers.generation.streamers import TextIteratorStreamer
12
+ import spaces
13
 
14
+ import subprocess
15
+
16
+ if torch.cuda.is_available():
17
+ device = "cuda"
18
+ print("Using GPU")
19
+ else:
20
+ device = "cpu"
21
+ print("Using CPU")
22
+
23
+ subprocess.run(["pip", "install", "."])
24
 
25
  device = "cuda:0"
26
  tokenizer, model, image_processor, context_len = load_pretrained_model(model_path="./checkpoints/llava-phi3-3.8b-lora", model_name="llava-phi3-3.8b-lora", model_base="microsoft/Phi-3-mini-128k-instruct", load_8bit=False, load_4bit=False, device=device)
 
34
  image = Image.open(image_file).convert('RGB')
35
  return image
36
 
37
+ @spaces.GPU
38
  def bot_streaming(message, history):
39
  print(message)
40
  if message["files"]: