praysimanjuntak committed
Commit: b61da51
1 Parent(s): 7c344d6

Update app.py

Files changed (1): app.py (+3, -3)
app.py CHANGED
@@ -1,6 +1,9 @@
 import time
 from threading import Thread
 
+import subprocess
+subprocess.run(["pip", "install", "."])
+
 import gradio as gr
 from io import BytesIO
 import requests
@@ -11,7 +14,6 @@ from llava.mm_utils import tokenizer_image_token
 from transformers.generation.streamers import TextIteratorStreamer
 import spaces
 
-import subprocess
 
 if torch.cuda.is_available():
     device = "cuda"
@@ -20,8 +22,6 @@ else:
     device = "cpu"
     print("Using CPU")
 
-subprocess.run(["pip", "install", "."])
-
 device = "cuda:0"
 tokenizer, model, image_processor, context_len = load_pretrained_model(model_path="./checkpoints/llava-phi3-3.8b-lora", model_name="llava-phi3-3.8b-lora", model_base="microsoft/Phi-3-mini-128k-instruct", load_8bit=False, load_4bit=False, device=device)
 model.to(device)
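
The ordering is the point of this commit: app.py imports from the local llava package near the top of the file (see the "from llava.mm_utils import tokenizer_image_token" context in the second hunk), but the old pip install only ran at line 23, after those imports. Moving subprocess.run(["pip", "install", "."]) to the top lets the repository install itself before any llava import is evaluated. Below is a minimal sketch of the same install-before-import pattern; the importlib guard and the sys.executable-based pip invocation are illustrative additions and are not part of this commit, which calls the bare "pip" unconditionally.

import importlib.util
import subprocess
import sys

# Install the repository's own package (the current directory) before any
# "llava" import runs. The guard skips the install when the package is
# already importable, e.g. on a warm restart of the Space.
if importlib.util.find_spec("llava") is None:
    subprocess.run([sys.executable, "-m", "pip", "install", "."], check=True)

from llava.mm_utils import tokenizer_image_token  # resolvable only after the install

Routing the install through sys.executable -m pip ties it to the interpreter that is actually running the app, a common hardening of this pattern; the commit itself keeps the plain "pip" call.

The unchanged context lines (Thread, TextIteratorStreamer, and the load_pretrained_model call) point at the usual token-streaming setup for a Gradio app. The sketch below shows only that generic pattern: the function name, text-only prompt handling, and generation parameters are assumptions for illustration, and the app's image path through image_processor and tokenizer_image_token is not shown.

from threading import Thread
from transformers.generation.streamers import TextIteratorStreamer

def stream_reply(model, tokenizer, prompt, device="cuda:0", max_new_tokens=256):
    # Tokenize the (text-only) prompt and move it to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    # generate() blocks, so it runs in a background thread while the streamer
    # is consumed incrementally, e.g. yielded chunk by chunk to a Gradio chat UI.
    thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens),
    )
    thread.start()
    for new_text in streamer:
        yield new_text
    thread.join()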