Commit f63bd36
asv7j committed
1 Parent(s): 4db116a

Update app.py
Files changed (1): app.py (+4, -5)
app.py CHANGED

@@ -8,7 +8,7 @@ device = "cpu"
 access_token = os.getenv("access_token")
 
 tokenizer1 = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
-tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it", token=access_token)
+tokenizer2 = AutoTokenizer.from_pretrained("google/gemma-2-2b-it", token=access_token)
 tokenizer3 = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
 
 llm1 = Llama.from_pretrained(
@@ -42,7 +42,7 @@ def modelResp1(prompt):
         {"role": "assistant", "content": "I am Sia, a small language model created by Sushma."},
         {"role": "user", "content": f"{prompt}"}
     ]
-    text = tokenizer.apply_chat_template(
+    text = tokenizer1.apply_chat_template(
         messages,
         tokenize=False,
         add_generation_prompt=True
@@ -58,12 +58,11 @@ def modelResp1(prompt):
 
 def modelResp2(prompt):
     messages = [
-        {"role": "system", "content": "You are a helpful assistant, Sia, developed by Sushma. You will response in polity and brief."},
         {"role": "user", "content": "Who are you?"},
         {"role": "assistant", "content": "I am Sia, a small language model created by Sushma."},
         {"role": "user", "content": f"{prompt}"}
     ]
-    text = tokenizer.apply_chat_template(
+    text = tokenizer2.apply_chat_template(
         messages,
         tokenize=False,
         add_generation_prompt=True
@@ -84,7 +83,7 @@ def modelResp3(prompt):
         {"role": "assistant", "content": "I am Sia, a small language model created by Sushma."},
         {"role": "user", "content": f"{prompt}"}
     ]
-    text = tokenizer.apply_chat_template(
+    text = tokenizer3.apply_chat_template(
         messages,
         tokenize=False,
         add_generation_prompt=True
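
In short, the commit renames the shared Gemma tokenizer to tokenizer2 and points each modelResp function at its matching tokenizer (tokenizer1, tokenizer2, tokenizer3); it also drops the system message from modelResp2, which is consistent with Gemma's chat template not accepting a system role. Below is a minimal sketch of the resulting pattern for the Gemma path only; the Llama.from_pretrained arguments and the generation call are assumptions, since they are not visible in these hunks.

import os
from llama_cpp import Llama
from transformers import AutoTokenizer

access_token = os.getenv("access_token")

# Gemma tokenizer, matching the renamed variable in the commit.
tokenizer2 = AutoTokenizer.from_pretrained("google/gemma-2-2b-it", token=access_token)

# Hypothetical GGUF checkpoint; the real repo_id/filename are outside these hunks.
llm2 = Llama.from_pretrained(
    repo_id="bartowski/gemma-2-2b-it-GGUF",
    filename="*Q4_K_M.gguf",
)

def modelResp2(prompt):
    # No system message: Gemma's chat template only accepts user/assistant turns.
    messages = [
        {"role": "user", "content": "Who are you?"},
        {"role": "assistant", "content": "I am Sia, a small language model created by Sushma."},
        {"role": "user", "content": f"{prompt}"}
    ]
    # Render the prompt with the Gemma tokenizer (tokenizer2), not the old shared `tokenizer`.
    text = tokenizer2.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    # Generation settings are assumptions; the diff does not show this part.
    output = llm2(text, max_tokens=256)
    return output["choices"][0]["text"]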