shionhonda committed
Commit 9947362
1 Parent(s): 89f2f05

Update app.py

Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -7,17 +7,18 @@ st.header("Reviewer #2 Bot")
 title = st.text_input("Title : ")
 submit = st.button("Submit")
 
+tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
 model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
 model = PeftModel.from_pretrained(model, "shionhonda/tiny-llama-reviewer2-1.1B-dpo-lora")
-tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
 
 prompt = tokenizer.apply_chat_template([
     {"role": "system", "content": "You are an experienced researcher and a reviewer of scientific papers. Given a title of the paper, write a review about it in one sentence."},
     {"role": "user", "content": title}
 ], tokenize=False, add_generation_prompt=True)
 inputs = tokenizer(prompt, return_tensors="pt")
-generate_ids = model.generate(inputs.input_ids, max_new_tokens=50, do_sample=True, temperature=0.5, top_k=50, top_p=0.95)
+generate_ids = model.generate(inputs.input_ids, max_new_tokens=64, do_sample=True, temperature=0.5, top_k=50, top_p=0.95)
 
 if submit:
     st.subheader("Reviewer #2:")
-    st.write(tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0])
+    output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+    st.write(output.split("<|assistant|>")[-1])
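For reference, the full app.py after this commit would look roughly like the sketch below. Only lines 7–24 appear in the hunk; the import block and the `st.header("Reviewer #2 Bot")` line are assumptions inferred from the `@@` context line and the names used in the diff (streamlit, transformers, peft), not shown in the commit itself.

```python
# Sketch of app.py after this commit. The imports and the st.header call are
# assumed from the hunk context; everything else comes directly from the diff.
import streamlit as st
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

st.header("Reviewer #2 Bot")

title = st.text_input("Title : ")
submit = st.button("Submit")

# Load the tokenizer before the base model (reordered in this commit),
# then apply the DPO LoRA adapter on top of the TinyLlama chat model.
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
model = PeftModel.from_pretrained(model, "shionhonda/tiny-llama-reviewer2-1.1B-dpo-lora")

# Build a chat-formatted prompt from the submitted paper title.
prompt = tokenizer.apply_chat_template([
    {"role": "system", "content": "You are an experienced researcher and a reviewer of scientific papers. Given a title of the paper, write a review about it in one sentence."},
    {"role": "user", "content": title}
], tokenize=False, add_generation_prompt=True)
inputs = tokenizer(prompt, return_tensors="pt")
generate_ids = model.generate(inputs.input_ids, max_new_tokens=64, do_sample=True,
                              temperature=0.5, top_k=50, top_p=0.95)

if submit:
    st.subheader("Reviewer #2:")
    # The decoded text still contains the prompt, so keep only the assistant turn.
    output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True,
                                    clean_up_tokenization_spaces=False)[0]
    st.write(output.split("<|assistant|>")[-1])
```

Compared with the previous revision, the tokenizer is now loaded before the base model, max_new_tokens goes from 50 to 64, and the page displays only the text after the final "<|assistant|>" marker instead of the full decoded sequence, which includes the prompt.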