pranjalpandey committed on
Commit
74123b1
1 Parent(s): 968c555

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -16,10 +16,10 @@ model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", token=HF_TOKEN)
 model = PeftModel.from_pretrained(model, "pranjalpandey/gemma-open-instruct")
 # model = AutoPeftModelForCausalLM.from_pretrained("pranjalpandey/llama-7b-finetuned-dialogue-summarizer")
 tokenizer = AutoTokenizer.from_pretrained("pranjalpandey/gemma-open-instruct", token=HF_TOKEN)
-model = model.to("cuda")
+# model = model.to("cuda")

 def response(prompt):
-    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
+    inputs = tokenizer(prompt, return_tensors="pt")
     outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=100)
     return tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0].split("# Response:")[1].strip()
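
For context, a minimal sketch of what the touched region of app.py looks like after this commit. The import lines and the HF_TOKEN lookup are assumptions added for self-containedness (they sit above the diff hunk and are not shown here); the rest mirrors the new lines above. The effect of the change is that both .to("cuda") calls are gone, so loading and generation now run on CPU.

import os

from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed setup (not part of the diff): read the Hugging Face access token from the environment.
HF_TOKEN = os.environ.get("HF_TOKEN")

# Load the gemma-2b base model and apply the fine-tuned PEFT adapter; both now stay on CPU.
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", token=HF_TOKEN)
model = PeftModel.from_pretrained(model, "pranjalpandey/gemma-open-instruct")
tokenizer = AutoTokenizer.from_pretrained("pranjalpandey/gemma-open-instruct", token=HF_TOKEN)
# model = model.to("cuda")  # commented out by this commit: no GPU is assumed


def response(prompt):
    # Tensors are created on CPU, matching where the model now lives.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=100)
    # Decode and return only the text after the "# Response:" marker of the prompt template.
    decoded = tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]
    return decoded.split("# Response:")[1].strip()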