Tonic committed on
Commit
a535175
1 Parent(s): 8877ae0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -20,7 +20,7 @@ raven_pipeline = pipeline(
20
@spaces.GPU(enable_queue=True)
def process_text(input_text: str) -> str:
    """Run the Raven text-generation pipeline on a user query.

    The query is wrapped in the prompt template the model expects
    (`User Query: ...<human_end>`), decoded greedily, and the leading
    "Call:" marker is stripped from the generated text before returning.
    """
    prompt = f"User Query: {input_text}<human_end>"
    generation = raven_pipeline(
        prompt,
        temperature=0.001,
        max_new_tokens=300,
        return_full_text=False,
        do_sample=False,
    )
    result = generation[0]["generated_text"].replace("Call:", "").strip()
    # torch.cuda.empty_cache()
    return result
26
 
 
20
@spaces.GPU(enable_queue=True)
def process_text(input_text: str) -> str:
    """Generate a response for *input_text* with the Raven text-generation pipeline.

    The query is wrapped in the prompt template the model expects
    (`User Query: ...<human_end>`) and the raw generated text is
    returned without post-processing.
    """
    prompt = f"User Query: {input_text}<human_end>"
    # NOTE(review): do_sample=True with temperature=0.001 is effectively
    # greedy decoding — confirm sampling is actually intended here.
    result = raven_pipeline(
        prompt,
        temperature=0.001,
        max_new_tokens=300,
        return_full_text=False,
        do_sample=True,
    )[0]["generated_text"]
    return result
26