Tonic committed on
Commit
dec5480
1 Parent(s): e10040f

add spaces

Files changed (1): app.py (+2 -1)
app.py CHANGED
@@ -3,7 +3,7 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import json
 from globe import title, description, customtool
-
+import spaces
 
 model_path = "nvidia/Nemotron-Mini-4B-Instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_path)
@@ -32,6 +32,7 @@ The current date is 2023-06-01.
     else:
         return f"<extra_id_0>System\n{system_message}\n\n<extra_id_1>User\n{user_message}\n<extra_id_1>Assistant\n"
 
+@spaces.GPU
 def generate_response(message, history, system_message, max_tokens, temperature, top_p, use_pipeline=False, tool_definition=""):
     full_prompt = create_prompt(system_message, message, tool_definition)
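
For context: spaces is the Hugging Face Spaces ZeroGPU helper package, and decorating the inference entry point with @spaces.GPU makes the Space request a GPU only while that function runs. Below is a minimal sketch of the pattern this commit introduces, using the same model but a simplified generate function; the signature and generation arguments are illustrative, not the app's full implementation.

import torch
import spaces
from transformers import AutoTokenizer, AutoModelForCausalLM

model_path = "nvidia/Nemotron-Mini-4B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.bfloat16, device_map="auto"
)

@spaces.GPU  # ZeroGPU: a GPU is attached only for the duration of this call
def generate_response(prompt, max_tokens=256, temperature=0.7, top_p=0.95):
    # Tokenize on whatever device the model was placed on
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

Without the decorator, a ZeroGPU Space would run the function on CPU (or fail to see a GPU at all); with it, the call is queued for a GPU slot and released when the function returns.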