jordigonzm committed
Commit 1f24ef3
Parent: 26fc4d9
Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -10,6 +10,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
+HF_TOKEN = os.environ.get("HUGGINGFACE_HUB_TOKEN", None)
 
 DESCRIPTION = """\
 # Llama-2 13B Chat
@@ -146,4 +147,3 @@ with gr.Blocks(css="style.css", fill_height=True) as demo:
 
 if __name__ == "__main__":
     demo.queue(max_size=20).launch()
-
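
For context, a minimal sketch of how a token read this way is typically used with transformers to load a gated checkpoint such as Llama-2. The commit only adds the HF_TOKEN variable; the model ID and the token= keyword below are assumptions for illustration, not part of this change.

    import os
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Read the Hub token from the environment, as the commit does.
    # None falls back to anonymous access or any cached login.
    HF_TOKEN = os.environ.get("HUGGINGFACE_HUB_TOKEN", None)

    # Hypothetical usage: pass the token when loading a gated model.
    # The model id here is an assumption for illustration.
    model_id = "meta-llama/Llama-2-13b-chat-hf"
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
    model = AutoModelForCausalLM.from_pretrained(model_id, token=HF_TOKEN)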