azrai99 committed
Commit 4ee9429
Parent: 23567be

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -24,9 +24,9 @@ def configure_quantization():
 @st.cache_resource
 def initialize_llm(hf_token):
     # quantization_config = configure_quantization()
-    model_name = 'ericzzz/falcon-rw-1b-chat'
+    model_name = 'GeneZC/MiniChat-1.5-3B'
     return HuggingFaceLLM(
-        model_name = model_name, #meta-llama/Meta-Llama-3-8B-Instruct meta-llama/Llama-2-7b-chat-hf #google/gemma-7b-it #HuggingFaceH4/zephyr-7b-beta #'GeneZC/MiniChat-2-3B'
+        model_name = model_name, #meta-llama/Meta-Llama-3-8B-Instruct meta-llama/Llama-2-7b-chat-hf #google/gemma-7b-it #HuggingFaceH4/zephyr-7b-beta #'GeneZC/MiniChat-2-3B' #'ericzzz/falcon-rw-1b-chat'
         tokenizer_name = model_name,
         context_window=1900,
         # model_kwargs={"token": hf_token, "quantization_config": quantization_config},
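For context, a minimal sketch of how the cached initialize_llm function might read after this commit. Only the lines visible in the diff come from the repository; the import paths (Streamlit and the llama_index HuggingFaceLLM integration) and the closing of the HuggingFaceLLM(...) call are assumptions added so the snippet stands alone.

# Minimal sketch, assuming the llama_index HuggingFaceLLM integration.
# Import paths are not shown in the diff and may differ in the actual app.py.
import streamlit as st
from llama_index.llms.huggingface import HuggingFaceLLM

@st.cache_resource  # construct the LLM once and reuse it across Streamlit reruns
def initialize_llm(hf_token):
    # quantization_config = configure_quantization()  # commented out in this revision
    model_name = 'GeneZC/MiniChat-1.5-3B'  # previously 'ericzzz/falcon-rw-1b-chat'
    return HuggingFaceLLM(
        model_name=model_name,
        tokenizer_name=model_name,
        context_window=1900,
        # model_kwargs={"token": hf_token, "quantization_config": quantization_config},
    )

The st.cache_resource decorator keeps the loaded model in memory for the lifetime of the Streamlit process, so the LLM is not reloaded on every user interaction; on a Hugging Face Space, pushing this commit restarts the app and the new model_name takes effect on the next load.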