rovi27 committed on
Commit
64ffac4
1 Parent(s): 726dca2

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -10,8 +10,8 @@ import os
 #base_model_name = "unsloth/Mistral-7B-Instruct-v0.2"
 #sft_model = "somosnlp/ComeBien_gemma-2b-it-bnb-4bit"
 sft_model = "somosnlp/RecetasDeLaAbuela5k_gemma-2b-bnb-4bit"
-#base_model_name = "unsloth/gemma-2b-bnb-4bit"
-base_model_name = "unsloth/gemma-2b-it-bnb-4bit"
+base_model_name = "unsloth/gemma-2b-bnb-4bit"
+#base_model_name = "unsloth/gemma-2b-it-bnb-4bit"
 
 max_seq_length=200
 base_model = AutoModelForCausalLM.from_pretrained(base_model_name,return_dict=True,device_map="auto", torch_dtype=torch.float16,)
@@ -55,7 +55,7 @@ def generate_text(prompt, context, max_length=2100):
     max_new_tokens=max_length
     generation_config = GenerationConfig(
         max_new_tokens=max_new_tokens,
-        temperature=0.1, #top_p=0.9, top_k=50, # 45
+        temperature=0.2, #top_p=0.9, top_k=50, # 45
         repetition_penalty=1.3, # 1.04, #1.1
         do_sample=True,
     )
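For context, the commit swaps the base checkpoint to the non-instruct `unsloth/gemma-2b-bnb-4bit` and raises the sampling temperature from 0.1 to 0.2. The diff only shows fragments of app.py, so the following is a minimal, hedged sketch of how these pieces likely fit together; the PEFT adapter loading, tokenizer setup, and prompt handling are assumptions not shown in the diff.

```python
# Sketch only: reconstructs the touched settings from this commit.
# The PeftModel/tokenizer wiring is assumed, not taken from app.py.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from peft import PeftModel  # assumption: sft_model is a PEFT adapter on the base model

sft_model = "somosnlp/RecetasDeLaAbuela5k_gemma-2b-bnb-4bit"
base_model_name = "unsloth/gemma-2b-bnb-4bit"  # after this commit

# Load the quantized base model in fp16 (as in the diff) and attach the fine-tuned adapter.
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name, return_dict=True, device_map="auto", torch_dtype=torch.float16,
)
model = PeftModel.from_pretrained(base_model, sft_model)
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

def generate_text(prompt, max_length=2100):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    generation_config = GenerationConfig(
        max_new_tokens=max_length,
        temperature=0.2,         # raised from 0.1 in this commit
        repetition_penalty=1.3,
        do_sample=True,
    )
    outputs = model.generate(**inputs, generation_config=generation_config)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
```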