rovi27 committed on
Commit
addada5
1 Parent(s): 3720052

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -3
app.py CHANGED
@@ -34,7 +34,8 @@ base_model = AutoModelForCausalLM.from_pretrained(base_model_name,return_dict=Tr
34
  #base_model = AutoModelForCausalLM.from_pretrained(base_model_name, return_dict=True, device_map = {"":0}, attn_implementation = attn_implementation,).eval()
35
 
36
  tokenizer = AutoTokenizer.from_pretrained(base_model_name, max_length = max_seq_length)
37
- ft_model = PeftModel.from_pretrained(base_model, sft_model1)
 
38
  model = ft_model.merge_and_unload()
39
  model.save_pretrained(".")
40
  #model.to('cuda')
@@ -69,7 +70,7 @@ stopping_criteria_list = StoppingCriteriaList([stopping_criteria])
69
 
70
  def generate_text(modelin, prompt, context, max_length=2100):
71
  print('Modelo es: '+modelin)
72
- if (modelin is not sft_model):
73
  sft_model = modelin
74
  ft_model = PeftModel.from_pretrained(base_model, sft_model)
75
  model = ft_model.merge_and_unload()
@@ -91,7 +92,11 @@ def generate_text(modelin, prompt, context, max_length=2100):
91
 
92
  def mostrar_respuesta(modelo, pregunta, contexto):
93
  try:
94
- res= generate_text(modelo, pregunta, contexto, max_length=500)
 
 
 
 
95
  return str(res)
96
  except Exception as e:
97
  return str(e)
 
34
# NOTE(review): alternative base-model load kept for reference — confirm whether still needed
#base_model = AutoModelForCausalLM.from_pretrained(base_model_name, return_dict=True, device_map = {"":0}, attn_implementation = attn_implementation,).eval()

# Tokenizer for the base model, capped at the configured sequence length.
tokenizer = AutoTokenizer.from_pretrained(base_model_name, max_length=max_seq_length)

# Remember which adapter is currently merged; start with the default adapter.
sft_model = sft_model1
ft_model = PeftModel.from_pretrained(base_model, sft_model)

# Merge the LoRA adapter into the base weights and persist the merged model locally.
model = ft_model.merge_and_unload()
model.save_pretrained(".")
#model.to('cuda')
 
70
 
71
  def generate_text(modelin, prompt, context, max_length=2100):
72
  print('Modelo es: '+modelin)
73
+ if (modelin != sft_model):
74
  sft_model = modelin
75
  ft_model = PeftModel.from_pretrained(base_model, sft_model)
76
  model = ft_model.merge_and_unload()
 
92
 
93
def mostrar_respuesta(modelo, pregunta, contexto):
    """Generate an answer with ``generate_text`` and return it as a string.

    Any exception is caught and its string form is returned instead, so the
    caller (UI) always receives displayable text rather than a traceback.

    Args:
        modelo: adapter/model identifier forwarded to ``generate_text``.
        pregunta: the user question (prompt).
        contexto: context passed alongside the prompt.

    Returns:
        str: the generated answer, or the error message on failure.
    """
    try:
        # Debug trace of the incoming request.
        print('Modelo: ' + str(modelo))
        print('Pregunta: ' + str(pregunta))
        print('Contexto: ' + str(contexto))
        res = generate_text(modelo, pregunta, contexto, max_length=500)
        # BUG FIX: previously re-printed the context here; log the actual answer.
        print('Respuesta: ' + str(res))
        return str(res)
    except Exception as e:
        # Deliberate best-effort: surface the error text to the caller/UI.
        return str(e)