Spaces:
Build error
Build error
jordigonzm
committed on
Commit
•
57e69ab
1
Parent(s):
26e9ade
Update app.py
Browse files
app.py
CHANGED
@@ -16,22 +16,29 @@ model = "gpt-3.5-turbo"
|
|
16 |
|
17 |
def predict(message, history):
|
18 |
messages = []
|
|
|
|
|
19 |
for entry in history:
|
20 |
-
if len(entry)
|
|
|
|
|
|
|
|
|
|
|
|
|
21 |
raise ValueError("Cada entrada en el historial debe contener exactamente dos mensajes.")
|
22 |
-
|
23 |
-
|
24 |
-
{"role": "user", "content": user_message},
|
25 |
-
{"role": "assistant", "content": assistant_message}
|
26 |
-
])
|
27 |
messages.append({"role": "user", "content": message})
|
28 |
|
|
|
29 |
response = llama.create_chat_completion_openai_v1(
|
30 |
model=model,
|
31 |
messages=messages,
|
32 |
stream=True
|
33 |
)
|
34 |
-
|
|
|
35 |
text = ""
|
36 |
for chunk in response:
|
37 |
content = chunk.choices[0].delta.content
|
|
|
16 |
|
17 |
def predict(message, history):
|
18 |
messages = []
|
19 |
+
|
20 |
+
# Procesar cada entrada en el historial si hay historial disponible.
|
21 |
for entry in history:
|
22 |
+
if len(entry) == 2:
|
23 |
+
user_message, assistant_message = entry
|
24 |
+
messages.extend([
|
25 |
+
{"role": "user", "content": user_message},
|
26 |
+
{"role": "assistant", "content": assistant_message}
|
27 |
+
])
|
28 |
+
else:
|
29 |
raise ValueError("Cada entrada en el historial debe contener exactamente dos mensajes.")
|
30 |
+
|
31 |
+
# Añadir el nuevo mensaje del usuario.
|
|
|
|
|
|
|
32 |
messages.append({"role": "user", "content": message})
|
33 |
|
34 |
+
# Crear la respuesta del modelo.
|
35 |
response = llama.create_chat_completion_openai_v1(
|
36 |
model=model,
|
37 |
messages=messages,
|
38 |
stream=True
|
39 |
)
|
40 |
+
|
41 |
+
# Recopilar y devolver la respuesta.
|
42 |
text = ""
|
43 |
for chunk in response:
|
44 |
content = chunk.choices[0].delta.content
|