Severian committed
Commit 2b19df3
1 Parent(s): 881e9c1

Update llm_handler.py

Files changed (1)
  1. llm_handler.py +6 -8
llm_handler.py CHANGED
@@ -24,15 +24,13 @@ settings.stream = True
 
 def send_to_llm(provider, msg_list):
     try:
-        # Convert the msg_list to the correct format expected by the agent
-        formatted_messages = [
-            {"role": msg["role"], "content": msg["content"]}
-            for msg in msg_list
-        ]
-
-        # Call get_chat_response with the formatted messages
-        response = agent.get_chat_response(formatted_messages, llm_sampling_settings=settings)
+        # Concatenate all messages into a single string
+        full_message = "\n".join([f"{msg['role']}: {msg['content']}" for msg in msg_list])
+
+        # Call get_chat_response with the full message string
+        response = agent.get_chat_response(full_message, llm_sampling_settings=settings)
         return response.content, None  # We don't have usage info in this case
     except Exception as e:
         print(f"Error in send_to_llm: {str(e)}")
         return f"Error: {str(e)}", None
+
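The change replaces the structured chat-message list with a single newline-joined "role: content" string before calling agent.get_chat_response. Below is a minimal sketch of how a caller might exercise the updated send_to_llm, assuming agent and settings are the module-level objects configured earlier in llm_handler.py and that the provider argument is simply passed through; the message list and variable names are illustrative, not part of the commit.

# Hypothetical caller of the updated send_to_llm (only send_to_llm itself
# comes from llm_handler.py; everything else here is an illustrative assumption).
msg_list = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize the latest commit."},
]

reply, usage = send_to_llm("llama-cpp", msg_list)
print(reply)   # model output, or an "Error: ..." string if the call raised
print(usage)   # always None here; the handler does not report token usage

Flattening the history this way presumably sidesteps a format mismatch between the message dicts and what get_chat_response expects, at the cost of the agent no longer receiving per-message role structure.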