Severian committed on
Commit
3a423b8
1 Parent(s): af1c430

Update llm_handler.py

Browse files
Files changed (1) hide show
  1. llm_handler.py +4 -54
llm_handler.py CHANGED
@@ -1,35 +1,11 @@
1
- import requests
2
- import json
3
  from openai import OpenAI
4
  from params import OPENAI_MODEL, OPENAI_API_KEY
5
 
6
- # Add this at the top of the file
7
- local_model_base_url = "http://localhost:11434/v1"
8
- anything_llm_workspace = "<input-workspace-name-here>"
9
-
10
  # Create an instance of the OpenAI class
11
- client = OpenAI(api_key="dummy_key", base_url=local_model_base_url)
12
-
13
def set_local_model_base_url(url: str) -> None:
    """Set the module-level base URL for the local OpenAI-compatible server.

    NOTE(review): the module-level `client` created above is built from the
    URL's value at import time and is NOT refreshed by this call;
    send_to_chatgpt rebuilds its own client from this global instead —
    confirm callers rely only on send_to_chatgpt.
    """
    global local_model_base_url
    local_model_base_url = url
16
-
17
def set_anything_llm_workspace(workspace: str) -> None:
    """Set the module-level AnythingLLM workspace name.

    The value is interpolated into the chat endpoint URL by
    send_to_anything_llm.
    """
    global anything_llm_workspace
    anything_llm_workspace = workspace
20
 
21
  def send_to_chatgpt(msg_list):
22
  try:
23
- # Modify this part to use llamanet conditionally
24
- if OPENAI_MODEL.startswith("https://"):
25
- # This is a llamanet model
26
- import llamanet
27
- llamanet.run()
28
- client = OpenAI()
29
- else:
30
- # Use the existing client for other cases
31
- client = OpenAI(api_key="dummy_key", base_url=local_model_base_url)
32
-
33
  completion = client.chat.completions.create(
34
  model=OPENAI_MODEL,
35
  messages=msg_list,
@@ -42,41 +18,15 @@ def send_to_chatgpt(msg_list):
42
  if chunk.choices[0].delta.content is not None:
43
  chatgpt_response += chunk.choices[0].delta.content
44
 
45
- # Note: Usage information might not be available with llamanet
46
  chatgpt_usage = None
47
  return chatgpt_response, chatgpt_usage
48
  except Exception as e:
49
  print(f"Error in send_to_chatgpt: {str(e)}")
50
  return f"Error: {str(e)}", None
51
 
52
def send_to_anything_llm(msg_list):
    """Send a chat request to a local AnythingLLM workspace.

    Flattens ``msg_list`` (OpenAI-style message dicts) into one
    space-joined string and POSTs it to the workspace chat endpoint.

    Args:
        msg_list: list of dicts; entries carrying a "content" key
            contribute to the outgoing message text.

    Returns:
        tuple: (text_response, usage_dict) on success, or
        (f"Error: ...", None) on any request failure.
    """
    import os  # function-scope import: keeps the module import block untouched

    url = f'http://localhost:3001/api/v1/workspace/{anything_llm_workspace}/chat'
    # SECURITY: the API key used to be hard-coded here. Prefer the
    # ANYTHING_LLM_API_KEY environment variable; the old literal remains
    # only as a backward-compatible fallback and should be rotated.
    api_key = os.environ.get('ANYTHING_LLM_API_KEY',
                             '0MACR41-7804XQB-MGC1GS0-FGSKB44')
    headers = {
        'accept': 'application/json',
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json'
    }
    message_content = " ".join(msg["content"] for msg in msg_list if "content" in msg)
    data = {
        "message": message_content,
        "mode": "chat"
    }
    data_json = json.dumps(data)
    try:
        # timeout added so a hung server cannot block the caller forever;
        # requests.Timeout subclasses RequestException and is handled below.
        response = requests.post(url, headers=headers, data=data_json, timeout=60)
        response.raise_for_status()  # surface 4xx/5xx as exceptions
        response_data = response.json()
        chatgpt_response = response_data.get("textResponse")
        chatgpt_usage = response_data.get("usage", {})
        return chatgpt_response, chatgpt_usage
    except requests.RequestException as e:
        print(f"Error in send_to_anything_llm: {str(e)}")
        return f"Error: {str(e)}", None
75
-
76
def send_to_llm(provider, msg_list):
    """Dispatch a chat request to the backend selected by ``provider``.

    Args:
        provider: either "local-model" (OpenAI-compatible local server)
            or "anything-llm" (AnythingLLM workspace API).
        msg_list: OpenAI-style list of message dicts to forward.

    Returns:
        The (response_text, usage) tuple from the chosen backend.

    Raises:
        ValueError: if ``provider`` is not a recognized backend name.
    """
    if provider == "anything-llm":
        return send_to_anything_llm(msg_list)
    if provider == "local-model":
        return send_to_chatgpt(msg_list)
    raise ValueError(f"Unknown provider: {provider}")
 
 
 
1
  from openai import OpenAI
2
  from params import OPENAI_MODEL, OPENAI_API_KEY
3
 
 
 
 
 
4
  # Create an instance of the OpenAI class
5
+ client = OpenAI(api_key=OPENAI_API_KEY)
 
 
 
 
 
 
 
 
6
 
7
  def send_to_chatgpt(msg_list):
8
  try:
 
 
 
 
 
 
 
 
 
 
9
  completion = client.chat.completions.create(
10
  model=OPENAI_MODEL,
11
  messages=msg_list,
 
18
  if chunk.choices[0].delta.content is not None:
19
  chatgpt_response += chunk.choices[0].delta.content
20
 
21
+ # Note: Usage information might not be available with LlamaNet
22
  chatgpt_usage = None
23
  return chatgpt_response, chatgpt_usage
24
  except Exception as e:
25
  print(f"Error in send_to_chatgpt: {str(e)}")
26
  return f"Error: {str(e)}", None
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
def send_to_llm(provider, msg_list):
    """Dispatch a chat request to the backend selected by ``provider``.

    Args:
        provider: must be "llamanet", the only supported backend.
        msg_list: OpenAI-style list of message dicts to forward.

    Returns:
        The (response_text, usage) tuple from send_to_chatgpt.

    Raises:
        ValueError: if ``provider`` is not "llamanet".
    """
    # Guard clause: reject anything other than the single known provider.
    if provider != "llamanet":
        raise ValueError(f"Unknown provider: {provider}")
    return send_to_chatgpt(msg_list)