Update llm_handler.py
llm_handler.py (+49 −2)
```diff
@@ -2,12 +2,45 @@ import requests
 import json
 from openai import OpenAI
 from params import OPENAI_MODEL, OPENAI_API_KEY
+from llama_cpp import Llama
+from llama_cpp_agent import LlamaCppAgent
+from llama_cpp_agent import MessagesFormatterType
+from llama_cpp_agent.providers import LlamaCppPythonProvider
+
+# Add this at the top of the file
+local_model_base_url = "http://localhost:11434/v1"
+anything_llm_workspace = "<input-workspace-name-here>"
+
+def set_local_model_base_url(url):
+    global local_model_base_url
+    local_model_base_url = url
+
+def set_anything_llm_workspace(workspace):
+    global anything_llm_workspace
+    anything_llm_workspace = workspace
 
 # Create an instance of the OpenAI class for the local model
-client = OpenAI(api_key="local-model", base_url="http://localhost:11434/v1")
+client = OpenAI(api_key="local-model", base_url=local_model_base_url)
+
+# Initialize LlamaCpp model and agent
+llama_model = Llama("/Users/anima/.cache/lm-studio/models/arcee-ai/Arcee-Spark-GGUF/Arcee-Spark-Q4_K_M.gguf", n_batch=1024, n_threads=24, n_gpu_layers=33, n_ctx=4098, verbose=False)
+provider = LlamaCppPythonProvider(llama_model)
+llama_agent = LlamaCppAgent(
+    provider,
+    system_prompt="You are a helpful assistant.",
+    predefined_messages_formatter_type=MessagesFormatterType.MISTRAL,
+    debug_output=True
+)
+
+# Configure provider settings
+settings = provider.get_provider_default_settings()
+settings.max_tokens = 2000
+settings.stream = True
 
 def send_to_chatgpt(msg_list):
     try:
+        # Update the send_to_chatgpt function to use the dynamic base_url
+        client = OpenAI(api_key="local-model", base_url=local_model_base_url)
         completion = client.chat.completions.create(
             model=OPENAI_MODEL,
             temperature=0.6,
```
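The new module-level setters replace the hard-coded endpoint and workspace with values a caller can change at runtime before dispatching. A minimal caller sketch (the module name `llm_handler` and the workspace name are illustrative assumptions, not part of the commit):

```python
# Hypothetical caller: repoint the handler at startup, before any requests.
import llm_handler

llm_handler.set_local_model_base_url("http://localhost:11434/v1")  # e.g. an Ollama endpoint
llm_handler.set_anything_llm_workspace("my-workspace")             # placeholder name
```

Note that `send_to_chatgpt` rebuilds its `client` on every call so that a later `set_local_model_base_url` takes effect; the module-level `client` created above stays at the import-time default and is effectively shadowed.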
```diff
@@ -21,7 +54,7 @@ def send_to_chatgpt(msg_list):
         return f"Error: {str(e)}", None
 
 def send_to_anything_llm(msg_list):
-    url = 'http://localhost:3001/api/v1/workspace/
+    url = f'http://localhost:3001/api/v1/workspace/{anything_llm_workspace}/chat'
     headers = {
         'accept': 'application/json',
         'Authorization': 'Bearer 0MACR41-7804XQB-MGC1GS0-FGSKB44',
```
```diff
@@ -44,10 +77,24 @@ def send_to_anything_llm(msg_list):
         print(f"Error in send_to_anything_llm: {str(e)}")
         return f"Error: {str(e)}", None
 
+def send_to_llamacpp(msg_list):
+    try:
+        # Convert the message list to the format expected by LlamaCppAgent
+        formatted_messages = [{"role": msg["role"], "content": msg["content"]} for msg in msg_list]
+        response = llama_agent(formatted_messages, settings=settings)
+        chatgpt_response = response.message.content
+        chatgpt_usage = {"prompt_tokens": response.usage.prompt_tokens, "completion_tokens": response.usage.completion_tokens, "total_tokens": response.usage.total_tokens}
+        return chatgpt_response, chatgpt_usage
+    except Exception as e:
+        print(f"Error in send_to_llamacpp: {str(e)}")
+        return f"Error: {str(e)}", None
+
 def send_to_llm(provider, msg_list):
     if provider == "local-model":
         return send_to_chatgpt(msg_list)
     elif provider == "anything-llm":
         return send_to_anything_llm(msg_list)
+    elif provider == "llamacpp":
+        return send_to_llamacpp(msg_list)
     else:
         raise ValueError(f"Unknown provider: {provider}")
```
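As committed, `send_to_llamacpp` calls the agent object directly and reads OpenAI-style `.message` and `.usage` attributes off the result; `LlamaCppAgent` does not appear to expose that interface, so this path would likely land in the `except` branch. A hedged rework against the library's documented `get_chat_response` entry point could look like the sketch below (flattening the message list into one prompt string is an assumption about intent, and the agent does not return token counts here, so `None` stands in for usage):

```python
# Sketch only: rework of send_to_llamacpp around get_chat_response.
# Keyword names follow the llama-cpp-agent README and are not verified
# against the version pinned by this project.
def send_to_llamacpp(msg_list):
    try:
        # Flatten the OpenAI-style message list into a single prompt,
        # since get_chat_response takes one message string at a time.
        prompt = "\n".join(f"{m['role']}: {m['content']}" for m in msg_list)
        response_text = llama_agent.get_chat_response(
            prompt, llm_sampling_settings=settings
        )
        # No token accounting is available from the agent, so return None.
        return response_text, None
    except Exception as e:
        print(f"Error in send_to_llamacpp: {str(e)}")
        return f"Error: {str(e)}", None
```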
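With the new branch in `send_to_llm`, callers pick a backend by name and get back a `(response, usage)` pair; unknown names still raise. An illustrative dispatch (the message shape follows the OpenAI chat format used throughout the file):

```python
# Illustrative use of the updated dispatcher.
msg_list = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello in one sentence."},
]

reply, usage = send_to_llm("llamacpp", msg_list)
print(reply, usage)

# Anything else still fails fast:
# send_to_llm("openrouter", msg_list)  # ValueError: Unknown provider: openrouter
```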