File size: 3,988 Bytes
d6c416b
 
 
 
dd34b85
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d6c416b
 
dd34b85
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d6c416b
 
 
dd34b85
 
d6c416b
 
 
 
 
 
 
 
 
 
 
 
 
dd34b85
d6c416b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dd34b85
 
 
 
 
 
 
 
 
 
 
 
d6c416b
 
 
 
 
dd34b85
 
d6c416b
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import requests
import json
from openai import OpenAI
from params import OPENAI_MODEL, OPENAI_API_KEY
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent
from llama_cpp_agent import MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider

# Module-level configuration; both values can be changed at runtime via the
# setter functions below.
local_model_base_url = "http://localhost:11434/v1"  # OpenAI-compatible local endpoint (e.g. Ollama)
anything_llm_workspace = "<input-workspace-name-here>"  # AnythingLLM workspace slug; must be set before use

def set_local_model_base_url(url):
    """Point the local-model OpenAI-compatible client at a different base URL."""
    globals()["local_model_base_url"] = url

def set_anything_llm_workspace(workspace):
    """Select which AnythingLLM workspace subsequent chat requests target."""
    globals()["anything_llm_workspace"] = workspace

# Create an instance of the OpenAI class for the local model.
# NOTE(review): send_to_chatgpt builds its own client per call (so URL changes
# made via set_local_model_base_url take effect); this module-level client is
# otherwise unused here — confirm no other module relies on it.
client = OpenAI(api_key="local-model", base_url=local_model_base_url)

# Initialize LlamaCpp model and agent.
# NOTE(review): hard-coded, user-specific model path — this raises at import
# time on any machine without that file. Consider making it configurable.
llama_model = Llama("/Users/anima/.cache/lm-studio/models/arcee-ai/Arcee-Spark-GGUF/Arcee-Spark-Q4_K_M.gguf", n_batch=1024, n_threads=24, n_gpu_layers=33, n_ctx=4098, verbose=False)
provider = LlamaCppPythonProvider(llama_model)
llama_agent = LlamaCppAgent(
    provider,
    system_prompt="You are a helpful assistant.",
    predefined_messages_formatter_type=MessagesFormatterType.MISTRAL,
    debug_output=True
)

# Configure provider settings shared by all send_to_llamacpp calls.
settings = provider.get_provider_default_settings()
settings.max_tokens = 2000
settings.stream = True

def send_to_chatgpt(msg_list):
    """Send a chat history to the local OpenAI-compatible endpoint.

    A fresh client is built on every call so that changes made through
    set_local_model_base_url take effect immediately.

    Returns (response_text, usage) on success, or ("Error: ...", None)
    if anything goes wrong.
    """
    try:
        local_client = OpenAI(api_key="local-model", base_url=local_model_base_url)
        completion = local_client.chat.completions.create(
            model=OPENAI_MODEL,
            temperature=0.6,
            messages=msg_list,
        )
        return completion.choices[0].message.content, completion.usage
    except Exception as e:
        print(f"Error in send_to_chatgpt: {str(e)}")
        return f"Error: {str(e)}", None

def send_to_anything_llm(msg_list):
    """Send a chat message to the configured AnythingLLM workspace.

    Flattens the "content" fields of msg_list into one message string
    (the workspace chat API accepts a single message, not a history),
    POSTs it, and returns (response_text, usage_dict). On any request
    failure, returns ("Error: <details>", None) instead of raising.
    """
    url = f'http://localhost:3001/api/v1/workspace/{anything_llm_workspace}/chat'
    headers = {
        'accept': 'application/json',
        # SECURITY(review): hard-coded API key committed to source — move it
        # to params.py or an environment variable.
        'Authorization': 'Bearer 0MACR41-7804XQB-MGC1GS0-FGSKB44',
        'Content-Type': 'application/json'
    }
    message_content = " ".join(msg["content"] for msg in msg_list if "content" in msg)
    data = {
        "message": message_content,
        "mode": "chat"
    }
    data_json = json.dumps(data)
    try:
        # timeout prevents an unresponsive server from hanging the caller
        # forever; requests.Timeout is a RequestException, so the existing
        # handler below covers it.
        response = requests.post(url, headers=headers, data=data_json, timeout=120)
        response.raise_for_status()  # Raise an exception for bad status codes
        response_data = response.json()
        chatgpt_response = response_data.get("textResponse")
        chatgpt_usage = response_data.get("usage", {})
        return chatgpt_response, chatgpt_usage
    except requests.RequestException as e:
        print(f"Error in send_to_anything_llm: {str(e)}")
        return f"Error: {str(e)}", None

def send_to_llamacpp(msg_list):
    """Run a chat history through the module-level LlamaCppAgent.

    Returns (response_text, usage_dict) with prompt/completion/total token
    counts, or ("Error: ...", None) on any failure.
    """
    try:
        # Re-shape the history into the role/content dicts LlamaCppAgent expects.
        history = [{"role": m["role"], "content": m["content"]} for m in msg_list]
        result = llama_agent(history, settings=settings)
        usage = {
            "prompt_tokens": result.usage.prompt_tokens,
            "completion_tokens": result.usage.completion_tokens,
            "total_tokens": result.usage.total_tokens,
        }
        return result.message.content, usage
    except Exception as e:
        print(f"Error in send_to_llamacpp: {str(e)}")
        return f"Error: {str(e)}", None

def send_to_llm(provider, msg_list):
    """Dispatch a chat history to the backend named by *provider*.

    Supported providers: "local-model", "anything-llm", "llamacpp".
    Raises ValueError for anything else.
    """
    if provider == "local-model":
        return send_to_chatgpt(msg_list)
    if provider == "anything-llm":
        return send_to_anything_llm(msg_list)
    if provider == "llamacpp":
        return send_to_llamacpp(msg_list)
    raise ValueError(f"Unknown provider: {provider}")