BeTaLabs committed on
Commit
c8ee815
1 Parent(s): 0ad2bdb

Update llm_handler.py

Browse files
Files changed (1) hide show
  1. llm_handler.py +123 -57
llm_handler.py CHANGED
@@ -1,59 +1,125 @@
1
- import requests
2
- import json
3
- from openai import OpenAI
4
- from params import load_params
5
-
6
def get_client():
    """Build an OpenAI-compatible client for the configured provider.

    Returns:
        OpenAI: a client pointed at the local model's OpenAI-compatible
        endpoint (``BASE_URL`` from params).

    Raises:
        ValueError: if the configured provider does not use an OpenAI client.
    """
    params = load_params()
    if params['PROVIDER'] == 'local-model':
        # Local servers speak the OpenAI wire protocol; the api_key value is
        # a placeholder because local endpoints do not authenticate.
        return OpenAI(api_key="local-model", base_url=params['BASE_URL'])
    # Previously this returned None, which surfaced later as an opaque
    # AttributeError inside send_to_chatgpt; fail fast with a clear message.
    raise ValueError(f"No OpenAI client available for provider: {params['PROVIDER']}")
11
-
12
def send_to_chatgpt(msg_list, model="phi3:latest", temperature=0.6):
    """Send a chat-completion request to the OpenAI-compatible local model.

    Args:
        msg_list: list of ``{"role": ..., "content": ...}`` chat messages.
        model: model identifier understood by the endpoint. Defaults to the
            previously hard-coded ``"phi3:latest"`` for backward compatibility.
        temperature: sampling temperature (previous hard-coded default kept).

    Returns:
        tuple: ``(response_text, usage)`` on success, or
        ``(error_message, None)`` on failure.
    """
    try:
        client = get_client()
        if client is None:
            # Guard: older get_client implementations return None for
            # unsupported providers; raise a clear error instead of letting
            # the attribute access below produce a confusing AttributeError.
            raise ValueError("No client available for the configured provider")
        completion = client.chat.completions.create(
            model=model,
            temperature=temperature,
            messages=msg_list,
        )
        chatgpt_response = completion.choices[0].message.content
        chatgpt_usage = completion.usage
        return chatgpt_response, chatgpt_usage
    except Exception as e:
        # Best-effort contract: callers expect an error string, not a raise.
        print(f"Error in send_to_chatgpt: {str(e)}")
        return f"Error: {str(e)}", None
26
-
27
def send_to_anything_llm(msg_list):
    """POST a chat message to the AnythingLLM workspace API.

    Args:
        msg_list: list of ``{"role": ..., "content": ...}`` chat messages;
            their ``content`` fields are flattened into one message string.

    Returns:
        tuple: ``(textResponse, usage_dict)`` on success, or
        ``(error_message, None)`` on failure.
    """
    params = load_params()
    url = f"{params['BASE_URL']}/api/v1/workspace/{params['WORKSPACE']}/chat"
    headers = {
        'accept': 'application/json',
        'Authorization': f"Bearer {params['API_KEY']}",
        'Content-Type': 'application/json'
    }
    # The AnythingLLM chat endpoint takes a single message string, so the
    # whole history is flattened; messages without "content" are skipped.
    message_content = " ".join(msg["content"] for msg in msg_list if "content" in msg)
    data = {
        "message": message_content,
        "mode": "chat"
    }
    try:
        # json= lets requests serialize the payload itself; timeout prevents
        # the worker from hanging forever on an unresponsive endpoint.
        response = requests.post(url, headers=headers, json=data, timeout=120)
        response.raise_for_status()
        response_data = response.json()
        chatgpt_response = response_data.get("textResponse")
        chatgpt_usage = response_data.get("usage", {})
        return chatgpt_response, chatgpt_usage
    except requests.RequestException as e:
        # Best-effort contract: callers expect an error string, not a raise.
        print(f"Error in send_to_anything_llm: {str(e)}")
        return f"Error: {str(e)}", None
51
-
52
def send_to_llm(msg_list):
    """Route a message list to the backend selected by params['PROVIDER'].

    Args:
        msg_list: chat messages forwarded unchanged to the chosen backend.

    Returns:
        tuple: ``(response_text, usage)`` from the selected backend.

    Raises:
        ValueError: if the configured provider is not recognized.
    """
    provider = load_params()['PROVIDER']
    # Dispatch table instead of an if/elif chain; behavior is identical.
    backends = {
        "local-model": send_to_chatgpt,
        "anything-llm": send_to_anything_llm,
    }
    handler = backends.get(provider)
    if handler is None:
        raise ValueError(f"Unknown provider: {provider}")
    return handler(msg_list)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Import necessary libraries and modules
import json  # JSON encoding for the output dataset (JSONL lines)
import numpy as np  # Used only for np.random.randint topic/message sampling
from wiki import search as search_wikipedia  # Project-local Wikipedia lookup helper
from concurrent.futures import ThreadPoolExecutor  # Thread pool for concurrent generation
from llm_handler import send_to_llm  # Sends message lists to the configured LLM backend
from params import OUTPUT_FILE_PATH, NUM_WORKERS, PROVIDER  # Run configuration constants

# NOTE(review): this immediately shadows the PROVIDER imported from params
# above, pinning the run to "local-model" regardless of configuration —
# confirm the override is intentional.
PROVIDER = "local-model"

# Import system messages from the system_messages module
from system_messages import (
    SYSTEM_MESSAGES_VODALUS,
)
from topics import TOPICS  # Pool of subject areas to sample from

# Use the VODALUS system-message set for answer generation
SYSTEM_MESSAGES = SYSTEM_MESSAGES_VODALUS
20
+
21
# Question-generation prompt sent to the LLM as the system message.
# Runtime string: kept verbatim (including its original typos), since any
# character change alters what the model receives.
PROMPT_1 = """
For the following SUBJECT_AREA, generate a question that covers a very narrow topic in the SUBJECT_AREA, with sufficient depth and breadth. The topic in the question should be important to the SUBJECT_AREA, with known-answers present. The generated question should be detailed, seek true nature of our universe from first principles, curiosity invoking, thought provoking, and also should be able to be answered by an intelligence like yourself. Make sure the question is sufficiently harder and multi-part, like a graduate level course question. Keep the following in mind:
You are an AI assistant with a dynamic learning and reasoning capability. Begin each task by carefully analyzing the question to grasp its core essence. Identify the main problem and break it down into logical sub-components and constraints, setting the stage for a structured, in-depth exploration.
- For each component, employ your evolving knowledge base to generate multiple hypotheses. Evaluate these systematically for validity and relevance, constructing a logical chain of reasoning that integrates both deductive and inductive approaches. Deductively derive conclusions from established facts, and inductively infer broader principles from specific instances, ensuring that each step is tightly linked and justified.
- Simultaneously, maintain a 'Tree of Thoughts' approach. Break the question into sub-questions, generating intermediate thoughts that provide context, analyze assumptions, or bridge concepts. Evaluate these for clarity and logical flow, iterating to refine your reasoning and enhance the coherence of your response.
- Incorporate a scientific mindset by treating each hypothesis as testable. Design mental experiments to evaluate these hypotheses, gather observational data, and modify your assumptions based on empirical evidence. Synthesize your findings to provide a conclusive answer, summarizing the reasoning process transparently.
- Throughout, adopt a conversational yet insightful tone. Weave in philosophical questioning, humorous insights, and practical examples to elucidate complex ideas in an engaging manner. Your goal is to provide clear, detailed explanations that make your reasoning process evident, fostering an understanding that resonates on both intellectual and emotional levels.
- Finally, remember that each task is a unique learning opportunity that contributes to your holistic knowledge framework. Synthesize insights from individual tasks into your broader understanding, continually refining your cognitive processes and response strategies. Your ultimate aim is to evolve continuously, optimizing how you integrate and apply new information for comprehensive, adaptive problem-solving.
- Never end the conversation with a 'send-off' or 'last-line' that offers nothing of real value to the user.
####DO NOT EVER MENTION THE DATASET AND THE ACTY OF CRAFTING QUESTIONS OR RESPONSES WHILE GENERATING, YOU ARE NOT ALLOWED TO BREAK THE 4TH-WALL AND CONTAMINATE THE DATASET. DO NOT EVERY SAY ANY PHRASES SUCH AS AND/OR SIMILAR TO: 'Here's a question that covers a very narrow topic in the SUBJECT_AREA'####
"""


# System-role wrapper around PROMPT_1.
# NOTE(review): this module-level msg_context appears unused — generate_data
# rebuilds a local msg_context with the Wikipedia-augmented prompt; confirm
# before removing.
msg_context = {"role": "system", "content": str(PROMPT_1)}
37
+
38
# Generate one (question, answer) record for a topic and append it to the output file
def generate_data(
    topic_selected,
    system_message_generation,
    system_message_selected,
    output_file_path,
    llm_provider
):
    """Generate a single instruction/response pair grounded in Wikipedia.

    Bug fix: the original declared this function ``async`` while the caller
    submits it to a ``ThreadPoolExecutor``; ``executor.submit`` on a coroutine
    function merely creates a never-awaited coroutine object, so no LLM call
    or file write ever executed. The body is fully synchronous (blocking HTTP
    and file I/O), so the ``async`` keyword is removed.

    Args:
        topic_selected: subject area to generate a question about.
        system_message_generation: prompt steering question generation (PROMPT_1).
        system_message_selected: system message used for the answer turn.
        output_file_path: JSONL file path the record is appended to.
        llm_provider: provider identifier forwarded to send_to_llm.

    Returns:
        dict: the record written to disk, with keys
        ``"system"``, ``"instruction"``, ``"response"``.
    """
    # Fetch Wikipedia content for the selected topic
    wikipedia_info = search_wikipedia(topic_selected)

    # Format Wikipedia search results into a readable string
    wikipedia_summary = "\n".join(
        f"Title: {info['title']}, Abstract: {info['abstract']}" for info in wikipedia_info
    )

    # Append Wikipedia information to the generation prompt for LLM context
    full_prompt_for_llm = f"{system_message_generation}\n\n---\nWikipedia Information to use in your response generation:\n{wikipedia_summary}"

    # Local msg_context (intentionally shadows the module-level one)
    msg_context = {"role": "system", "content": full_prompt_for_llm}

    # First LLM round-trip: generate the question
    msg_list = [
        msg_context,
        {"role": "user", "content": f"Generate a question based on the SUBJECT_AREA: {topic_selected}"},
    ]
    # NOTE(review): send_to_llm is called as (provider, msg_list) — confirm
    # the current llm_handler signature accepts the provider first.
    question, _ = send_to_llm(llm_provider, msg_list)

    # Second LLM round-trip: answer the generated question
    msg_list_answer = [
        {"role": "system", "content": system_message_selected},
        {"role": "user", "content": question},
    ]
    answer, _ = send_to_llm(llm_provider, msg_list_answer)

    # Prepare data for output (usage information intentionally excluded)
    data = {
        "system": system_message_selected,
        "instruction": question,
        "response": answer
    }

    # Append as one JSON line; explicit encoding keeps the file portable
    with open(output_file_path, "a", encoding="utf-8") as output_file:
        output_file.write(json.dumps(data) + "\n")

    return data
85
+
86
# Orchestrate the concurrent data generation process
def main():
    """Fan out NUM_WORKERS generation tasks and report successes/failures.

    Robustness fix: the original called ``future.result()`` bare, so the
    first worker exception aborted the entire run and the ``failed`` counter
    could never be incremented by an error. Each result is now collected
    inside try/except so one bad task cannot kill the batch.
    """
    nn = 0  # Counter for successful generations
    failed = 0  # Counter for failed generations
    with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
        # Submit one randomly-configured generation task per worker slot
        futures = []
        for _ in range(NUM_WORKERS):
            topic_number = np.random.randint(0, len(TOPICS))
            topic_selected = TOPICS[topic_number]
            system_message_number = np.random.randint(0, len(SYSTEM_MESSAGES))
            system_message_selected = SYSTEM_MESSAGES[system_message_number]
            system_message_generation = PROMPT_1
            futures.append(
                executor.submit(
                    generate_data,
                    topic_selected,
                    system_message_generation,
                    system_message_selected,
                    OUTPUT_FILE_PATH,
                    PROVIDER,
                )
            )

        # Collect results; a failing task should not abort the whole run
        for future in futures:
            try:
                data = future.result()
            except Exception as e:
                print(f"Generation task failed: {e}")
                data = None
            if data:
                nn += 1
                print(data)
                print(
                    f"Generation {nn} Complete"
                )
            else:
                failed += 1
            print("=" * 132)


if __name__ == "__main__":
    main()