Severian committed on
Commit
a767bbb
1 Parent(s): ee8a8cd

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +6 -2
main.py CHANGED
@@ -5,6 +5,7 @@ from wiki import search as search_wikipedia # Import the search function from t
5
  from concurrent.futures import ThreadPoolExecutor # Import ThreadPoolExecutor for concurrent execution
6
  from llm_handler import send_to_llm # Import the send_to_llm function from the llm_handler module
7
  from params import OUTPUT_FILE_PATH, NUM_WORKERS # Import constants from the params module
 
8
 
9
  # Import system messages from the system_messages module
10
  from system_messages import (
@@ -12,6 +13,9 @@ from system_messages import (
12
  )
13
  from topics import TOPICS # Import topics from the topics module
14
 
 
 
 
15
  # Set the system messages to those specified in SYSTEM_MESSAGES_VODALUS
16
  SYSTEM_MESSAGES = SYSTEM_MESSAGES_VODALUS
17
 
@@ -54,7 +58,7 @@ async def generate_data(
54
  msg_list = [msg_context, {"role": "user", "content": f"Generate a question based on the SUBJECT_AREA: {topic_selected}"}]
55
 
56
  # Send to LLM for question generation
57
- question, _ = send_to_llm(msg_list)
58
 
59
  # Prepare message list for LLM to generate the answer
60
  msg_list_answer = [
@@ -63,7 +67,7 @@ async def generate_data(
63
  ]
64
 
65
  # Send to LLM for answer generation
66
- answer, _ = send_to_llm(msg_list_answer)
67
 
68
  # Prepare data for output (excluding usage information)
69
  data = {
 
5
  from concurrent.futures import ThreadPoolExecutor # Import ThreadPoolExecutor for concurrent execution
6
  from llm_handler import send_to_llm # Import the send_to_llm function from the llm_handler module
7
  from params import OUTPUT_FILE_PATH, NUM_WORKERS # Import constants from the params module
8
+ import llamanet
9
 
10
  # Import system messages from the system_messages module
11
  from system_messages import (
 
13
  )
14
  from topics import TOPICS # Import topics from the topics module
15
 
16
+ # Initialize LlamaNet
17
+ llamanet.run()
18
+
19
  # Set the system messages to those specified in SYSTEM_MESSAGES_VODALUS
20
  SYSTEM_MESSAGES = SYSTEM_MESSAGES_VODALUS
21
 
 
58
  msg_list = [msg_context, {"role": "user", "content": f"Generate a question based on the SUBJECT_AREA: {topic_selected}"}]
59
 
60
  # Send to LLM for question generation
61
+ question, _ = send_to_llm("llamanet", msg_list)
62
 
63
  # Prepare message list for LLM to generate the answer
64
  msg_list_answer = [
 
67
  ]
68
 
69
  # Send to LLM for answer generation
70
+ answer, _ = send_to_llm("llamanet", msg_list_answer)
71
 
72
  # Prepare data for output (excluding usage information)
73
  data = {