Severian committed on
Commit
d435537
1 Parent(s): 66be049

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -10,6 +10,7 @@ from main import generate_data, PROMPT_1
10
  from topics import TOPICS
11
  from system_messages import SYSTEM_MESSAGES_VODALUS
12
  import random
 
13
 
14
  ANNOTATION_CONFIG_FILE = "annotation_config.json"
15
  OUTPUT_FILE_PATH = "dataset.jsonl"
@@ -263,7 +264,7 @@ def save_dataset_config(system_messages, prompt_1, topics):
263
 
264
  return "Dataset configuration saved successfully"
265
 
266
- # Modify the chat_with_llm function to use Gradio's built-in async capabilities
267
  def chat_with_llm(message, history):
268
  try:
269
  msg_list = [{"role": "system", "content": "You are an AI assistant helping with dataset annotation and quality checking."}]
@@ -291,7 +292,7 @@ def update_chat_context(row_data, index, total, quality, high_quality_tags, low_
291
  """
292
  return [[None, context]] # Return as a list of message pairs
293
 
294
- # Add this function to handle dataset generation
295
  async def run_generate_dataset(num_workers, num_generations, output_file_path):
296
  generated_data = []
297
  for _ in range(num_generations):
 
10
  from topics import TOPICS
11
  from system_messages import SYSTEM_MESSAGES_VODALUS
12
  import random
13
+ import spaces
14
 
15
  ANNOTATION_CONFIG_FILE = "annotation_config.json"
16
  OUTPUT_FILE_PATH = "dataset.jsonl"
 
264
 
265
  return "Dataset configuration saved successfully"
266
 
267
+ @spaces.GPU(duration=120)
268
  def chat_with_llm(message, history):
269
  try:
270
  msg_list = [{"role": "system", "content": "You are an AI assistant helping with dataset annotation and quality checking."}]
 
292
  """
293
  return [[None, context]] # Return as a list of message pairs
294
 
295
+ @spaces.GPU(duration=180)
296
  async def run_generate_dataset(num_workers, num_generations, output_file_path):
297
  generated_data = []
298
  for _ in range(num_generations):