Uhhy committed
Commit 53eee33
1 Parent(s): 71df925

Update app.py

Files changed (1)
  1. app.py +9 -7
app.py CHANGED
@@ -1,7 +1,7 @@
 from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
 from llama_cpp import Llama
-from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import ThreadPoolExecutor, as_completed
 import uvicorn
 from dotenv import load_dotenv
 from difflib import SequenceMatcher
@@ -10,7 +10,7 @@ load_dotenv()
 
 app = FastAPI()
 
-# Model initialization
+# Model configuration
 models = [
     {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf"},
     {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf"},
@@ -47,7 +47,7 @@ def select_best_response(responses, request):
     return best_response
 
 def filter_by_coherence(responses, request):
-    # You can implement a more sophisticated filter here if needed
+    # Implement a coherence filter here if needed
     return responses
 
 def filter_by_similarity(responses):
@@ -62,16 +62,18 @@ def filter_by_similarity(responses):
 
 @app.post("/generate_chat")
 async def generate_chat(request: ChatRequest):
-    with ThreadPoolExecutor() as executor:
-        # Run the tasks in parallel
+    # Run in a ThreadPoolExecutor with no explicit worker limit
+    with ThreadPoolExecutor(max_workers=None) as executor:
         futures = [executor.submit(generate_chat_response, request, llm) for llm in llms]
-        responses = [future.result() for future in futures]
+        responses = []
+        for future in as_completed(futures):
+            response = future.result()
+            responses.append(response)
 
     if any("Error" in response for response in responses):
         error_response = next(response for response in responses if "Error" in response)
         raise HTTPException(status_code=500, detail=error_response)
 
-    # Select the best response
     best_response = select_best_response(responses, request)
     return {"response": best_response}
 
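
Note on the new loop: as_completed() yields futures in completion order, not submission order, so the responses list no longer necessarily lines up index-for-index with llms. If select_best_response ever needs to know which model produced which response, iterate the futures list directly (or map each future back to its llm). A minimal, self-contained sketch of the difference, using only the standard library (fake_generate and the delays are invented for illustration; the real handler calls generate_chat_response):

from concurrent.futures import ThreadPoolExecutor, as_completed
import time

def fake_generate(delay, label):
    # Stand-in for generate_chat_response: a slower "model" finishes later.
    time.sleep(delay)
    return label

with ThreadPoolExecutor(max_workers=None) as executor:
    futures = [executor.submit(fake_generate, delay, name)
               for delay, name in [(0.2, "slow-model"), (0.0, "fast-model")]]

    # as_completed(): results arrive in completion order ("fast-model" first).
    by_completion = [f.result() for f in as_completed(futures)]

    # Iterating the futures list directly preserves submission order.
    by_submission = [f.result() for f in futures]

print(by_completion)   # ['fast-model', 'slow-model']
print(by_submission)   # ['slow-model', 'fast-model']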