Uhhy committed
Commit 71df925
1 Parent(s): b212c94

Update app.py

Files changed (1)
  1. app.py +8 -6
app.py CHANGED
@@ -1,7 +1,7 @@
 from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
 from llama_cpp import Llama
-from concurrent.futures import ProcessPoolExecutor
+from concurrent.futures import ThreadPoolExecutor
 import uvicorn
 from dotenv import load_dotenv
 from difflib import SequenceMatcher
@@ -10,6 +10,7 @@ load_dotenv()
 
 app = FastAPI()
 
+# Model initialization
 models = [
     {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf"},
     {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf"},
@@ -17,10 +18,8 @@ models = [
     {"repo_id": "Ffftdtd5dtft/gemma-2-27b-Q2_K-GGUF", "filename": "gemma-2-27b-q2_k.gguf"},
 ]
 
-llms = []
-for model in models:
-    llm = Llama.from_pretrained(repo_id=model['repo_id'], filename=model['filename'])
-    llms.append(llm)
+# Load the models into memory
+llms = [Llama.from_pretrained(repo_id=model['repo_id'], filename=model['filename']) for model in models]
 
 class ChatRequest(BaseModel):
     message: str
@@ -48,6 +47,7 @@ def select_best_response(responses, request):
     return best_response
 
 def filter_by_coherence(responses, request):
+    # A more sophisticated filter can be implemented here if needed
     return responses
 
 def filter_by_similarity(responses):
@@ -62,7 +62,8 @@ def filter_by_similarity(responses):
 
 @app.post("/generate_chat")
 async def generate_chat(request: ChatRequest):
-    with ProcessPoolExecutor() as executor:
+    with ThreadPoolExecutor() as executor:
+        # Run the tasks in parallel
         futures = [executor.submit(generate_chat_response, request, llm) for llm in llms]
         responses = [future.result() for future in futures]
 
@@ -70,6 +71,7 @@ async def generate_chat(request: ChatRequest):
         error_response = next(response for response in responses if "Error" in response)
         raise HTTPException(status_code=500, detail=error_response)
 
+    # Select the best response
     best_response = select_best_response(responses, request)
     return {"response": best_response}
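
For reference, a minimal sketch of how the updated /generate_chat endpoint could be exercised with FastAPI's TestClient. This is not part of the commit: it assumes "message" is the only required field of ChatRequest, that app.py is importable from the working directory, and that importing it triggers the model loading shown above.

# sketch_test_generate_chat.py -- hypothetical client for the endpoint in this diff
from fastapi.testclient import TestClient

from app import app  # importing app loads the GGUF models listed in the diff

client = TestClient(app)

def test_generate_chat():
    # POST a chat message; the endpoint fans it out to every loaded model via
    # ThreadPoolExecutor and returns the reply chosen by select_best_response().
    resp = client.post("/generate_chat", json={"message": "Hello!"})
    assert resp.status_code == 200
    assert "response" in resp.json()

Because the endpoint blocks on future.result() for every model, the call returns only after all loaded models have produced a reply (or an error, which surfaces as an HTTP 500).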