from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from llama_cpp import Llama
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import uvicorn
from dotenv import load_dotenv
import re
from spaces import GPU

# Load environment variables
load_dotenv()

# Initialize the FastAPI application
app = FastAPI()

# Global dictionary holding the loaded models
global_data = {
    'models': []
}

# Model configuration (including the new models)
model_configs = [
    {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf", "name": "GPT-2 XL"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-8B Instruct"},
    # Other models omitted for brevity
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-70B Instruct"},
    {"repo_id": "Ffftdtd5dtft/codegemma-2b-IQ1_S-GGUF", "filename": "codegemma-2b-iq1_s-imat.gguf", "name": "Codegemma 2B"},
    {"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Instruct-2407-Q2_K-GGUF", "filename": "mistral-nemo-instruct-2407-q2_k.gguf", "name": "Mistral Nemo Instruct 2407"}
]

# Model management class
class ModelManager:
    def __init__(self):
        self.models = []

    def load_model(self, model_config):
        print(f"Loading model: {model_config['name']}...")
        return {
            "model": Llama.from_pretrained(repo_id=model_config['repo_id'], filename=model_config['filename']),
            "name": model_config['name']
        }

    # ZeroGPU decorator so the load runs on GPU inside a Hugging Face Space
    @GPU(duration=0)
    def load_all_models(self):
        print("Starting model load...")
        with ThreadPoolExecutor(max_workers=len(model_configs)) as executor:
            futures = [executor.submit(self.load_model, config) for config in model_configs]
            models = []
            for future in tqdm(as_completed(futures), total=len(model_configs), desc="Loading models", unit="model"):
                try:
                    model = future.result()
                    models.append(model)
                    print(f"Model loaded successfully: {model['name']}")
                except Exception as e:
                    print(f"Error loading model: {e}")
        print("All models have been loaded.")
        return models

# Instantiate ModelManager and load the models only once
model_manager = ModelManager()
global_data['models'] = model_manager.load_all_models()

# Request model for the chat endpoint
class ChatRequest(BaseModel):
    message: str
    top_k: int = 50
    top_p: float = 0.95
    temperature: float = 0.7

# Generate a chat response from a single model
def generate_chat_response(request, model_data):
    # Normalize before the try block so user_input is defined in the except branch
    user_input = normalize_input(request.message)
    try:
        llm = model_data['model']
        response = llm.create_chat_completion(
            messages=[{"role": "user", "content": user_input}],
            top_k=request.top_k,
            top_p=request.top_p,
            temperature=request.temperature
        )
        reply = response['choices'][0]['message']['content']
        return {"response": reply, "literal": user_input, "model_name": model_data['name']}
    except Exception as e:
        return {"response": f"Error: {str(e)}", "literal": user_input, "model_name": model_data['name']}

def normalize_input(input_text):
    return input_text.strip()

def remove_duplicates(text):
    # Collapse repeated instruction-tag phrases, drop the tag, and deduplicate lines
    text = re.sub(r'(Hello there, how are you\? \[/INST\]){2,}', 'Hello there, how are you? [/INST]', text)
    text = re.sub(r'(How are you\? \[/INST\]){2,}', 'How are you? [/INST]', text)
    text = text.replace('[/INST]', '')
    lines = text.split('\n')
    unique_lines = list(dict.fromkeys(lines))
    return '\n'.join(unique_lines).strip()
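# A minimal sketch of what remove_duplicates does (the input string below is
# hypothetical, for illustration only): the '[/INST]' tag is stripped first,
# then repeated lines are collapsed while preserving their original order.
#
#   remove_duplicates("Hi there [/INST]\nHi there \nBye")
#   # -> "Hi there \nBye"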
def remove_repetitive_responses(responses):
    seen = set()
    unique_responses = []
    for response in responses:
        normalized_response = remove_duplicates(response['response'])
        if normalized_response not in seen:
            seen.add(normalized_response)
            unique_responses.append(response)
    return unique_responses

# Error handling for model initialization (the traceback mentioned in the error)
def handle_initialization_error(allow_token):
    # The original code asserted on httpx.Client().allow(...), which is not a
    # real httpx API; validating the token directly reproduces the intended
    # check in a runnable form.
    if not allow_token:
        raise HTTPException(status_code=500, detail="Error initializing the Spaces client")

# Route that runs the chat request against every loaded model
@app.post("/chat/")
async def chat(request: ChatRequest):
    try:
        # Simulates the `AssertionError` raised during initialization
        allow_token = "test_token"
        handle_initialization_error(allow_token)

        with ThreadPoolExecutor() as executor:
            futures = [executor.submit(generate_chat_response, request, model) for model in global_data['models']]
            responses = [future.result() for future in as_completed(futures)]

        unique_responses = remove_repetitive_responses(responses)
        return {"responses": unique_responses}
    except HTTPException:
        # Re-raise as-is instead of wrapping it in another 500
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing the request: {str(e)}")

# Template in the style of `chat_template.default`
chat_template = """
User: {message}
Bot: {response}
"""

# Render a chat exchange with the template
def render_chat_template(message, response):
    return chat_template.format(message=message, response=response)

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
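# Example client call against the running server (a minimal sketch; the payload
# values are illustrative and assume the uvicorn host/port settings above):
#
#   import httpx
#   payload = {"message": "Hello!", "top_k": 50, "top_p": 0.95, "temperature": 0.7}
#   r = httpx.post("http://localhost:8000/chat/", json=payload, timeout=None)
#   print(r.json())  # {"responses": [{"response": ..., "literal": ..., "model_name": ...}, ...]}
#
# One entry is returned per model, after duplicate replies are filtered out.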