Yhhxhfh committed
Commit 2dc339d
1 Parent(s): ca1fd14

Create app.py

Files changed (1)
  1. app.py +125 -0
app.py ADDED
@@ -0,0 +1,125 @@
+ from pydantic import BaseModel
+ from llama_cpp import Llama
+ from concurrent.futures import ThreadPoolExecutor
+ import re
+ import os
+ import gradio as gr
+ from dotenv import load_dotenv
+ from fastapi import FastAPI, Request
+ from fastapi.responses import JSONResponse
+ import spaces
+ import urllib3
+
+ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+ app = FastAPI()
+ load_dotenv()
+
+ HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
+
+ global_data = {
+     'model': None,
+     'tokens': {
+         'eos': 'eos_token',
+         'pad': 'pad_token',
+         'padding': 'padding_token',
+         'unk': 'unk_token',
+         'bos': 'bos_token',
+         'sep': 'sep_token',
+         'cls': 'cls_token',
+         'mask': 'mask_token'
+     }
+ }
+
+ model_configs = [
+     {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf"},
+     {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf"},
+     {"repo_id": "Ffftdtd5dtft/gemma-2-9b-it-Q2_K-GGUF", "filename": "gemma-2-9b-it-q2_k.gguf"},
+     {"repo_id": "Ffftdtd5dtft/gemma-2-27b-Q2_K-GGUF", "filename": "gemma-2-27b-q2_k.gguf"},
+     {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-Q2_K-GGUF", "filename": "phi-3-mini-128k-instruct-q2_k.gguf"},
+     {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-q2_k.gguf"},
+     {"repo_id": "Ffftdtd5dtft/Qwen2-7B-Instruct-Q2_K-GGUF", "filename": "qwen2-7b-instruct-q2_k.gguf"},
+     {"repo_id": "Ffftdtd5dtft/starcoder2-3b-Q2_K-GGUF", "filename": "starcoder2-3b-q2_k.gguf"},
+     {"repo_id": "Ffftdtd5dtft/Qwen2-1.5B-Instruct-Q2_K-GGUF", "filename": "qwen2-1.5b-instruct-q2_k.gguf"},
+     {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-instruct-q2_k.gguf"},
+     {"repo_id": "Ffftdtd5dtft/codegemma-2b-IQ1_S-GGUF", "filename": "codegemma-2b-iq1_s-imat.gguf"},
+     {"repo_id": "Ffftdtd5dtft/Phi-3.5-mini-instruct-Q2_K-GGUF", "filename": "phi-3.5-mini-instruct-q2_k.gguf"},
+     {"repo_id": "Ffftdtd5dtft/TinyLlama-1.1B-Chat-v1.0-IQ1_S-GGUF", "filename": "tinyllama-1.1b-chat-v1.0-iq1_s-imat.gguf"},
+     {"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Minitron-8B-Base-IQ1_S-GGUF", "filename": "mistral-nemo-minitron-8b-base-iq1_s-imat.gguf"},
+     {"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Instruct-2407-Q2_K-GGUF", "filename": "mistral-nemo-instruct-2407-q2_k.gguf"}
+ ]
+
+ class ModelManager:
+     def __init__(self):
+         self.model = None
+
+     def load_models(self):
+         models = []
+         for config in model_configs:
+             try:
+                 model = Llama.from_pretrained(repo_id=config['repo_id'], filename=config['filename'], use_auth_token=HUGGINGFACE_TOKEN)
+                 models.append(model)
+             except Exception as e:
+                 pass
+         self.model = models
+
+ model_manager = ModelManager()
+ model_manager.load_models()
+ global_data['model'] = model_manager.model
+
+ class ChatRequest(BaseModel):
+     message: str
+
+ def normalize_input(input_text):
+     return input_text.strip()
+
+ def remove_duplicates(text):
+     text = re.sub(r'(Hello there, how are you\? \[/INST\]){2,}', 'Hello there, how are you? [/INST]', text)
+     text = re.sub(r'(How are you\? \[/INST\]){2,}', 'How are you? [/INST]', text)
+     text = text.replace('[/INST]', '')
+     lines = text.split('\n')
+     unique_lines = []
+     seen_lines = set()
+     for line in lines:
+         if line not in seen_lines:
+             unique_lines.append(line)
+             seen_lines.add(line)
+     return '\n'.join(unique_lines)
+
+ @spaces.GPU()
+ async def generate_combined_response(inputs):
+     combined_response = ""
+     for model in global_data['model']:
+         try:
+             response = model(inputs)
+             combined_response += remove_duplicates(response['choices'][0]['text']) + "\n"
+         except Exception as e:
+             pass
+     return combined_response
+
+ async def process_message(message):
+     inputs = normalize_input(message)
+     combined_response = await generate_combined_response(inputs)
+     formatted_response = ""
+     for line in combined_response.split("\n"):
+         formatted_response += f"{line}\n\n"
+     return formatted_response
+
+ @app.post("/generate_multimodel")
+ async def api_generate_multimodel(request: Request):
+     data = await request.json()
+     message = data["message"]
+     formatted_response = await process_message(message)
+     return JSONResponse({"response": formatted_response})
+
+ iface = gr.Interface(
+     fn=process_message,
+     inputs=gr.Textbox(lines=2, placeholder="Enter your message here..."),
+     outputs=gr.Markdown(),
+     title="Multi-Model LLM API",
+     description="Enter a message and get responses from a unified model.",
+ )
+
+ if __name__ == "__main__":
+     port = int(os.environ.get("PORT", 7860))
+     iface.launch(server_port=port)
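
Note: as committed, the `__main__` block only calls `iface.launch()`, so the Gradio UI is served but the FastAPI application `app` (and with it the POST route `/generate_multimodel`) is never bound to a server. Below is a minimal sketch of one way to serve both from a single process; it is not part of the commit, and it assumes the installed Gradio version provides `gr.mount_gradio_app` and that `uvicorn` is available.

# Sketch only (not in the commit): replace the final __main__ block so the
# FastAPI routes and the Gradio UI are served by one uvicorn server.
import uvicorn

app = gr.mount_gradio_app(app, iface, path="/")  # Gradio UI at "/", API route unchanged

if __name__ == "__main__":
    port = int(os.environ.get("PORT", 7860))
    uvicorn.run(app, host="0.0.0.0", port=port)

Once the FastAPI app is actually served, the endpoint can be exercised with any HTTP client; the host and port below are placeholders for a locally running instance.

# Hypothetical client call; assumes the requests package is installed.
import requests

resp = requests.post(
    "http://localhost:7860/generate_multimodel",
    json={"message": "Hello there, how are you?"},
)
print(resp.json()["response"])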