arjunanand13 committed
Commit 36aa4cf
1 Parent(s): e07e6fb

Create app.py

Files changed (1)
  1. app.py +194 -0
app.py ADDED
@@ -0,0 +1,194 @@
+ import os
+ import time
+ from threading import Thread
+
+ import gradio as gr
+ import spaces
+ import torch
+ from datasets import load_dataset
+ from sentence_transformers import SentenceTransformer
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
+
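+ # --- Retrieval setup ---
+ # Encode queries with the same embedding model used for the dataset, and search
+ # the pre-embedded Wikipedia corpus through an in-memory FAISS index.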
+ token = os.environ["HF_TOKEN"]
+ ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
+
+ dataset = load_dataset("not-lain/wikipedia", revision="embedded")
+
+ data = dataset["train"]
+ data = data.add_faiss_index("embeddings")  # column that holds the precomputed embeddings
+
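+ # --- Generator setup ---
+ # Llama 3 8B Instruct loaded with 4-bit NF4 quantization (bitsandbytes) to keep GPU
+ # memory usage low; generation stops at EOS or Llama 3's <|eot_id|> end-of-turn token.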
+ model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
+
+ # use 4-bit quantization to lower GPU memory usage
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16,
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+     quantization_config=bnb_config,
+     token=token,
+ )
+ terminators = [
+     tokenizer.eos_token_id,
+     tokenizer.convert_tokens_to_ids("<|eot_id|>"),
+ ]
+
+ SYS_PROMPT = """You are an assistant for answering questions.
+ You are given the extracted parts of a long document and a question. Provide a conversational answer.
+ If you don't know the answer, just say "I do not know." Don't make up an answer."""
+
+
+ def search(query: str, k: int = 3):
+     """Embed a new query and return the k most similar dataset entries."""
+     embedded_query = ST.encode(query)  # embed the new query
+     scores, retrieved_examples = data.get_nearest_examples(  # retrieve results
+         "embeddings", embedded_query,  # compare the embedded query with the dataset embeddings
+         k=k,  # keep only the top k results
+     )
+     return scores, retrieved_examples
+
+
+ def format_prompt(prompt, retrieved_documents, k):
+     """Build the user prompt by appending the retrieved documents as context."""
+     PROMPT = f"Question:{prompt}\nContext:"
+     for idx in range(k):
+         PROMPT += f"{retrieved_documents['text'][idx]}\n"
+     return PROMPT
+
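+
+ # --- Chat handler ---
+ # For each user message: retrieve the top-k documents, build a context-augmented
+ # prompt, then stream the model's answer from a background generation thread.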
+ @spaces.GPU(duration=150)
+ def talk(prompt, history):
+     k = 1  # number of retrieved documents
+     scores, retrieved_documents = search(prompt, k)
+     formatted_prompt = format_prompt(prompt, retrieved_documents, k)
+     formatted_prompt = formatted_prompt[:2000]  # truncate the context to avoid GPU OOM
+     messages = [{"role": "system", "content": SYS_PROMPT}, {"role": "user", "content": formatted_prompt}]
+     # build the model input from the chat template
+     input_ids = tokenizer.apply_chat_template(
+         messages,
+         add_generation_prompt=True,
+         return_tensors="pt"
+     ).to(model.device)
+     # stream the generation from a background thread so tokens can be yielded as they arrive
+     streamer = TextIteratorStreamer(
+         tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+     )
+     generate_kwargs = dict(
+         input_ids=input_ids,
+         streamer=streamer,
+         max_new_tokens=1024,
+         do_sample=True,
+         top_p=0.95,
+         temperature=0.75,
+         eos_token_id=terminators,
+     )
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         yield "".join(outputs)
+
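+
+ # An alternative multi-turn implementation that also appends source links, kept commented out: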
+ # def talk(message, history):
+ #     print("history, ", history)
+ #     print("message ", message)
+ #     print("searching dataset ...")
+ #     retrieved_examples = search(message)
+ #     print("preparing prompt ...")
+ #     message, metadata = prepare_prompt(message, retrieved_examples)
+ #     resources = HEADER
+ #     print("preparing metadata ...")
+ #     for title, url in metadata:
+ #         resources += f"[{title}]({url}), "
+ #     print("preparing chat template ...")
+ #     chat = []
+ #     for item in history:
+ #         chat.append({"role": "user", "content": item[0]})
+ #         cleaned_past = item[1].split(HEADER)[0]
+ #         chat.append({"role": "assistant", "content": cleaned_past})
+ #     chat.append({"role": "user", "content": message})
+ #     messages = tokenizer.apply_chat_template(
+ #         chat, tokenize=False, add_generation_prompt=True
+ #     )
+ #     print("chat template prepared, ", messages)
+ #     print("tokenizing input ...")
+ #     # Tokenize the messages string
+ #     model_inputs = tokenizer([messages], return_tensors="pt").to(device)
+ #     streamer = TextIteratorStreamer(
+ #         tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+ #     )
+ #     generate_kwargs = dict(
+ #         model_inputs,
+ #         streamer=streamer,
+ #         max_new_tokens=1024,
+ #         do_sample=True,
+ #         top_p=0.95,
+ #         top_k=1000,
+ #         temperature=0.75,
+ #         num_beams=1,
+ #     )
+ #     print("initializing thread ...")
+ #     t = Thread(target=model.generate, kwargs=generate_kwargs)
+ #     t.start()
+ #     time.sleep(1)
+ #     # Initialize an empty string to store the generated text
+ #     partial_text = ""
+ #     i = 0
+ #     while t.is_alive():
+ #         try:
+ #             for new_text in streamer:
+ #                 if new_text is not None:
+ #                     partial_text += new_text
+ #                     yield partial_text
+ #         except Exception as e:
+ #             print(f"retry number {i}\n LOGS:\n")
+ #             i += 1
+ #             print(e, e.args)
+ #     partial_text += resources
+ #     yield partial_text
+
+
+ TITLE = "# RAG"
+
+ DESCRIPTION = """
+ A RAG pipeline with a chatbot interface.
+ Resources used to build this project:
+ * embedding model: https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1
+ * dataset: https://huggingface.co/datasets/not-lain/wikipedia
+ * faiss docs: https://huggingface.co/docs/datasets/v2.18.0/en/package_reference/main_classes#datasets.Dataset.add_faiss_index
+ * chatbot: https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct
+ """
+ demo = gr.ChatInterface(
+     fn=talk,
+     chatbot=gr.Chatbot(
+         show_label=True,
+         show_share_button=True,
+         show_copy_button=True,
+         likeable=True,
+         layout="bubble",
+         bubble_full_width=False,
+     ),
+     theme=gr.themes.Soft(),
+     examples=[["what's anarchy?"]],
+     title=TITLE,
+     description=DESCRIPTION,
+ )
+ demo.launch(debug=True)