Shreyas094 committed on
Commit
f080583
1 Parent(s): 65a5885

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -14
app.py CHANGED
@@ -223,23 +223,23 @@ def google_search(term, num_results=5, lang="en", timeout=5, safe="active", ssl_
223
 
224
  def ask_question(question, temperature, top_p, repetition_penalty, web_search):
225
  global conversation_history
226
-
227
  if not question:
228
  return "Please enter a question."
229
-
230
  if question in memory_database and not web_search:
231
  answer = memory_database[question]
232
  else:
233
  model = get_model(temperature, top_p, repetition_penalty)
234
  embed = get_embeddings()
235
-
236
  if web_search:
237
  search_results = google_search(question)
238
  context_str = "\n".join([result["text"] for result in search_results if result["text"]])
239
-
240
  # Convert web search results to Document format
241
  web_docs = [Document(page_content=result["text"], metadata={"source": result["link"]}) for result in search_results if result["text"]]
242
-
243
  # Check if the FAISS database exists
244
  if os.path.exists("faiss_database"):
245
  database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
@@ -247,7 +247,7 @@ def ask_question(question, temperature, top_p, repetition_penalty, web_search):
247
  else:
248
  database = FAISS.from_documents(web_docs, embed)
249
  database.save_local("faiss_database")
250
-
251
  prompt_template = """
252
  Answer the question based on the following web search results:
253
  Web Search Results:
@@ -264,32 +264,32 @@ def ask_question(question, temperature, top_p, repetition_penalty, web_search):
264
  database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
265
  else:
266
  return "No FAISS database found. Please upload documents to create the vector store."
267
-
268
  history_str = "\n".join([f"Q: {item['question']}\nA: {item['answer']}" for item in conversation_history])
269
-
270
  if is_related_to_history(question, conversation_history):
271
  context_str = "No additional context needed. Please refer to the conversation history."
272
  else:
273
  retriever = database.as_retriever()
274
  relevant_docs = retriever.get_relevant_documents(question)
275
  context_str = "\n".join([doc.page_content for doc in relevant_docs])
276
-
277
  prompt_val = ChatPromptTemplate.from_template(prompt)
278
  formatted_prompt = prompt_val.format(history=history_str, context=context_str, question=question)
279
-
280
  answer = generate_chunked_response(model, formatted_prompt)
281
  answer = re.split(r'Question:|Current Question:', answer)[-1].strip()
282
-
283
  # Remove any remaining prompt instructions from the answer
284
  answer_lines = answer.split('\n')
285
  answer = '\n'.join(line for line in answer_lines if not line.startswith('If') and not line.startswith('Provide'))
286
-
287
  if not web_search:
288
  memory_database[question] = answer
289
-
290
  if not web_search:
291
  conversation_history = manage_conversation_history(question, answer, conversation_history)
292
-
293
  return answer
294
 
295
  def update_vectors(files, use_recursive_splitter):
 
223
 
224
  def ask_question(question, temperature, top_p, repetition_penalty, web_search):
225
  global conversation_history
226
+
227
  if not question:
228
  return "Please enter a question."
229
+
230
  if question in memory_database and not web_search:
231
  answer = memory_database[question]
232
  else:
233
  model = get_model(temperature, top_p, repetition_penalty)
234
  embed = get_embeddings()
235
+
236
  if web_search:
237
  search_results = google_search(question)
238
  context_str = "\n".join([result["text"] for result in search_results if result["text"]])
239
+
240
  # Convert web search results to Document format
241
  web_docs = [Document(page_content=result["text"], metadata={"source": result["link"]}) for result in search_results if result["text"]]
242
+
243
  # Check if the FAISS database exists
244
  if os.path.exists("faiss_database"):
245
  database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
 
247
  else:
248
  database = FAISS.from_documents(web_docs, embed)
249
  database.save_local("faiss_database")
250
+
251
  prompt_template = """
252
  Answer the question based on the following web search results:
253
  Web Search Results:
 
264
  database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
265
  else:
266
  return "No FAISS database found. Please upload documents to create the vector store."
267
+
268
  history_str = "\n".join([f"Q: {item['question']}\nA: {item['answer']}" for item in conversation_history])
269
+
270
  if is_related_to_history(question, conversation_history):
271
  context_str = "No additional context needed. Please refer to the conversation history."
272
  else:
273
  retriever = database.as_retriever()
274
  relevant_docs = retriever.get_relevant_documents(question)
275
  context_str = "\n".join([doc.page_content for doc in relevant_docs])
276
+
277
  prompt_val = ChatPromptTemplate.from_template(prompt)
278
  formatted_prompt = prompt_val.format(history=history_str, context=context_str, question=question)
279
+
280
  answer = generate_chunked_response(model, formatted_prompt)
281
  answer = re.split(r'Question:|Current Question:', answer)[-1].strip()
282
+
283
  # Remove any remaining prompt instructions from the answer
284
  answer_lines = answer.split('\n')
285
  answer = '\n'.join(line for line in answer_lines if not line.startswith('If') and not line.startswith('Provide'))
286
+
287
  if not web_search:
288
  memory_database[question] = answer
289
+
290
  if not web_search:
291
  conversation_history = manage_conversation_history(question, answer, conversation_history)
292
+
293
  return answer
294
 
295
  def update_vectors(files, use_recursive_splitter):