Syed Junaid Iqbal committed on
Commit 13bb955 • 1 Parent(s): 850682a

Update app.py

Files changed (1)
  1. app.py +16 -18
app.py CHANGED
@@ -17,7 +17,6 @@ from langchain.prompts import PromptTemplate
 from langchain import hub
 import os
 import glob
-import gc
 
 
 # TEXT LOADERS
@@ -103,15 +102,15 @@ def get_vectorstore(text_chunks, embeddings):
 
 def get_conversation_chain(vectorstore):
 
-    model_path = "models/llama-2-13b-chat.Q4_K_S.gguf"
+    model_path = st.session_state.model
     callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
 
     llm = LlamaCpp(model_path= model_path,
                    n_ctx=4000,
                    max_tokens= 500,
-                   n_gpu_layers = 50,
+                   fp = 50,
                    n_batch = 512,
-                   callback_manager = callback_manager
+                   callback_manager = callback_manager,
                    verbose=True)
 
     memory = ConversationBufferMemory(
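Two review notes on this hunk: the trailing comma added after callback_manager fixes what was previously a syntax error, but fp is not a documented parameter of LangChain's LlamaCpp wrapper (n_gpu_layers is the usual name for the GPU-offload setting), so that rename likely loses GPU offload. A minimal sketch of a construction known to work, assuming the langchain-community wrapper; the model path is illustrative:

# Hedged sketch, not the app's exact code; assumes langchain-community's LlamaCpp.
from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler
from langchain_community.llms import LlamaCpp

callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm = LlamaCpp(
    model_path="./models/mistral-7b-instruct-v0.2.Q5_K_M.gguf",  # any local .gguf
    n_ctx=4000,        # context window, in tokens
    max_tokens=500,    # cap on tokens generated per call
    n_gpu_layers=50,   # documented GPU-offload setting; "fp" is not a known field
    n_batch=512,       # prompt-processing batch size
    callback_manager=callback_manager,  # the trailing comma here was the syntax fix
    verbose=True,
)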
@@ -123,18 +122,10 @@ def get_conversation_chain(vectorstore):
     Try to summarise the content and keep the answer to the point.
     If you don't know the answer, just say that you don't know, don't try to make up an answer.
 
-    Followe the template below
-    Example:
-    Question : how many paid leaves do i have ?
-    Answer : The number of paid leaves varies depending on the type of leave, like privilege leave you're entitled to a maximum of 21 days in a calendar year. Other leaves might have different entitlements. thanks for asking!
-    make sure to add "thanks for asking!" after every answer
 
-    {context}
-
+    Context: {context}
     Question: {question}
     Answer:
-
-    Just answer to the point!
     """
 
     rag_prompt_custom = PromptTemplate.from_template(template)
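Keeping a {context} placeholder is load-bearing: the retrieval chain injects the fetched document chunks there, and the Context: label keeps the prompt readable. A quick sketch of how the slimmed-down template resolves; the template's opening lines sit above this hunk, so the first line below is assumed:

from langchain.prompts import PromptTemplate

# Only the tail of the template is visible in the hunk; the opening line is assumed.
template = """Use the following pieces of context to answer the question.
Try to summarise the content and keep the answer to the point.
If you don't know the answer, just say that you don't know, don't try to make up an answer.

Context: {context}
Question: {question}
Answer:
"""

rag_prompt_custom = PromptTemplate.from_template(template)
print(rag_prompt_custom.input_variables)   # ['context', 'question']
print(rag_prompt_custom.format(
    context="Privilege leave: a maximum of 21 days per calendar year.",
    question="How many paid leaves do I have?",
))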
@@ -189,11 +180,11 @@ def add_rounded_edges(image_path="./randstad_featuredimage.png", radius=30):
 
 def main():
     load_dotenv()
-    gc.collect()
     st.set_page_config(page_title="Chat with multiple Files",
                        page_icon=":books:")
     st.write(css, unsafe_allow_html=True)
 
+
     if "conversation" not in st.session_state:
         st.session_state.conversation = None
     if "chat_history" not in st.session_state:
@@ -201,20 +192,29 @@ def main():
 
     st.title("💬 Randstad HR Chatbot")
     st.subheader("🚀 A HR powered by Generative AI")
+
+    # default model
+    st.session_state.model = "./models/mistral-7b-instruct-v0.2.Q5_K_M.gguf"
     # user_question = st.text_input("Ask a question about your documents:")
 
     st.session_state.embeddings = FastEmbedEmbeddings( model_name= "BAAI/bge-small-en-v1.5",
                                                        cache_dir="./embedding_model/")
 
-    if len(glob.glob("./vectordb/*.sqlite3")) > 0:
+    if len(glob.glob("./vectordb/*.sqlite3")) > 0 :
 
         vectorstore = Chroma(persist_directory="./vectordb/", embedding_function=st.session_state.embeddings)
         st.session_state.conversation = get_conversation_chain(vectorstore)
         handle_userinput()
 
     with st.sidebar:
+
+        # calling a
         add_rounded_edges()
 
+        st.subheader("Select Your Embedding Model Model")
+        st.session_state.model = st.selectbox( 'Models', tuple( glob.glob('./models/*.gguf') ) )
+
+
        st.subheader("Your documents")
        docs = st.file_uploader(
            "Upload File (pdf,text,csv...) and click 'Process'", accept_multiple_files=True)
@@ -257,6 +257,4 @@ if __name__ == '__main__':
         print("Command executed successfully.")
     except subprocess.CalledProcessError as e:
         print(f"Error: {e}")
-
-    main()
-
+    main()
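Net effect of the last hunk: main() (the Streamlit app function defined earlier in app.py) now sits flush against the try/except instead of dangling after blank lines, and it still runs whether or not the subprocess command fails, since the except swallows the error. The rendered diff hides indentation, so this sketch of the resulting entry point is an assumption; the actual command is outside the hunk and a placeholder stands in for it:

import subprocess

if __name__ == '__main__':
    try:
        # placeholder: the real command is outside this hunk
        subprocess.run(["true"], check=True)
        print("Command executed successfully.")
    except subprocess.CalledProcessError as e:
        print(f"Error: {e}")
    main()  # runs on success and failure alike, since the except swallows the error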
 