srinuksv committed
Commit 162343b
1 Parent(s): e7469ff

Update app.py

Files changed (1):
  1. app.py +37 -5
app.py CHANGED
@@ -5,10 +5,15 @@ from llama_index.core import StorageContext, load_index_from_storage, VectorStor
 from llama_index.llms.huggingface import HuggingFaceInferenceAPI
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from sentence_transformers import SentenceTransformer
-
+import firebase_admin
+from firebase_admin import db, credentials
+import datetime
+import uuid
 # Load environment variables
 load_dotenv()
-
+# authenticate to firebase
+cred = credentials.Certificate("redfernstech-fd8fe-firebase-adminsdk-g9vcn-0537b4efd6.json")
+firebase_admin.initialize_app(cred, {"databaseURL": "https://redfernstech-fd8fe-default-rtdb.firebaseio.com/"})
 # Configure the Llama index settings
 Settings.llm = HuggingFaceInferenceAPI(
     model_name="meta-llama/Meta-Llama-3-8B-Instruct",
@@ -98,6 +103,35 @@ def predict(message, history):
     response = handle_query(message)
     response_with_logo = f'<div class="response-with-logo">{logo_html}<div class="response-text">{response}</div></div>'
     return response_with_logo
+def save_chat_message(session_id, message_data):
+    ref = db.reference(f'/chat_history/{session_id}')  # Use the session ID to save chat data
+    ref.push().set(message_data)
+
+# Define your Gradio chat interface function (replace with your actual logic)
+def chat_interface(message, history):
+    try:
+        # Generate a unique session ID for this chat session
+        session_id = str(uuid.uuid4())
+
+        # Process the user message and generate a response (your chatbot logic)
+        response = handle_query(message)
+
+        # Capture the message data
+        message_data = {
+            "sender": "user",
+            "message": message,
+            "response": response,
+            "timestamp": datetime.datetime.now().isoformat()  # Use a library like datetime
+        }
+
+        # Call the save function to store in Firebase with the generated session ID
+        save_chat_message(session_id, message_data)
+
+        # Return the bot response
+        return response
+    except Exception as e:
+        return str(e)
+
 # Custom CSS for styling
 css = '''
 .circle-logo {
@@ -109,13 +143,11 @@ css = '''
     margin-right: 10px;
     vertical-align: middle;
 }
-
 .circle-logo img {
     width: 100%;
     height: 100%;
     object-fit: cover;
 }
-
 .response-with-logo {
     display: flex;
     align-items: center;
@@ -127,7 +159,7 @@ footer {
 }
 label.svelte-1b6s6s {display: none}
 '''
-gr.ChatInterface(predict,
+gr.ChatInterface(chat_interface,
                  css=css,
                  description="FernAI",
                  clear_btn=None, undo_btn=None, retry_btn=None,
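
For reference, the records this commit writes under /chat_history/{session_id} can be read back through the same firebase_admin Realtime Database client. The sketch below is illustrative only: the helper name fetch_chat_history and the example session ID are not part of the commit, and it assumes the same service-account file and database URL that app.py now initializes.

import firebase_admin
from firebase_admin import credentials, db

# Assumed to match the credentials and database URL added in this commit.
cred = credentials.Certificate("redfernstech-fd8fe-firebase-adminsdk-g9vcn-0537b4efd6.json")
firebase_admin.initialize_app(cred, {"databaseURL": "https://redfernstech-fd8fe-default-rtdb.firebaseio.com/"})

def fetch_chat_history(session_id):
    # Each push() in save_chat_message creates one child key under this path.
    ref = db.reference(f"/chat_history/{session_id}")
    return ref.get() or {}

# Example usage with a hypothetical session ID.
for key, record in fetch_chat_history("00000000-0000-0000-0000-000000000000").items():
    print(record["timestamp"], record["message"], "->", record["response"])

Note that, as committed, chat_interface generates a fresh uuid4 on every call, so each user message is stored under its own session_id key rather than grouped per conversation.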