srinuksv committed on
Commit
a0505ca
1 Parent(s): 84dca9c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -30
app.py CHANGED
@@ -1,9 +1,9 @@
1
- import os
2
- from dotenv import load_dotenv
3
  import gradio as gr
4
- from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
5
  from llama_index.llms.huggingface import HuggingFaceInferenceAPI
 
6
  from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 
 
7
 
8
  # Load environment variables
9
  load_dotenv()
@@ -82,35 +82,34 @@ def handle_query(query):
82
  print("Processing PDF ingestion from directory:", PDF_DIRECTORY)
83
  data_ingestion_from_directory()
84
 
85
- # Define the input and output components for the Gradio interface
86
- input_component = gr.Textbox(
87
- label="User:",
88
- placeholder="Type your message...",
89
- lines=2
90
- )
91
-
92
- output_component = gr.Output(
93
- label="Bot:",
94
- type="text",
95
- initial="Bot's response will appear here...",
96
- )
97
-
98
- # Function to handle queries
99
- def chatbot_handler(query):
100
- with output_component:
101
- response = handle_query(query)
102
- print(f"User: {query}\nBot: {response}\n")
103
- return response
104
-
105
- # Create the Gradio interface with chat-like settings
106
- interface = gr.Interface(
107
- fn=chatbot_handler,
108
- inputs=input_component,
109
- outputs=output_component,
110
- title="RedfernsTech Chatbot",
111
  theme="compact",
112
  live=True # Enables real-time updates
113
  )
114
 
115
  # Launch the Gradio interface
116
- interface.launch()
 
 
 
1
  import gradio as gr
 
2
  from llama_index.llms.huggingface import HuggingFaceInferenceAPI
3
+ from llama_index.core import ChatPromptTemplate, Settings, StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader
4
  from llama_index.embeddings.huggingface import HuggingFaceEmbedding
5
+ from dotenv import load_dotenv
6
+ import os
7
 
8
  # Load environment variables
9
  load_dotenv()
 
82
  print("Processing PDF ingestion from directory:", PDF_DIRECTORY)
83
  data_ingestion_from_directory()
84
 
85
def predict(message, history):
    """Stream a chat reply for *message*, given prior turns in *history*.

    Parameters:
        message: the user's new input string.
        history: list of (user_message, bot_message) pairs from earlier turns.

    Yields:
        The accumulated response string after each streamed chunk, so the UI
        can render tokens as they arrive.
    """
    # Rebuild the full conversation: system prompt, then alternating turns.
    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    for user_message, bot_message in history:
        if user_message:
            messages.append({"role": "user", "content": user_message})
        if bot_message:
            messages.append({"role": "assistant", "content": bot_message})
    messages.append({"role": "user", "content": message})

    # BUG FIX: `Settings.llm.create_chat_completion(...)` is the
    # llama-cpp-python API and does not exist on llama_index LLMs such as
    # HuggingFaceInferenceAPI. llama_index streams via `stream_chat`, which
    # takes ChatMessage objects and yields chunks exposing `.delta`.
    # Local import: the file already depends on llama_index at module level.
    from llama_index.core.llms import ChatMessage

    chat_messages = [
        ChatMessage(role=m["role"], content=m["content"]) for m in messages
    ]

    response = ""
    for chunk in Settings.llm.stream_chat(chat_messages):
        part = chunk.delta  # incremental text produced by this chunk
        if part:
            response += part
            yield response
104
# Create the Gradio chat interface.
# BUG FIX: `predict(message, history)` takes two arguments, but the previous
# wiring (gr.Interface with a single Textbox input) would have raised a
# TypeError on every submit. gr.ChatInterface supplies (message, history)
# automatically and renders the streamed generator output incrementally.
# Also dropped: `readonly=True` (gr.Textbox has no such parameter — the chat
# widget's bot pane is non-editable by design) and `live=True` (not a
# ChatInterface parameter; streaming already gives real-time updates).
demo = gr.ChatInterface(
    fn=predict,
    title="RedFernsTech Chatbot",
)

# Launch the Gradio interface
demo.launch()