# SRUNU / app.py
from dotenv import load_dotenv
import gradio as gr
import os
from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from sentence_transformers import SentenceTransformer
# Load environment variables
load_dotenv()
# Configure the LlamaIndex settings
Settings.llm = HuggingFaceInferenceAPI(
    model_name="meta-llama/Meta-Llama-3-8B-Instruct",
    tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
    context_window=3000,
    token=os.getenv("HF_TOKEN"),
    max_new_tokens=512,
    generate_kwargs={"temperature": 0.1},
)
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)
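# The HuggingFaceInferenceAPI configuration above reads HF_TOKEN from the
# environment (loaded via load_dotenv()). A minimal .env for local runs might
# look like the sketch below; the token value is a placeholder, not a real key.
#
#   HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxxxxxx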
# Define the directory for persistent storage and data
PERSIST_DIR = "db"
PDF_DIRECTORY = 'data'  # Directory containing the source PDF files
# Ensure directories exist
os.makedirs(PDF_DIRECTORY, exist_ok=True)
os.makedirs(PERSIST_DIR, exist_ok=True)
# Variable to store current chat conversation
current_chat_history = []
def data_ingestion_from_directory():
    # Use SimpleDirectoryReader on the directory containing the PDF files
    documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
    storage_context = StorageContext.from_defaults()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir=PERSIST_DIR)
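# A minimal sketch (not wired into the app; the helper name is ours) of how a
# previously persisted index could be reused instead of re-ingesting every PDF
# on each start-up. It assumes PERSIST_DIR is written only by
# data_ingestion_from_directory() above.
def load_or_build_index():
    if os.listdir(PERSIST_DIR):
        # Reuse the index persisted by an earlier run.
        storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
        return load_index_from_storage(storage_context)
    # Otherwise ingest the PDFs once and persist the resulting index.
    documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir=PERSIST_DIR)
    return index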
def handle_query(query):
    chat_text_qa_msgs = [
        (
            "user",
            """
            Your name is FernAI. Your aim is to provide the best service and
            information about RedFerns Tech. Answer based on the flow of the
            conversation, keep your answers short and effective, and encourage
            the user to engage with our company's services.
            {context_str}
            Question:
            {query_str}
            """
        )
    ]
    text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
    # Load index from storage
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)
    # Use chat history to enhance response
    context_str = ""
    for past_query, response in reversed(current_chat_history):
        if past_query.strip():
            context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
    query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
    answer = query_engine.query(query)
    if hasattr(answer, 'response'):
        response = answer.response
    elif isinstance(answer, dict) and 'response' in answer:
        response = answer['response']
    else:
        response = "Sorry, I couldn't find an answer."
    # Update current chat history
    current_chat_history.append((query, response))
    return response
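# Illustrative only: handle_query can also be exercised outside the Gradio UI,
# e.g. from a REPL, once the index has been built; the question below is a
# made-up sample.
#   handle_query("What services does RedFerns Tech offer?")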
# Ingest the PDFs and build the index at start-up
print("Processing PDF ingestion from directory:", PDF_DIRECTORY)
data_ingestion_from_directory()
# Define the function to handle predictions
"""def predict(message,history):
response = handle_query(message)
return response"""
def predict(message, history):
    logo_html = '''
    <div class="circle-logo">
        <img src="1.jpeg" alt="FernAi">
    </div>
    '''
    response = handle_query(message)
    response_with_logo = f'<div class="response-with-logo">{logo_html}<div class="response-text">{response}</div></div>'
    return response_with_logo
# Custom CSS for styling
css = '''
.gradio-container {
    display: flex;
    flex-direction: column;
    width: 100%;
    max-width: 450px;
    margin: 0 auto;
    padding: 20px;
    border: 1px solid #ddd;
    border-radius: 10px;
    background-color: #fff;
    box-shadow: 0 4px 8px rgba(0,0,0,0.1);
    height: 100%;
    max-height: 600px;
}
.gradio-logo {
    text-align: center;
    margin-bottom: 20px;
}
.circle-logo {
    display: inline-block;
    width: 40px;
    height: 40px;
    border-radius: 50%;
    overflow: hidden;
    margin-right: 10px;
    vertical-align: middle;
}
.circle-logo img {
    width: 100%;
    height: 100%;
    object-fit: cover;
}
.response-with-logo {
    display: flex;
    align-items: center;
    margin-bottom: 10px;
}
.response-text {
    display: inline-block;
    vertical-align: middle;
    font-size: 16px;
    background-color: #fff;
    border: 1px solid #ced4da;
    border-radius: 15px 15px 15px 0;
    padding: 10px;
    max-width: 80%;
    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.gradio-chat-history {
    flex: 1;
    overflow-y: auto;
    padding: 15px;
    border-bottom: 1px solid #ddd;
    background-color: #f9f9f9;
    border-radius: 5px;
    margin-bottom: 10px;
    max-height: 500px;
}
.gradio-message {
    margin-bottom: 15px;
    display: flex;
    flex-direction: column;
}
.gradio-message.user .gradio-message-content {
    background-color: #E1FFC7;
    align-self: flex-end;
    border: 1px solid #c3e6cb;
    border-radius: 15px 15px 0 15px;
    padding: 10px;
    font-size: 16px;
    margin-bottom: 5px;
    max-width: 80%;
}
.gradio-message.bot .gradio-message-content {
    background-color: #fff;
    align-self: flex-start;
    border: 1px solid #ced4da;
    border-radius: 15px 15px 15px 0;
    padding: 10px;
    font-size: 16px;
    margin-bottom: 5px;
    max-width: 80%;
}
.gradio-footer {
    display: flex;
    padding: 10px;
    border-top: 1px solid #ddd;
    background-color: #F8D7DA;
    position: absolute;
    bottom: 0;
    width: calc(100% - 40px);
}
footer {
    display: none !important;
    background-color: #F8D7DA;
}
.gradio-chat-history .gradio-message.bot .gradio-message-content::before {
    content: none;
}
'''
logo_html = '''
<div class="gradio-logo">
<img src="2.png" alt="FernAi" style="display: block; margin: 0 auto; width: 100px; height: 100px;">
</div>
'''
# Create the Blocks layout with the custom HTML and ChatInterface
with gr.Blocks(theme=gr.themes.Monochrome(), fill_height=True, css=css) as demo:
    with gr.Column():
        gr.HTML(logo_html)
        gr.ChatInterface(predict, clear_btn=None, undo_btn=None, retry_btn=None)
# Launch the interface
demo.launch()