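"""Streamlit chat UI for a hotel booking assistant ("Chatbot Philio"), backed by
the KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b model.

Run locally with: streamlit run app.py
"""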
import streamlit as st
import chatbot as demo_chat
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

st.title("Hi, I am Chatbot Philio :mermaid:")
st.write("I am your hotel booking assistant for today.")
# tokenizer = AutoTokenizer.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
tokenizer, model = demo_chat.load_model()
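# load_model() lives in the local chatbot.py module, which is not part of this
# file. Judging from how its return values are used below, it is assumed to be
# roughly equivalent to:
#
#   def load_model():
#       tokenizer = AutoTokenizer.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
#       model = AutoModelForCausalLM.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
#       return tokenizer, model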

model_identifier = "KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b"
task = "text-generation"  # the task this model was fine-tuned for

# Wrap the already-loaded model in a transformers pipeline.
# Note: model_pipeline is not used below; replies are produced with model.generate() directly.
model_pipeline = pipeline(task, model=model, tokenizer=tokenizer)
#Application
with st.container():
    st.markdown('<div class="scrollable-div">', unsafe_allow_html=True)

    #Langchain memory in session cache
    if 'memory' not in st.session_state:
        st.session_state.memory = demo_chat.demo_miny_memory(model)
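    # demo_miny_memory() is also defined in chatbot.py (not shown); its result is
    # cached in the session state here but only consumed by the commented-out
    # demo_chain() path below, presumably as a LangChain conversation memory.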
    #Check if chat history exists in this session
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = [
            {
                "role": "system",
                "content": "You are a friendly chatbot who always helps the user book a hotel room based on his/her needs."
                + " Based on the current social norms you wait for the user's response to your proposals.",
            },
            {"role": "assistant", "content": "Hello, how can I help you today?"},
        ] #Initialize chat history

    if 'model' not in st.session_state:
        st.session_state.model = model
    #Render the chat history; the system prompt is kept out of the visible transcript
    for message in st.session_state.chat_history:
        if message["role"] != "system":
            with st.chat_message(message["role"]):
                st.write(message["content"])
    #Set up the input text field
    input_text = st.chat_input(placeholder="Here you can chat with our hotel booking model.")

    if input_text:
        with st.chat_message("user"):
            st.write(input_text)
        st.session_state.chat_history.append({"role": "user", "content": input_text}) #append message to chat history

        #chat_response = demo_chat.demo_chain(input_text=input_text, memory=st.session_state.memory, model=chat_model)
        #first_answer = chat_response.split("Human")[0] #Because of predict() the whole conversation is printed; here we separate the first answer only.

        tokenized_chat = tokenizer.apply_chat_template(st.session_state.chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt")
        #st.write(tokenizer.decode(tokenized_chat[0]))
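        # model.generate() returns the prompt tokens followed by the completion,
        # so the prompt portion (the first tokenized_chat.shape[1] tokens) is
        # sliced off below and only the newly generated answer is decoded.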
        outputs = model.generate(tokenized_chat, max_new_tokens=128)
        first_answer = tokenizer.decode(outputs[0][tokenized_chat.shape[1]:], skip_special_tokens=True)
        with st.chat_message("assistant"):
            st.write(first_answer)
        st.session_state.chat_history.append({"role": "assistant", "content": first_answer})

    st.markdown('</div>', unsafe_allow_html=True)