import streamlit as st
import chatbot_bedrock as demo_chat
from transformers import AutoModelForCausalLM, AutoTokenizer

st.title("Hi, I am Chatbot Philio :mermaid:")
st.write("I am your hotel booking assistant for today.")

# tokenizer = AutoTokenizer.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")

# Theme settings (these belong in .streamlit/config.toml, kept here for reference):
# [theme]
# base="light"
# primaryColor="#6b4bff"

tokenizer, model = demo_chat.load_model()
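# Note: Streamlit reruns this whole script on every user interaction, so
# load_model() should cache the tokenizer and model (e.g. with
# @st.cache_resource inside chatbot_bedrock); otherwise every message would
# trigger a full model reload. A sketch of such a module is at the end of
# this file.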
# Application
with st.container():
    # LangChain memory kept in the session cache
    if 'memory' not in st.session_state:
        st.session_state.memory = demo_chat.demo_miny_memory(model)

    # Initialize the chat history if it does not exist in this session yet
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []

    if 'model' not in st.session_state:
        st.write("Model added in state.")
        st.session_state.model = model

    # Render the chat history
    for message in st.session_state.chat_history:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # Set up the input text field
    input_text = st.chat_input(placeholder="Here you can chat with the Llama 2 model.")

    if input_text:
        with st.chat_message("user"):
            st.write(input_text)
        # Append the user message to the chat history
        st.session_state.chat_history.append({"role": "user", "content": input_text})

        chat_response = demo_chat.demo_chain(input_text=input_text, memory=st.session_state.memory, model=model)
        # predict() returns the whole conversation, so keep only the first
        # answer (see the worked example after this block).
        first_answer = chat_response.split("Human")[0]

        with st.chat_message("assistant"):
            st.write(first_answer)
        st.session_state.chat_history.append({"role": "assistant", "content": first_answer})
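# Worked example of the truncation above: with LangChain's default
# "Human:"/"AI:" conversation prompt, a raw generation can run past the answer
# into invented turns, e.g.
#   "Sure, I have rooms available.\nHuman: Is breakfast included?\nAI: ..."
# and split("Human")[0] keeps only "Sure, I have rooms available.\n".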
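# ---------------------------------------------------------------------------
# chatbot_bedrock is not shown in this file. The commented sketch below is a
# guess at a minimal implementation consistent with how the module is called
# above: load_model() returns (tokenizer, model), demo_miny_memory() returns a
# LangChain memory, and demo_chain() runs one conversation turn. The use of
# ConversationChain, HuggingFacePipeline, and max_new_tokens=128 are
# assumptions, not the real module; uncomment into chatbot_bedrock.py to try.
# ---------------------------------------------------------------------------
# import streamlit as st
# from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# from langchain.chains import ConversationChain
# from langchain.memory import ConversationBufferMemory
# from langchain_community.llms import HuggingFacePipeline
#
# @st.cache_resource  # load the weights once per process, not on every rerun
# def load_model():
#     model_id = "KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b"
#     tokenizer = AutoTokenizer.from_pretrained(model_id)
#     model = AutoModelForCausalLM.from_pretrained(model_id)
#     return tokenizer, model
#
# def demo_miny_memory(model):
#     # Default memory_key="history" matches ConversationChain's prompt
#     return ConversationBufferMemory()
#
# def demo_chain(input_text, memory, model):
#     tokenizer, _ = load_model()  # cached, so this lookup is cheap
#     llm = HuggingFacePipeline(pipeline=pipeline(
#         "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=128))
#     chain = ConversationChain(llm=llm, memory=memory)
#     return chain.predict(input=input_text)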