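# Streamlit front end for the hotel-booking assistant; launch with
# `streamlit run app.py` (or whatever this file is named). Model loading and
# memory helpers come from the local chatbot.py module, imported as demo_chat.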
import streamlit as st
import chatbot as demo_chat
from transformers import pipeline

st.title("Hi, I am Chatbot Philio :mermaid:")
st.write("I am your hotel booking assistant for today.")

# tokenizer = AutoTokenizer.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
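# load_model() is implemented in chatbot.py. A rough sketch of what such a helper
# typically wraps (an assumption about chatbot.py, not its actual code):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#     def load_model():
#         repo = "KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b"
#         return AutoTokenizer.from_pretrained(repo), AutoModelForCausalLM.from_pretrained(repo)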

tokenizer, model = demo_chat.load_model()

model_identifier = "KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b"
task = "text-generation"  # the task this fine-tuned model serves

# Wrap the already-loaded model and tokenizer in a transformers pipeline.
# NOTE: model_pipeline is never used below; replies are produced with model.generate().
model_pipeline = pipeline(task, model=model, tokenizer=tokenizer)
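# If the pipeline were used instead of model.generate(), a call would look roughly
# like this (a sketch, not code from the original app):
#
#     reply = model_pipeline(prompt_text, max_new_tokens=128)[0]["generated_text"]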


# Application
with st.container():
    st.markdown('<div class="scrollable-div">', unsafe_allow_html=True)

    # LangChain memory, cached in the session state
    if 'memory' not in st.session_state:
        st.session_state.memory = demo_chat.demo_miny_memory(model)
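    # demo_miny_memory() presumably builds a LangChain conversation memory object
    # (an assumption about chatbot.py); storing it in st.session_state keeps it
    # alive across Streamlit's top-to-bottom script reruns.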

    # Initialize the chat history for this session if it does not exist yet
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = [
            {
                "role": "system",
                "content": "You are a friendly chatbot who always helps the user book a hotel room based on his/her needs."
                + " Based on the current social norms you wait for the user's response to your proposals.",
            },
            {"role": "assistant", "content": "Hello, how can I help you today?"},
        ]

    if 'model' not in st.session_state:
        st.session_state.model = model

    # Render the chat history, skipping the system prompt
    for message in st.session_state.chat_history:
        if message["role"] != "system":
            with st.chat_message(message["role"]):
                st.write(message["content"])

    # Input field for the user's message
    input_text = st.chat_input(placeholder="Here you can chat with our hotel booking model.")

    if input_text:
        with st.chat_message("user"):
            st.write(input_text)
        # Append the user message to the chat history
        st.session_state.chat_history.append({"role": "user", "content": input_text})

        # Older LangChain-based approach, kept for reference:
        # chat_response = demo_chat.demo_chain(input_text=input_text, memory=st.session_state.memory, model=chat_model)
        # first_answer = chat_response.split("Human")[0]  # predict() returns the whole conversation, so keep only the first answer.
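        # apply_chat_template() renders the message history with the model's own chat
        # template and returns the prompt as a tensor of token ids; add_generation_prompt
        # appends the tokens that cue the assistant's next turn.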
        tokenized_chat = tokenizer.apply_chat_template(st.session_state.chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt")
        # st.write(tokenizer.decode(tokenized_chat[0]))  # uncomment to inspect the formatted prompt
        outputs = model.generate(tokenized_chat, max_new_tokens=128)
        # generate() returns the prompt followed by the new tokens, so slice off the
        # prompt before decoding to keep only the model's answer.
        first_answer = tokenizer.decode(outputs[0][tokenized_chat.shape[1]:], skip_special_tokens=True)
        
        with st.chat_message("assistant"):
            st.write(first_answer)
        st.session_state.chat_history.append({"role": "assistant", "content": first_answer})
    st.markdown('</div>', unsafe_allow_html=True)