KvrParaskevi committed on
Commit 0b7c3d3
1 Parent(s): 90a40c9

Update app.py

Files changed (1)
  1. app.py +38 -38
app.py CHANGED
@@ -10,41 +10,41 @@ st.write("I am your hotel booking assistant for today.")
 # [theme]
 # base="light"
 # primaryColor="#6b4bff"
-if(hugging_face_key != ""):
-    model_Loading = True
-    if model_Loading == True:
-        model = demo_chat.get_Model()
-        model_Loading = False
-
-    #Application
-    with st.container():
-        st.markdown('<div class="scrollable-div">', unsafe_allow_html=True)
-        #Langchain memory in session cache
-        if 'memory' not in st.session_state:
-            st.write("Memory is initilizing ...")
-            st.session_state.memory = demo_chat.demo_miny_memory(model)
-
-        #Check if chat history exists in this session
-        if 'chat_history' not in st.session_state:
-            st.session_state.chat_history = [ ] #Initialize chat history
-
-        #renders chat history
-        for message in st.session_state.chat_history:
-            with st.chat_message(message["role"]):
-                st.write(message["content"])
-
-        #Set up input text field
-        input_text = st.chat_input(placeholder="Here you can chat with Llamma 2 model.")
-
-        if input_text:
-            with st.chat_message("user"):
-                st.write(input_text)
-                st.session_state.chat_history.append({"role" : "user", "content" : input_text}) #append message to chat history
-
-            chat_response = demo_chat.demo_chain(input_text=input_text, memory=st.session_state.memory, model= model)
-            first_answer = chat_response.split("Human")[0] #Because of Predict it prints the whole conversation.Here we seperate the first answer only.
-
-            with st.chat_message("assistant"):
-                st.write(first_answer)
-                st.session_state.chat_history.append({"role": "assistant", "content": first_answer})
-    st.markdown('</div>', unsafe_allow_html=True)
+
+model_Loading = True
+if model_Loading == True:
+    model = demo_chat.get_Model()
+    model_Loading = False
+
+#Application
+with st.container():
+    st.markdown('<div class="scrollable-div">', unsafe_allow_html=True)
+    #Langchain memory in session cache
+    if 'memory' not in st.session_state:
+        st.write("Memory is initilizing ...")
+        st.session_state.memory = demo_chat.demo_miny_memory(model)
+
+    #Check if chat history exists in this session
+    if 'chat_history' not in st.session_state:
+        st.session_state.chat_history = [ ] #Initialize chat history
+
+    #renders chat history
+    for message in st.session_state.chat_history:
+        with st.chat_message(message["role"]):
+            st.write(message["content"])
+
+    #Set up input text field
+    input_text = st.chat_input(placeholder="Here you can chat with Llamma 2 model.")
+
+    if input_text:
+        with st.chat_message("user"):
+            st.write(input_text)
+            st.session_state.chat_history.append({"role" : "user", "content" : input_text}) #append message to chat history
+
+        chat_response = demo_chat.demo_chain(input_text=input_text, memory=st.session_state.memory, model= model)
+        first_answer = chat_response.split("Human")[0] #Because of Predict it prints the whole conversation.Here we seperate the first answer only.
+
+        with st.chat_message("assistant"):
+            st.write(first_answer)
+            st.session_state.chat_history.append({"role": "assistant", "content": first_answer})
+st.markdown('</div>', unsafe_allow_html=True)
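
The diff drops the `if(hugging_face_key != ""):` guard so the UI is built unconditionally; the remaining lines change only in indentation. For context, app.py delegates model loading, memory, and chaining to a local `demo_chat` module. The three function names below come from the diff; their bodies are a hypothetical sketch only — the LangChain backend, the `repo_id`, and all internals are assumptions, not the actual source of this repository.

```python
# demo_chat.py — hypothetical sketch of the module app.py imports.
# Only get_Model, demo_miny_memory, and demo_chain are taken from the
# diff above; everything else is an assumption about their implementation.
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import HuggingFaceHub  # assumed backend

def get_Model():
    # Assumption: a Llama 2 chat model served via the Hugging Face Hub
    # (requires HUGGINGFACEHUB_API_TOKEN in the environment).
    return HuggingFaceHub(repo_id="meta-llama/Llama-2-7b-chat-hf")

def demo_miny_memory(model):
    # A small conversation buffer; its default memory_key ("history")
    # matches what ConversationChain expects. The model argument is
    # kept only to mirror the call site in app.py.
    return ConversationBufferMemory()

def demo_chain(input_text, memory, model):
    # ConversationChain.predict can echo the running transcript
    # ("Human: ... AI: ..."), which is why app.py keeps only the text
    # before the next "Human" turn via chat_response.split("Human")[0].
    chain = ConversationChain(llm=model, memory=memory)
    return chain.predict(input=input_text)
```

If the real module caches its model, Streamlit's `st.cache_resource` decorator would be the idiomatic replacement for the hand-rolled `model_Loading` flag, since it loads the resource once per process rather than once per script rerun.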