KvrParaskevi committed
Commit 353b462
1 Parent(s): bd1a52b

Update app.py

Files changed (1): app.py +14 -12
app.py CHANGED
@@ -42,7 +42,7 @@ with st.container():
     {
         "role": "system",
         "content": "You are a friendly chatbot who always helps the user book a hotel room based on his/her needs."+
-        "Before you make a booking you should ask for personal information: first and last name, email and phone number to confirm booking."
+        "Before you confirm a booking you should ask for user's personal information: first and last name, email and phone number to confirm booking."
         + "Based on the current social norms you wait for the user's response to your proposals.",
     },
     {"role": "assistant", "content": "Hello, how can I help you today?"},
@@ -57,18 +57,20 @@ with st.container():
     #input_text = st.chat_input(placeholder="Here you can chat with our hotel booking model.")
 
     if input_text := st.chat_input(placeholder="Here you can chat with our hotel booking model."):
+
         with st.chat_message("user"):
             st.markdown(input_text)
         st.session_state.chat_history.append({"role": "user", "content": input_text})  # append message to chat history
-
-        #chat_response = demo_chat.demo_chain(input_text=input_text, memory=st.session_state.memory, model=chat_model)
-        #first_answer = chat_response.split("Human")[0]  # Because of Predict it prints the whole conversation. Here we separate the first answer only.
-        tokenized_chat = tokenizer.apply_chat_template(st.session_state.chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt")
-        #st.write(tokenizer.decode(tokenized_chat[0]))
-        outputs = model.generate(tokenized_chat, max_new_tokens=128)
-        first_answer = tokenizer.decode(outputs[0][tokenized_chat.shape[1]:], skip_special_tokens=True)
-
-        with st.chat_message("assistant"):
-            st.markdown(first_answer)
-            st.session_state.chat_history.append({"role": "assistant", "content": first_answer})
+
+        with st.spinner("Generating response..."):
+            #chat_response = demo_chat.demo_chain(input_text=input_text, memory=st.session_state.memory, model=chat_model)
+            #first_answer = chat_response.split("Human")[0]  # Because of Predict it prints the whole conversation. Here we separate the first answer only.
+            tokenized_chat = tokenizer.apply_chat_template(st.session_state.chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt")
+            #st.write(tokenizer.decode(tokenized_chat[0]))
+            outputs = model.generate(tokenized_chat, max_new_tokens=128)
+            first_answer = tokenizer.decode(outputs[0][tokenized_chat.shape[1]:], skip_special_tokens=True)
+
+        with st.chat_message("assistant"):
+            st.markdown(first_answer)
+            st.session_state.chat_history.append({"role": "assistant", "content": first_answer})
 
     st.markdown('</div>', unsafe_allow_html=True)
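The generation code itself is unchanged apart from being re-indented under the spinner, but the flow is worth spelling out: `apply_chat_template` renders the role/content list into the model's chat prompt format and tokenizes it, `generate` returns the prompt *plus* the completion, and slicing `outputs[0]` from `tokenized_chat.shape[1]` onward keeps only the newly generated answer. A standalone sketch of the same flow with the `transformers` API; the model id here is a hypothetical stand-in, since the diff never shows how `model` and `tokenizer` are loaded:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical model id for illustration -- app.py's actual model is not
# shown in this diff.
model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

chat_history = [
    {"role": "system", "content": "You are a friendly hotel booking assistant."},
    {"role": "user", "content": "I need a double room for two nights."},
]

# Render the chat into the model's prompt format and tokenize it in one step.
tokenized_chat = tokenizer.apply_chat_template(
    chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt"
)

# generate() returns the prompt and the completion as one sequence of ids.
outputs = model.generate(tokenized_chat, max_new_tokens=128)

# Slice off the first tokenized_chat.shape[1] tokens (the prompt) so only
# the model's new answer is decoded.
first_answer = tokenizer.decode(outputs[0][tokenized_chat.shape[1]:], skip_special_tokens=True)
print(first_answer)
```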
 
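The user-visible change in this hunk is the `st.spinner("Generating response...")` wrapper: everything indented under it runs while Streamlit shows a busy indicator, so the page no longer looks frozen during the slow `model.generate` call, and the assistant message is rendered once the spinner block exits. A minimal standalone sketch of the pattern, with an echo reply standing in for real generation:

```python
import time
import streamlit as st

if prompt := st.chat_input("Here you can chat with our hotel booking model."):
    with st.chat_message("user"):
        st.markdown(prompt)

    # The spinner is visible only while this block is executing.
    with st.spinner("Generating response..."):
        time.sleep(2)                     # stand-in for model.generate
        reply = f"You said: {prompt}"     # stand-in for the decoded answer

    # The spinner disappears when the block exits; now render the reply.
    with st.chat_message("assistant"):
        st.markdown(reply)
```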