Illia56 committed
Commit e07786f
1 Parent(s): c88185a

Update app.py

Files changed (1): app.py +4 -4
app.py CHANGED
@@ -12,11 +12,11 @@ a Llama 2 model with 70B parameters fine-tuned for chat instructions.
 
 
 with st.sidebar:
-    system_promptSide = st.text_input("Optional system prompt:")
+    # system_promptSide = st.text_input("Optional system prompt:")
     temperatureSide = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
     max_new_tokensSide = st.slider("Max new tokens", min_value=0.0, max_value=4096.0, value=4096.0, step=64.0)
-    ToppSide = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
-    RepetitionpenaltySide = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)
+    # ToppSide = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
+    # RepetitionpenaltySide = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)
 
 
 
@@ -59,7 +59,7 @@ if prompt := st.chat_input("Ask LLama-2-70b anything..."):
     # Add user message to chat history
     st.session_state.messages.append({"role": "human", "content": prompt})
 
-    response = predict(prompt, system_promptSide,temperatureSide,max_new_tokensSide,ToppSide,RepetitionpenaltySide)
+    response = predict(message=prompt, temperature= temperatureSide,max_new_tokens=max_new_tokensSide)
     # Display assistant response in chat message container
     with st.chat_message("assistant", avatar='🦙'):
         st.markdown(response)
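
The net effect of the commit is that the system-prompt, top-p, and repetition-penalty sidebar controls are commented out and predict is now called with only message, temperature, and max_new_tokens as keyword arguments. As a minimal sketch (not the app's actual inference code), the assumed predict() signature below would accept both the old positional call and the new keyword call; the stub body and the defaults for the removed controls are illustrative placeholders.

# Minimal sketch (assumption): a predict() signature compatible with both the
# old positional call and the new keyword-only call in this commit. The stub
# body and default values are placeholders, not the app's real backend code.
def predict(message, system_prompt="", temperature=0.9, max_new_tokens=4096,
            top_p=0.6, repetition_penalty=1.2):
    # A real implementation would forward these settings to a Llama-2-70b-chat
    # backend; this stub just echoes them so the sketch runs on its own.
    return (f"[temperature={temperature}, max_new_tokens={max_new_tokens}] "
            f"{message}")

# The call introduced by the commit falls back to the defaults for the
# controls that were removed from the sidebar:
print(predict(message="Hello, Llama!", temperature=0.9, max_new_tokens=4096.0))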