import streamlit as st
from huggingface_hub import InferenceClient
import os

st.title("Hey, I'm Functional Bot")

base_url = "https://api-inference.huggingface.co/models/"
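
# NOTE: anonymous calls to the serverless Inference API are rate-limited, and
# some models gate access. A minimal sketch of token-aware client creation
# (assumes an HF_TOKEN environment variable; hypothetical helper, not wired
# into the app below):
def make_client(model_url):
    # Falls back to anonymous access when HF_TOKEN is unset.
    return InferenceClient(model=model_url, token=os.environ.get("HF_TOKEN"))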

model_links = {
    "Mistral-7B": base_url + "mistralai/Mistral-7B-Instruct-v0.2",
    "Mixtral-8x22B": base_url + "mistral-community/Mixtral-8x22B-v0.1",
    "Phi-3": base_url + "microsoft/Phi-3-mini-4k-instruct"
}

model_info = {
    "Mistral-7B": {
        'description': """The Mistral-7B model is a **Large Language Model (LLM)** that can hold question-and-answer conversations.\n \
        \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},

    "Mixtral-8x22B": {
        'description': """The Mixtral-8x22B model is a **Large Language Model (LLM)** that can hold question-and-answer conversations.\n \
        \nIt was created by the [**Mistral AI**](https://mistral.ai/news/mixtral-8x22b/) team as a sparse mixture-of-experts model with **141 billion parameters**, of which about 39 billion are active per token. \n""",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},

    "Phi-3": {
        'description': """The Phi-3 model is a **Large Language Model (LLM)** that can hold question-and-answer conversations.\n \
        \nIt was created by [**Microsoft**](https://news.microsoft.com/source/features/ai/the-phi-3-small-language-models-with-big-potential/) and has **3.8 billion parameters.** \n""",
        'logo': 'https://www.techfinitive.com/wp-content/uploads/2023/07/microsoft-365-copilot-jpg.webp'},
}

def format_prompt(message, custom_instructions=None):
    '''
    Wraps the message (and optional instructions) in Mistral-style [INST]
    tags. Note: Phi-3 expects a different chat template, so this format
    only matches the Mistral-family models above.
    '''
    prompt = ""
    if custom_instructions:
        prompt += f"[INST] {custom_instructions} [/INST]"
    prompt += f"[INST] {message} [/INST]"
    return prompt
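
# The app below sends only the latest user message to the model, so replies
# have no memory of earlier turns. A minimal sketch of a history-aware
# formatter (an assumption, not wired in; it reuses the Mistral-style [INST]
# tags and expects the st.session_state.messages list built below):
def format_prompt_with_history(message, history, custom_instructions=None):
    prompt = ""
    if custom_instructions:
        prompt += f"[INST] {custom_instructions} [/INST]"
    # Fold prior turns into the prompt: user turns become [INST] blocks,
    # assistant turns are appended as plain completions.
    for turn in history:
        if turn["role"] == "user":
            prompt += f"[INST] {turn['content']} [/INST]"
        else:
            prompt += turn["content"]
    prompt += f"[INST] {message} [/INST]"
    return prompt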

def reset_conversation():
    '''
    Resets the conversation history.
    '''
    st.session_state.conversation = []
    st.session_state.messages = []

models = list(model_links.keys())

selected_model = st.sidebar.selectbox("Select Model", models)

# The underlying text-generation endpoints reject temperature == 0.0,
# so the range starts at 0.1
temp_values = st.sidebar.slider('Select a temperature value', 0.1, 1.0, 0.5)

st.sidebar.button('Reset Chat', on_click=reset_conversation)
if "prev_option" not in st.session_state:
|
|
st.session_state.prev_option = selected_model
|
|
|
|
if st.session_state.prev_option != selected_model:
|
|
st.session_state.messages = []
|
|
|
|
st.session_state.prev_option = selected_model
|
|
reset_conversation()
|
|
|
|
|
|

repo_id = model_links[selected_model]

st.subheader(f'{selected_model}')
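
# model_info is defined above but never shown; a minimal sketch that surfaces
# the selected model's description and logo in the sidebar (assumes the keys
# of model_info mirror model_links):
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])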
if "messages" not in st.session_state:
|
|
st.session_state.messages = []
|
|
|
|
|
|
for message in st.session_state.messages:
|
|
with st.chat_message(message["role"]):
|
|
st.markdown(message["content"])
|
|
|
|
|
|
|
|

if prompt := st.chat_input(f"Hi I'm {selected_model}🗞️, How can I help you today?"):

    custom_instruction = "Act like a human in conversation"

    # Show and store the user's message
    with st.chat_message("user"):
        st.markdown(prompt)

    st.session_state.messages.append({"role": "user", "content": prompt})

    formatted_text = format_prompt(prompt, custom_instruction)

    # Stream the model's reply token by token
    with st.chat_message("assistant"):
        client = InferenceClient(model=repo_id)

        output = client.text_generation(
            formatted_text,
            temperature=temp_values,
            max_new_tokens=3000,
            stream=True
        )

        response = st.write_stream(output)

    st.session_state.messages.append({"role": "assistant", "content": response})
st.session_state.messages.append({"role": "assistant", "content": response}) |