Upload app.py
app.py
ADDED
@@ -0,0 +1,152 @@
import streamlit as st
from huggingface_hub import InferenceClient
import os
import sys

st.title("Hey I'm Functional Bot")
print("yes")  # debug marker: confirms the script reruns on each interaction
base_url = "https://api-inference.huggingface.co/models/"

# print(API_KEY)
# headers = {"Authorization": "Bearer " + API_KEY}
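# NOTE (hedged sketch): gated or rate-limited models on the serverless
# Inference API need an auth token, which InferenceClient accepts directly.
# The HF_TOKEN variable name below is an assumption, not part of this app:
#
#   client = InferenceClient(
#       model=base_url + "mistralai/Mistral-7B-Instruct-v0.2",
#       token=os.environ.get("HF_TOKEN"),  # assumed env var
#   )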

model_links = {
    "Mistral-7B": base_url + "mistralai/Mistral-7B-Instruct-v0.2",
    "Mistral-22B": base_url + "mistral-community/Mixtral-8x22B-v0.1",
    "Phi-3": base_url + "microsoft/Phi-3-mini-4k-instruct",
}
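# NOTE (added): InferenceClient also accepts a bare repo id such as
# "mistralai/Mistral-7B-Instruct-v0.2" and resolves the endpoint itself,
# so the base_url prefix is a convenience here rather than a requirement.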

# Pull info about the model to display
model_info = {
    "Mistral-7B":
        {'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question-and-answer interactions.\n \
        \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},

    "Mistral-22B":
        {'description': """The Mixtral model is a **Large Language Model (LLM)** that's able to have question-and-answer interactions.\n \
        \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-22b/) team and has over **22 billion parameters.** \n""",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},

    "Phi-3":
        {'description': """The Phi-3 model is a **Large Language Model (LLM)** that's able to have question-and-answer interactions.\n \
        \nIt was created by the [**Microsoft**](https://news.microsoft.com/source/features/ai/the-phi-3-small-language-models-with-big-potential/) team and has fewer than **13 billion parameters.** \n""",
        'logo': 'https://www.techfinitive.com/wp-content/uploads/2023/07/microsoft-365-copilot-jpg.webp'},

    # "Zephyr-7B-β":
    #     {'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question-and-answer interactions.\n \
    #     \nFrom Huggingface: \n\
    #     Zephyr is a series of language models that are trained to act as helpful assistants. \
    #     [Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) \
    #     is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
    #     that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
    #     'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
}

def format_prompt(message, custom_instructions=None):
    # Wrap the instructions and the user message in Mistral-style [INST] tags
    prompt = ""
    if custom_instructions:
        prompt += f"[INST] {custom_instructions} [/INST]"
    prompt += f"[INST] {message} [/INST]"
    return prompt
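# Example (added): with the [INST] tags above,
#   format_prompt("Hi", "Be brief")
# returns "[INST] Be brief [/INST][INST] Hi [/INST]"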

def reset_conversation():
    '''
    Resets Conversation
    '''
    st.session_state.conversation = []
    st.session_state.messages = []
    return None

models = list(model_links.keys())

# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)

# Create a temperature slider (some endpoints may reject a value of exactly 0.0)
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)

# Add reset button to clear conversation
st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button

# Create model description
# st.sidebar.write(f"You're now chatting with **{selected_model}**")
# st.sidebar.markdown(model_info[selected_model]['description'])
# st.sidebar.image(model_info[selected_model]['logo'])
# st.sidebar.markdown("*Generated content may be inaccurate or false.*")
# st.sidebar.markdown("\nAbout the Developer of StrangerX AI[here](https://github.com/PRITHIVSAKTHIUR/StrangerX).")

if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model

# Clear the history whenever the user switches models
if st.session_state.prev_option != selected_model:
    st.session_state.messages = []
    # st.write(f"Changed to {selected_model}")
    st.session_state.prev_option = selected_model
    reset_conversation()

# Pull in the model we want to use
repo_id = model_links[selected_model]

st.subheader(f'{selected_model}')
# st.title(f'ChatBot Using {selected_model}')

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input(f"Hi I'm {selected_model}🗞️, How can I help you today?"):

    custom_instruction = "Act like a Human in conversation"

    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    formatted_text = format_prompt(prompt, custom_instruction)

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        client = InferenceClient(
            model=model_links[selected_model])
        # headers=headers)

        output = client.text_generation(
            formatted_text,
            temperature=temp_values,  # 0.5
            max_new_tokens=3000,
            stream=True
        )

        response = st.write_stream(output)
    st.session_state.messages.append({"role": "assistant", "content": response})
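# Run note (added): a sketch of how this app is typically launched, assuming
# the dependencies are installed (pip install streamlit huggingface_hub):
#
#   streamlit run app.py
#
# text_generation(..., stream=True) yields text chunks; st.write_stream
# renders them incrementally and returns the concatenated string, which is
# what gets stored in the chat history above.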