winglian committed on
Commit
35a5820
β€’
1 Parent(s): ee23a10

add app and requirements for jackalope demo

Files changed (2)
  1. app.py +124 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,124 @@
+ import os
+ import re
+ import gradio as gr
+ import openai
+
+ openai.api_base = os.environ.get("OPENAI_API_BASE")
+ openai.api_key = os.environ.get("OPENAI_API_KEY")
+
+ BASE_SYSTEM_MESSAGE = """I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning.
+ I am an assistant who thinks through their answers step-by-step to be sure I always get the right answer.
+ I think more clearly if I write out my thought process in a scratchpad manner first; therefore, I always explain background context, assumptions, and step-by-step thinking BEFORE trying to answer or solve anything."""
+
+
+ # Stream completion tokens for a prompt from the OpenAI-compatible endpoint configured above
+ def make_prediction(prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repetition_penalty=None):
+     completion = openai.Completion.create(model="openaccess-ai-collective/jackalope-7b", prompt=prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, stream=True, stop=["</s>", "<|im_end|>"])
+     for chunk in completion:
+         yield chunk["choices"][0]["text"]
+
+
+ def clear_chat(chat_history_state, chat_message):
+     chat_history_state = []
+     chat_message = ''
+     return chat_history_state, chat_message
+
+
+ def user(message, history):
+     history = history or []
+     # Append the user's message to the conversation history
+     history.append([message, ""])
+     return "", history
+
+
+ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
+     history = history or []
+
+     # Build a ChatML-style prompt from the system message and the chat history
+     sys_prompt = system_message.strip() or BASE_SYSTEM_MESSAGE
+     messages = "<|im_start|> "+"system\n" + sys_prompt + "<|im_end|>\n" + \
+         "\n".join(["\n".join(["<|im_start|> "+"user\n"+item[0]+"<|im_end|>", "<|im_start|> assistant\n"+item[1]+"<|im_end|>"])
+                    for item in history])
+
+     # strip the trailing `<|im_end|>` from the messages; rstrip() would strip
+     # any run of characters from that set, so use removesuffix instead
+     messages = messages.removesuffix("<|im_end|>")
+     # remove last space from assistant, some models output a ZWSP if you leave a space
+     messages = messages.rstrip()
+
+     # If temperature is set to 0, force Top P to 1 and Top K to -1
+     if temperature == 0:
+         top_p = 1
+         top_k = -1
+
+     prediction = make_prediction(
+         messages,
+         max_tokens=max_tokens,
+         temperature=temperature,
+         top_p=top_p,
+         top_k=top_k,
+         repetition_penalty=repetition_penalty,
+     )
+     for tokens in prediction:
+         tokens = re.findall(r'(.*?)(\s|$)', tokens)
+         for subtoken in tokens:
+             subtoken = "".join(subtoken)
+             answer = subtoken
+             history[-1][1] += answer
+             # stream the response
+             yield history, history, ""
+
+
+ start_message = BASE_SYSTEM_MESSAGE
+
+ CSS = """
+ .contain { display: flex; flex-direction: column; }
+ .gradio-container { height: 100vh !important; }
+ #component-0 { height: 100%; }
+ #chatbot { flex-grow: 1; overflow: auto; resize: vertical; }
+ """
+
+ #with gr.Blocks() as demo:
+ with gr.Blocks(css=CSS) as demo:
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown(f"""
+             ## This preview demo is an un-quantized GPU chatbot of Jackalope 7B
+             Brought to you by your friends at Open Access AI Collective, Alignment Lab AI, OpenChat!
+             """)
+     with gr.Row():
+         gr.Markdown("# 🐰🦌 Jackalope 7B Playground Space! 🐰🦌")
+     with gr.Row():
+         #chatbot = gr.Chatbot().style(height=500)
+         chatbot = gr.Chatbot(elem_id="chatbot")
+     with gr.Row():
+         message = gr.Textbox(
+             label="What do you want to chat about?",
+             placeholder="Ask me anything.",
+             lines=3,
+         )
+     with gr.Row():
+         submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
+         clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
+         stop = gr.Button(value="Stop", variant="secondary").style(full_width=False)
+     with gr.Accordion("Show Model Parameters", open=True):
+         with gr.Row():
+             with gr.Column():
+                 max_tokens = gr.Slider(20, 2500, label="Max Tokens", step=20, value=500)
+                 temperature = gr.Slider(0.0, 2.0, label="Temperature", step=0.1, value=0.4)
+                 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95)
+                 top_k = gr.Slider(1, 100, label="Top K", step=1, value=40)
+                 repetition_penalty = gr.Slider(1.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1)
+
+     system_msg = gr.Textbox(
+         start_message, label="System Message", interactive=True, visible=True, placeholder="System prompt. Provide instructions which you want the model to remember.", lines=5)
+
+     chat_history_state = gr.State()
+     clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False)
+     clear.click(lambda: None, None, chatbot, queue=False)
+
+     submit_click_event = submit.click(
+         fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
+     ).then(
+         fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=[chatbot, chat_history_state, message], queue=True
+     )
+     stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event], queue=False)
+
+ demo.queue(max_size=128, concurrency_count=48).launch(debug=True, server_name="0.0.0.0", server_port=7860)
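
For reference, a sketch of the prompt chat() assembles for a single-turn history (the "Hello!" user turn is a hypothetical example; {system prompt} stands in for BASE_SYSTEM_MESSAGE or the System Message box). After the trailing <|im_end|> and whitespace are stripped, generation continues from the assistant tag:

    <|im_start|> system
    {system prompt}<|im_end|>
    <|im_start|> user
    Hello!<|im_end|>
    <|im_start|> assistant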
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ openai
+ requests
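
Note that the legacy openai.Completion API used in app.py exists only in the pre-1.0 openai client, so installs must resolve to openai<1.0; gradio is not listed because a Hugging Face Space supplies it through the Space's gradio SDK runtime. A minimal sketch of calling the same backend directly with that client (the endpoint, key, and prompt text are assumptions mirroring app.py's environment variables and ChatML format):

    import os
    import openai

    # Point the client at the same OpenAI-compatible backend the app uses
    # (these environment variables are assumptions carried over from app.py).
    openai.api_base = os.environ.get("OPENAI_API_BASE")
    openai.api_key = os.environ.get("OPENAI_API_KEY")

    # One-shot, non-streaming completion against the demo model, using the
    # same ChatML-style prompt and stop tokens as the Gradio app.
    response = openai.Completion.create(
        model="openaccess-ai-collective/jackalope-7b",
        prompt="<|im_start|> user\nWhat is a jackalope?<|im_end|>\n<|im_start|> assistant",
        max_tokens=200,
        stop=["</s>", "<|im_end|>"],
    )
    print(response["choices"][0]["text"])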