import os
import requests
import json
import time

import gradio as gr
from transformers import AutoTokenizer


import socket

# Log the host's name and IP address (handy when debugging the deployment).
hostname = socket.gethostname()
ip_addr = socket.gethostbyname(hostname)
print(f"Hostname: {hostname}")
print(f"IP address: {ip_addr}")


DESCRIPTION = """
# MediaTek Research Breeze-7B

MediaTek Research Breeze-7B (hereinafter referred to as Breeze-7B) is a language model family that builds on top of [Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.1), specifically intended for Traditional Chinese use.

[Breeze-7B-Base](https://huggingface.co/MediaTek-Research/Breeze-7B-Base-v1_0) is the base model for the Breeze-7B series.
It is suitable if you have substantial fine-tuning data to tune it for your specific use case.

[Breeze-7B-Instruct](https://huggingface.co/MediaTek-Research/Breeze-7B-Instruct-v1_0) is derived from the base model Breeze-7B-Base, making it suitable for use as-is on commonly seen tasks.


The current release version of Breeze-7B is v1.0.

*A project by the members (in alphabetical order): Chan-Jan Hsu 許湛然, Chang-Le Liu 劉昶樂, Feng-Ting Liao 廖峰挺, Po-Chun Hsu 許博竣, Yi-Chang Chen 陳宜昌, and the supervisor Da-Shan Shiu 許大山.*

**免責聲明: MediaTek Research Breeze-7B 並未針對問答進行安全保護,因此語言模型的任何回應不代表 MediaTek Research 立場。**
"""

LICENSE = """
"""

DEFAULT_SYSTEM_PROMPT = "You are a helpful AI assistant built by MediaTek Research. The user you are helping speaks Traditional Chinese and comes from Taiwan."

API_URL = os.environ.get("API_URL")
TOKEN = os.environ.get("TOKEN")
TOKENIZER_REPO = "MediaTek-Research/Breeze-7B-Instruct-v1_0"
API_MODEL_TYPE = "breeze-7b-instruct-v10"
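# Assumed wire contract for the serving endpoint at API_URL (inferred from
# bot() and stream_response_from_server() below, not from API docs): a POST
# whose JSON body looks roughly like
#   {"model_type": API_MODEL_TYPE,
#    "prompt": "<rendered chat template>",
#    "parameters": {"temperature": 0.01, "top_p": 0.01,
#                   "max_new_tokens": 1024, "repetition_penalty": 1.1}}
# and which streams the completion back one JSON object per line.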

HEADERS = {
    "Authorization": f"Bearer {TOKEN}", 
    "Content-Type": "application/json",
    "accept": "application/json"
}

MAX_SEC = 30
MAX_INPUT_LENGTH = 5000

tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_REPO, use_auth_token=os.environ.get("HF_TOKEN"))
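# The tokenizer is only used to render the chat template; generation happens
# server-side. Illustrative use:
#   chat = [{"role": "system", "content": DEFAULT_SYSTEM_PROMPT},
#           {"role": "user", "content": "你好"}]
#   prompt = tokenizer.apply_chat_template(chat, tokenize=False)
# The rendered string begins with the '<s>' BOS token, which bot() strips
# before sending the prompt to the server.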


def refusal_condition(query):
    # "Stop asking these questions!" -- refuse queries that touch
    # politically sensitive cross-strait topics.

    query_remove_space = query.replace(' ', '').lower()

    is_including_tw = any(
        x in query_remove_space
        for x in ['台灣', '台湾', 'taiwan', 'tw', '中華民國', '中华民国'])
    is_including_cn = any(
        x in query_remove_space
        for x in ['中國', '中国', 'cn', 'china', '大陸', '內地', '大陆', '内地', '中華人民共和國', '中华人民共和国'])
    if is_including_tw and is_including_cn:
        return True

    if any(x in query_remove_space
           for x in ['一個中國', '兩岸', '一中原則', '一中政策', '一个中国', '两岸', '一中原则']):
        return True

    return False
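
# A few illustrative sanity checks for refusal_condition (safe to remove):
# a query naming both sides of the strait, or using one-China phrasing, is
# refused; an ordinary question is not.
assert refusal_condition('台灣和中國的關係')
assert refusal_condition('一個中國')
assert not refusal_condition('今天天氣如何')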

with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)

    system_prompt = gr.Textbox(label='System prompt',
                               value=DEFAULT_SYSTEM_PROMPT,
                               lines=1)
    
    with gr.Accordion(label='Advanced options', open=False):

        max_new_tokens = gr.Slider(
            label='Max new tokens',
            minimum=32,
            maximum=2048,
            step=1,
            value=1024,
        )
        temperature = gr.Slider(
            label='Temperature',
            minimum=0.01,
            maximum=0.5,
            step=0.01,
            value=0.01,
        )
        top_p = gr.Slider(
            label='Top-p (nucleus sampling)',
            minimum=0.01,
            maximum=0.99,
            step=0.01,
            value=0.01,
        )

    chatbot = gr.Chatbot(show_copy_button=True, show_share_button=True)
    with gr.Row():
        msg = gr.Textbox(
            container=False,
            show_label=False,
            placeholder='Type a message...',
            scale=10,
            lines=6
        )
        submit_button = gr.Button('Submit',
                                  variant='primary',
                                  scale=1,
                                  min_width=0)

    with gr.Row():
        retry_button = gr.Button('🔄  Retry', variant='secondary')
        undo_button = gr.Button('↩️ Undo', variant='secondary')
        clear = gr.Button('🗑️  Clear', variant='secondary')

    saved_input = gr.State()

    def user(user_message, history):
        # Clear the textbox and append the user's message with a pending
        # (None) assistant reply.
        return "", history + [[user_message, None]]


    def connect_server(data):
        # POST the request; retry up to three times, swallowing transient
        # network errors and timeouts so the next attempt can proceed.
        for _ in range(3):
            try:
                s = requests.Session()
                r = s.post(API_URL, headers=HEADERS, json=data, stream=True, timeout=30)
                if r.status_code == 200:
                    return r
            except requests.RequestException:
                pass
            time.sleep(1)
        return None


    def stream_response_from_server(r):
        # The server streams one JSON object per line; a line without a
        # "fragment" field marks the end of generation.
        for line in r.iter_lines():
            if not line:
                continue
            json_response = json.loads(line)
            if "fragment" not in json_response["result"]:
                break
            yield json_response["result"]["fragment"]["data"]["text"]
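    # Assumed per-line payload shape (inferred from the parsing above, not
    # from API documentation):
    #   {"result": {"fragment": {"data": {"text": "<delta>"}}}}
    # Typical use, as wired up in bot() below:
    #   r = connect_server(data)
    #   if r is not None:
    #       for delta in stream_response_from_server(r):
    #           ...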

    
    def bot(history, max_new_tokens, temperature, top_p, system_prompt):
        chat_data = []
        system_prompt = system_prompt.strip()
        if system_prompt:
            chat_data.append({"role": "system", "content": system_prompt})
        for user_msg, assistant_msg in history:
            chat_data.append({"role": "user", "content": user_msg if user_msg is not None else ''})
            chat_data.append({"role": "assistant", "content": assistant_msg if assistant_msg is not None else ''})

        message = tokenizer.apply_chat_template(chat_data, tokenize=False)
        message = message[3:]  # strip the leading '<s>' BOS token rendered by the chat template

        if len(message) > MAX_INPUT_LENGTH:
            raise ValueError(f'Prompt too long: {len(message)} > {MAX_INPUT_LENGTH} characters')

        response = '[ERROR]'  # value logged at the end; overwritten on success or refusal
        if refusal_condition(history[-1][0]):
            history = [['[安全拒答啟動]', '[安全拒答啟動] 請清除再開啟對話']]
            response = '[REFUSAL]'
            yield history
        else:
            data = {
                "model_type": API_MODEL_TYPE,
                "prompt": str(message),
                "parameters": {
                    "temperature": float(temperature),
                    "top_p": float(top_p),
                    "max_new_tokens": int(max_new_tokens),
                    "repetition_penalty": 1.1
                }
            }
            
            r = connect_server(data)
            if r is not None:
                if history[-1][1] is None:
                    history[-1][1] = ''
                for delta in stream_response_from_server(r):
                    history[-1][1] += delta
                    yield history

                if history[-1][1].endswith('</s>'):
                    history[-1][1] = history[-1][1][:-4]  # drop the trailing EOS token
                    yield history
    
                response = history[-1][1]
    
                if refusal_condition(history[-1][1]):
                    history[-1][1] = history[-1][1] + '\n\n**[免責聲明: 此模型並未針對問答進行安全保護,因此語言模型的任何回應不代表 MediaTek Research 立場。]**'
                    yield history
            else:
                del history[-1]
                yield history

        print('== Record ==\nQuery: {query}\nResponse: {response}'.format(query=repr(message), response=repr(response)))
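
    # bot() is a generator: Gradio re-renders the Chatbot component on each
    # yield, which is what produces the token-by-token streaming effect in
    # the event handlers wired up below.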
        
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        fn=bot,
        inputs=[
            chatbot,
            max_new_tokens,
            temperature,
            top_p,
            system_prompt,
        ],
        outputs=chatbot
    )
    submit_button.click(
        user, [msg, chatbot], [msg, chatbot], queue=False
    ).then(
        fn=bot,
        inputs=[
            chatbot,
            max_new_tokens,
            temperature,
            top_p,
            system_prompt,
        ],
        outputs=chatbot
    )


    def delete_prev_fn(
            history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
        try:
            message, _ = history.pop()
        except IndexError:
            message = ''
        return history, message or ''


    def display_input(message: str,
                      history: list[tuple[str, str]]) -> list[tuple[str, str]]:
        history.append((message, ''))
        return history

    retry_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=bot,
        inputs=[
            chatbot,
            max_new_tokens,
            temperature,
            top_p,
            system_prompt,
        ],
        outputs=chatbot,
    )

    undo_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=lambda x: x,
        inputs=[saved_input],
        outputs=msg,
        api_name=False,
        queue=False,
    )

    clear.click(lambda: None, None, chatbot, queue=False)

    gr.Markdown(LICENSE)

demo.queue(concurrency_count=4, max_size=128)
demo.launch()