arad1367 committed
Commit d18942e • 1 Parent(s): 6eff03a

Update app.py

Files changed (1)
  1. app.py +0 -1
app.py CHANGED
@@ -1,169 +1,168 @@
 # Starting with transformers >= 4.43.0, you can run conversational inference
 # using the Transformers pipeline abstraction or by leveraging the Auto classes with the generate() function.
 import os
 import time
 import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
 import gradio as gr
 from threading import Thread

 MODEL_LIST = ["meta-llama/Meta-Llama-3.1-8B-Instruct"]
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 MODEL = os.environ.get("MODEL_ID")

 TITLE = "<h1><center>Meta-Llama3.1-8B Chatbot</center></h1>"

 PLACEHOLDER = """
 <center>
 <p>Hi! I'm your assistant. Feel free to ask your questions.</p>
 </center>
 """


 CSS = """
 .duplicate-button {
     margin: auto !important;
     color: white !important;
     background: black !important;
     border-radius: 100vh !important;
 }
 h3 {
     text-align: center;
 }
 """

 device = "cuda"  # for GPU usage or "cpu" for CPU usage

 # 4-bit NF4 quantization keeps the 8B model within a single GPU's memory budget.
 quantization_config = BitsAndBytesConfig(
     load_in_4bit=True,
     bnb_4bit_compute_dtype=torch.bfloat16,
     bnb_4bit_use_double_quant=True,
     bnb_4bit_quant_type="nf4")

 tokenizer = AutoTokenizer.from_pretrained(MODEL)
 model = AutoModelForCausalLM.from_pretrained(
     MODEL,
     torch_dtype=torch.bfloat16,
     device_map="auto",
     quantization_config=quantization_config)

 @spaces.GPU()
 def stream_chat(
     message: str,
     history: list,
     system_prompt: str,
     temperature: float = 0.8,
     max_new_tokens: int = 1024,
     top_p: float = 1.0,
     top_k: int = 20,
     penalty: float = 1.2,
 ):
     print(f'message: {message}')
     print(f'history: {history}')

     # Rebuild the full conversation from Gradio's (user, assistant) history pairs.
     conversation = [
         {"role": "system", "content": system_prompt}
     ]
     for prompt, answer in history:
         conversation.extend([
             {"role": "user", "content": prompt},
             {"role": "assistant", "content": answer},
         ])

     conversation.append({"role": "user", "content": message})

     input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)

     streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)

     generate_kwargs = dict(
         input_ids=input_ids,
         max_new_tokens=max_new_tokens,
         do_sample=False if temperature == 0 else True,
         top_p=top_p,
         top_k=top_k,
         temperature=temperature,
         repetition_penalty=penalty,
         eos_token_id=[128001, 128008, 128009],
         streamer=streamer,
     )

     # generate() runs on a background thread; the streamer yields partial text as it arrives.
     with torch.no_grad():
         thread = Thread(target=model.generate, kwargs=generate_kwargs)
         thread.start()

     buffer = ""
     for new_text in streamer:
         buffer += new_text
         yield buffer


 chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)

 with gr.Blocks(css=CSS, theme="small_and_pretty") as demo:
     gr.HTML(TITLE)
-    gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
     gr.ChatInterface(
         fn=stream_chat,
         chatbot=chatbot,
         fill_height=True,
         additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
         additional_inputs=[
             gr.Textbox(
                 value="You are a helpful assistant",
                 label="System Prompt",
                 render=False,
             ),
             gr.Slider(
                 minimum=0,
                 maximum=1,
                 step=0.1,
                 value=0.8,
                 label="Temperature",
                 render=False,
             ),
             gr.Slider(
                 minimum=128,
                 maximum=8192,
                 step=1,
                 value=1024,
                 label="Max new tokens",
                 render=False,
             ),
             gr.Slider(
                 minimum=0.0,
                 maximum=1.0,
                 step=0.1,
                 value=1.0,
                 label="top_p",
                 render=False,
             ),
             gr.Slider(
                 minimum=1,
                 maximum=20,
                 step=1,
                 value=20,
                 label="top_k",
                 render=False,
             ),
             gr.Slider(
                 minimum=0.0,
                 maximum=2.0,
                 step=0.1,
                 value=1.2,
                 label="Repetition penalty",
                 render=False,
             ),
         ],
         examples=[
             ["How to make a self-driving car?"],
             ["Give me a creative idea to establish a startup"],
             ["How can I improve my programming skills?"],
             ["Show me a code snippet of a website's sticky header in CSS and JavaScript."],
         ],
         cache_examples=False,
     )


 if __name__ == "__main__":
     demo.launch()
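
For comparison, the header comment also mentions the Transformers pipeline abstraction as an alternative to the Auto classes used in app.py. Below is a minimal sketch of that route, assuming the same Meta-Llama-3.1-8B-Instruct checkpoint and a valid HF_TOKEN in the environment for the gated model; the 4-bit quantization and token streaming from the Space are omitted to keep the example short.

import torch
from transformers import pipeline

# Build a text-generation pipeline; device_map="auto" places the model on GPU if available.
pipe = pipeline(
    "text-generation",
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

# Chat-format input: the pipeline applies the model's chat template internally.
messages = [
    {"role": "system", "content": "You are a helpful assistant"},
    {"role": "user", "content": "How can I improve my programming skills?"},
]

outputs = pipe(messages, max_new_tokens=256)
# With chat input, generated_text is the message list plus the new assistant turn.
print(outputs[0]["generated_text"][-1]["content"])

Unlike stream_chat, this returns the completed reply in one shot; for token-by-token output you would still wire in a TextIteratorStreamer as the diff above does.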