nawhgnuj committed on
Commit
61ced1b
1 Parent(s): 2d13423

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -79
app.py CHANGED
@@ -1,5 +1,3 @@
1
- # Starting with transformers >= 4.43.0 onward,
2
- # you can run conversational inference using the Transformers pipeline abstraction or by leveraging the Auto classes with the generate() function.
3
  import os
4
  import time
5
  import spaces
@@ -8,32 +6,37 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStream
8
  import gradio as gr
9
  from threading import Thread
10
 
11
- MODEL_LIST = ["nawhgnuj/DonaldTrump-Llama3.1-8B-Chat"]
12
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
13
  MODEL = os.environ.get("MODEL_ID")
14
 
15
- TITLE = "<h1><center>nawhgnuj/DonaldTrump-Llama3.1-8B-Chat</center></h1>"
16
 
17
  PLACEHOLDER = """
18
- <center>
19
- <p>Hi! I'm Donald Trump!</p>
20
- <p>Save America Win Back The White House Make America Great Again I was indicted for you!</p>
21
- </center>
 
22
  """
23
 
24
  CSS = """
 
 
 
25
  .duplicate-button {
26
  margin: auto !important;
27
  color: white !important;
28
- background: black !important;
29
  border-radius: 100vh !important;
30
  }
31
  h3 {
32
  text-align: center;
 
33
  }
34
  """
35
 
36
- device = "cuda" # for GPU usage or "cpu" for CPU usage
37
 
38
  quantization_config = BitsAndBytesConfig(
39
  load_in_4bit=True,
@@ -52,15 +55,13 @@ model = AutoModelForCausalLM.from_pretrained(
52
  def stream_chat(
53
  message: str,
54
  history: list,
55
- system_prompt: str,
56
- temperature: float = 0.8,
57
- max_new_tokens: int = 1024,
58
- top_p: float = 1.0,
59
- top_k: int = 20,
60
- penalty: float = 1.2,
61
  ):
62
- print(f'message: {message}')
63
- print(f'history: {history}')
 
 
 
 
64
 
65
  conversation = [
66
  {"role": "system", "content": system_prompt}
@@ -79,11 +80,11 @@ def stream_chat(
79
 
80
  generate_kwargs = dict(
81
  input_ids=input_ids,
82
- max_new_tokens = max_new_tokens,
83
- do_sample = False if temperature == 0 else True,
84
- top_p = top_p,
85
- top_k = top_k,
86
- temperature = temperature,
87
  eos_token_id=[128001,128008,128009],
88
  streamer=streamer,
89
  )
@@ -97,71 +98,21 @@ def stream_chat(
97
  buffer += new_text
98
  yield buffer
99
 
100
-
101
- chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)
102
 
103
- with gr.Blocks(css=CSS, theme="small_and_pretty") as demo:
104
  gr.HTML(TITLE)
105
  gr.ChatInterface(
106
  fn=stream_chat,
107
  chatbot=chatbot,
108
  fill_height=True,
109
- additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
110
- additional_inputs=[
111
- gr.Textbox(
112
- value="You are a Donald Trump chatbot. You only answer like Trump in his style and tone, reflecting his unique speech patterns. Incorporate the following characteristics in every response: repeat key phrases for emphasis, use strong superlatives like 'tremendous' and 'fantastic,' attack opponents where appropriate (e.g., 'fake news media,' 'radical left'), focus on personal successes ('nobody’s done more than I have'), keep sentences short and impactful, and show national pride. Maintain a direct, informal tone, often addressing the audience as 'folks' and dismiss opposing views bluntly. Highlight patriotic themes like 'making America great again.'",
113
- label="System Prompt",
114
- render=False,
115
- ),
116
- gr.Slider(
117
- minimum=0,
118
- maximum=1,
119
- step=0.1,
120
- value=0.8,
121
- label="Temperature",
122
- render=False,
123
- ),
124
- gr.Slider(
125
- minimum=128,
126
- maximum=8192,
127
- step=1,
128
- value=1024,
129
- label="Max new tokens",
130
- render=False,
131
- ),
132
- gr.Slider(
133
- minimum=0.0,
134
- maximum=1.0,
135
- step=0.1,
136
- value=1.0,
137
- label="top_p",
138
- render=False,
139
- ),
140
- gr.Slider(
141
- minimum=1,
142
- maximum=20,
143
- step=1,
144
- value=20,
145
- label="top_k",
146
- render=False,
147
- ),
148
- gr.Slider(
149
- minimum=0.0,
150
- maximum=2.0,
151
- step=0.1,
152
- value=1.2,
153
- label="Repetition penalty",
154
- render=False,
155
- ),
156
- ],
157
  examples=[
158
- ["What do you think about kamala harris?"],
159
- ["What do you think about joe biden?"],
160
- ["What do you think about immigrants?"],
161
  ],
162
  cache_examples=False,
163
  )
164
 
165
-
166
  if __name__ == "__main__":
167
- demo.launch()
 
 
 
1
  import os
2
  import time
3
  import spaces
 
6
  import gradio as gr
7
  from threading import Thread
8
 
9
+ MODEL_LIST = ["nawhgnuj/DonaldTrump-Llama-3.1-8B-Chat"]
10
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
11
  MODEL = os.environ.get("MODEL_ID")
12
 
13
+ TITLE = "<h1 style='color: #E53935; text-align: center;'>Donald Trump Chatbot</h1>"
14
 
15
  PLACEHOLDER = """
16
+ <div style='text-align: center;'>
17
+ <img src='https://upload.wikimedia.org/wikipedia/commons/5/56/Donald_Trump_official_portrait.jpg' style='width: 200px; border-radius: 50%;'>
18
+ <p style='color: #E53935; font-weight: bold;'>Hi! I'm Donald Trump!</p>
19
+ <p>Let's Make America Great Again! Ask me anything.</p>
20
+ </div>
21
  """
22
 
23
  CSS = """
24
+ .chatbot {
25
+ background-color: #FFCDD2;
26
+ }
27
  .duplicate-button {
28
  margin: auto !important;
29
  color: white !important;
30
+ background: #B71C1C !important;
31
  border-radius: 100vh !important;
32
  }
33
  h3 {
34
  text-align: center;
35
+ color: #E53935;
36
  }
37
  """
38
 
39
+ device = "cuda" if torch.cuda.is_available() else "cpu"
40
 
41
  quantization_config = BitsAndBytesConfig(
42
  load_in_4bit=True,
 
55
  def stream_chat(
56
  message: str,
57
  history: list,
 
 
 
 
 
 
58
  ):
59
+ system_prompt = "You are a Donald Trump chatbot. You only answer like Trump in style and tone."
60
+ temperature = 0.8
61
+ max_new_tokens = 1024
62
+ top_p = 1.0
63
+ top_k = 20
64
+ penalty = 1.2
65
 
66
  conversation = [
67
  {"role": "system", "content": system_prompt}
 
80
 
81
  generate_kwargs = dict(
82
  input_ids=input_ids,
83
+ max_new_tokens=max_new_tokens,
84
+ do_sample=True,
85
+ top_p=top_p,
86
+ top_k=top_k,
87
+ temperature=temperature,
88
  eos_token_id=[128001,128008,128009],
89
  streamer=streamer,
90
  )
 
98
  buffer += new_text
99
  yield buffer
100
 
101
+ chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER, elem_classes="chatbot")
 
102
 
103
+ with gr.Blocks(css=CSS, theme=gr.themes.Default()) as demo:
104
  gr.HTML(TITLE)
105
  gr.ChatInterface(
106
  fn=stream_chat,
107
  chatbot=chatbot,
108
  fill_height=True,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
  examples=[
110
+ ["What do you think about the economy?"],
111
+ ["How would you handle foreign policy?"],
112
+ ["What's your stance on immigration?"],
113
  ],
114
  cache_examples=False,
115
  )
116
 
 
117
  if __name__ == "__main__":
118
+ demo.launch()