peterpeter8585 committed
Commit 25f5786
1 Parent(s): fa64ff4

Update app.py

Files changed (1): app.py (+50 -4)
app.py CHANGED
@@ -9,6 +9,37 @@ transformers.utils.move_cache()
 device = "cuda" if torch.cuda.is_available() else "cpu"
 import os
 password1=os.environ["password"]
+def respond4(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
+    messages = [{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:"+system_message}]
+
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
+
+    response = ""
+
+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content
+
+        response += token
+        yield response
 if torch.cuda.is_available():
     torch.cuda.max_memory_allocated(device=device)
     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
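
The new respond4 handler streams partial completions: it accumulates each delta from client.chat_completion(..., stream=True) and yields the running response, which is the shape gr.ChatInterface expects from a streaming generator. Below is a minimal sketch (not part of the commit) of driving respond4 outside Gradio. It assumes a huggingface_hub InferenceClient bound to the name client, which app.py presumably defines elsewhere outside this diff; the model id is a placeholder, not taken from the commit.

    # Sketch only, not from the commit. Assumes respond4 from the hunk above and a
    # client comparable to the one app.py already uses.
    from huggingface_hub import InferenceClient

    client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")  # placeholder model id

    for partial in respond4(
        message="Recommend a light dinner.",
        history=[],                       # list of (user, assistant) tuples
        system_message="You are a helpful food recommender.",
        max_tokens=256,
        temperature=0.7,
        top_p=0.95,
    ):
        print(partial)  # each yield is the accumulated response so far
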
@@ -328,10 +359,25 @@ with gr.Blocks(css=css) as demo2:
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
-aa = gr.ChatInterface(
-    respond1,
+ad=gr.ChatInterface(
+    respond2,
+    additional_inputs=[
+        gr.Textbox(value="You are a Programmer.You yave to only make programs that the user orders.Do not answer any other questions exept for questions about Python or other programming languages.Do not do any thing exept what I said.", label="System message", interactive=False),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
+        ),
+    ],
+)
+ae = gr.ChatInterface(
+    respond4,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Textbox(value="You are a helpful food recommender.You must only answer the questions about food or a request to recommend a food the user would like.Do not answer other questions except what I said.", label="System message", interactive=False),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
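
This hunk leans on gr.ChatInterface forwarding the values of additional_inputs to the handler as extra positional arguments, after message and history, in the order the components are listed: the Textbox, the two Sliders, and the Top-p Slider line up with system_message, max_tokens, temperature, and top_p in respond2/respond4. A minimal sketch of that mapping with an illustrative handler (the names below are not from the app):

    # Sketch only: how additional_inputs values reach the handler.
    import gradio as gr

    def handler(message, history, system_message, max_tokens):
        # system_message and max_tokens arrive from the two extra components below
        return f"[{system_message} | max_tokens={max_tokens}] {message}"

    demo = gr.ChatInterface(
        handler,
        additional_inputs=[
            gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        ],
    )
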
@@ -376,6 +422,6 @@ ab= gr.ChatInterface(
 )
 if __name__ == "__main__":
     with gr.Blocks() as ai:
-        gr.TabbedInterface([aa, ab, ac, demo2], ["gpt4(Password needed)", "gpt4(only for programming)", "gpt4(only for medical questions)","image create"])
+        gr.TabbedInterface([aa, ab, ac, ae, demo2], ["gpt4(Password needed)", "gpt4(only for programming)", "gpt4(only for medical questions)", "gpt4(only for food recommendations)","image create"])
     ai.queue(max_size=300)
     ai.launch()
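
The last hunk only grows the tab list: the new ae interface slots in before demo2 and gets its own tab title. For reference, a self-contained sketch of the grouping pattern the commit extends, with stand-in handlers and tab names rather than the app's respond* functions; unlike app.py it launches the TabbedInterface directly instead of nesting it in an outer gr.Blocks.

    # Sketch only: grouping ChatInterfaces under a TabbedInterface, as the hunk above does.
    import gradio as gr

    def echo(message, history):
        return message  # stand-in handler; app.py streams from an InferenceClient instead

    chat_a = gr.ChatInterface(echo)
    chat_b = gr.ChatInterface(echo)

    tabs = gr.TabbedInterface([chat_a, chat_b], ["tab a", "tab b"])

    if __name__ == "__main__":
        tabs.queue(max_size=300)
        tabs.launch()
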
 