prithivMLmods committed
Commit 68c5b4c • 1 Parent(s): 0ba7801

Update app.py

Files changed (1)
  1. app.py +32 -41
app.py CHANGED
@@ -39,7 +39,7 @@ def respond(
 
     response = ""
 
-    for message in client.chat.completions.create(
+    for message in client.chat.completions.create(
         model="meta-llama/Meta-Llama-3.1-8B-Instruct",
         max_tokens=max_tokens,
         stream=True,
@@ -48,7 +48,6 @@ def respond(
         messages=messages,
     ):
         token = message.choices[0].delta.content
-
         response += token
         yield response
 
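An aside on the streaming loop in the two hunks above: with `stream=True`, the client yields chunk objects whose `choices[0].delta.content` carries the next token, and that field can be `None` on role-only chunks. A minimal self-contained sketch of the same pattern, assuming `huggingface_hub.InferenceClient` and a configured token (the commit's app.py builds its `client` outside these hunks):

```python
from huggingface_hub import InferenceClient

# Illustrative client; the real app.py constructs `client` elsewhere.
client = InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct")

def stream_reply(messages, max_tokens=512, temperature=0.7, top_p=0.95):
    """Yield the reply accumulated so far as each streamed chunk arrives."""
    response = ""
    for chunk in client.chat.completions.create(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct",
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        response += token or ""  # guard: delta.content may be None
        yield response
```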
@@ -81,46 +80,38 @@ def save_to_file(history, file_format)
 
     return file_name
 
-def save_conversation(history, file_format):
-    file_name = save_to_file(history, file_format)
-    return file_name
-
-def save_and_download(history, file_format):
-    file_name = save_conversation(history, file_format)
-    return file_name
-
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P"),
-        gr.Radio(["PDF", "DOCX", "TXT"], label="Save As"),
-    ],
-    css=css,
-    theme="allenai/gradio-theme",
-)
-
-save_button = gr.Button("Save Conversation")
-output_file = gr.File(label="Download File")
-
-def handle_save(history, save_format):
-    return save_conversation(history, save_format)
-
-save_button.click(
-    handle_save,
-    inputs=[demo.history, demo.inputs[-1]],  # Passing history and format
-    outputs=output_file
-)
-
-demo = gr.Blocks()
-
-with demo:
-    with gr.Column():
-        demo.render()
-        save_button.render()
-        output_file.render()
+# Gradio Interface Setup
+with gr.Blocks(css=css) as demo:
+    system_message = gr.Textbox(value="", label="System message")
+    max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
+    temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
+    top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
+    save_as = gr.Radio(["PDF", "DOCX", "TXT"], label="Save As")
+
+    chat = gr.Chatbot()
+    msg = gr.Textbox(label="Your message")
+
+    def respond_wrapper(message, history):
+        response_generator = respond(
+            message,
+            history,
+            system_message.value,
+            max_tokens.value,
+            temperature.value,
+            top_p.value
+        )
+        response = next(response_generator)
+        return history + [(message, response)]
+
+    msg.submit(respond_wrapper, [msg, chat], [chat])
+
+    save_button = gr.Button("Save Conversation")
+    output_file = gr.File(label="Download File")
+
+    def handle_save(history, file_format):
+        return save_to_file(history, file_format)
+
+    save_button.click(handle_save, inputs=[chat, save_as], outputs=output_file)
 
 if __name__ == "__main__":
     demo.launch()
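One behavioral note on the new Blocks wiring: Gradio delivers a component's current value to an event handler only through the `inputs` list, so reading `system_message.value` inside `respond_wrapper` returns the construction-time default, and `next(response_generator)` keeps only the first yield of the streaming generator. A hedged sketch of the conventional wiring, reusing the names from the diff:

```python
# Sketch only, not part of the commit. Components listed in `inputs`
# arrive as plain Python values, and a generator handler lets Gradio
# stream each partial reply into the Chatbot.
def respond_wrapper(message, history, system_message, max_tokens, temperature, top_p):
    history = history + [(message, "")]
    for partial in respond(message, history[:-1], system_message,
                           max_tokens, temperature, top_p):
        history[-1] = (message, partial)  # respond() yields accumulated text
        yield history

msg.submit(
    respond_wrapper,
    inputs=[msg, chat, system_message, max_tokens, temperature, top_p],
    outputs=[chat],
)
```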
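The body of `save_to_file` sits outside these hunks; only its `return file_name` tail appears as context. Purely to illustrate the `handle_save` to `gr.File` handoff, a hypothetical TXT-only stand-in (the actual function also covers the PDF and DOCX choices):

```python
# Hypothetical stand-in for app.py's save_to_file, TXT case only.
def save_to_file(history, file_format):
    file_name = f"conversation.{(file_format or 'TXT').lower()}"
    with open(file_name, "w", encoding="utf-8") as f:
        for user, bot in history or []:
            f.write(f"User: {user}\nAssistant: {bot}\n")
    return file_name  # gr.File accepts a path and serves it for download
```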