R3troR0b committed
Commit 6140a60
1 Parent(s): 250c3bb

Update app.py

Files changed (1)
  1. app.py +12 -11
app.py CHANGED
@@ -15,30 +15,31 @@ def respond(
     temperature,
     top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
-
+    # Prepare the prompt based on the message and history
+    prompt = system_message + "\n"
     for val in history:
         if val[0]:
-            messages.append({"role": "user", "content": val[0]})
+            prompt += "User: " + val[0] + "\n"
         if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
+            prompt += "Assistant: " + val[1] + "\n"
 
-    messages.append({"role": "user", "content": message})
+    prompt += "User: " + message + "\nAssistant:"
 
     response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
+    # Use text-generation instead of chat-completion
+    for message in client.text_generation(
+        prompt=prompt,
+        max_new_tokens=max_tokens,
         temperature=temperature,
         top_p=top_p,
+        stream=True,
     ):
-        token = message.choices[0].delta.content
-
+        token = message['generated_text'].replace(prompt, '')
         response += token
         yield response
 
+
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """