daq27 committed on
Commit
d36980f
1 Parent(s): cc930d1

Create main.py

Files changed (1)
  1. main.py +97 -0
main.py ADDED
@@ -0,0 +1,97 @@
+ import gradio as gr  # to create the web UI for the application
+ from openai import OpenAI  # to interact with LM Studio models
+ import re  # for text manipulation
+
+ # ANSI escape codes for terminal colors (defined but unused below)
+ RESET_COLOR = '\033[0m'
+ NEON_GREEN = '\033[92m'
+
+ # Point the OpenAI-compatible client at the local LM Studio server
+ # (LM Studio's default port is 1234; adjust base_url to match your server)
+ client = OpenAI(base_url="http://localhost:1836/v1", api_key="lm-studio")
+
+ # Initialize an empty list to store conversation history
+ conversation_history = []
+ def format_response_text(text):
+     """
+     Formats the response text for improved readability.
+     :param text: The raw response text.
+     :return: Formatted text.
+     """
+     # New paragraph after each period, question mark, or exclamation point
+     text = re.sub(r'(?<=[.!?])\s+(?=[A-Z])', '\n\n', text)
+
+     # Properly indent bullet points and numbered lists
+     text = re.sub(r'(\n)?(\s*)?([•\-*]|\d+\.)\s+', r'\n \3 ', text)
+
+     return text
+
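+ # Example (illustrative):
+ #   format_response_text("Alpha. Beta! - one - two")
+ #   returns "Alpha.\n\nBeta!\n - one\n - two"
+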
+ def mistral_streamed_interaction(user_input, conversation_history):
+     """
+     Interacts with the Mistral model via LM Studio, maintaining conversation context.
+     :param user_input: String, the user's input.
+     :param conversation_history: List, the conversation history.
+     :return: Tuple containing the response and the updated conversation history.
+     """
+     # Add the user's input to the conversation history
+     conversation_history.append({"role": "user", "content": user_input})
+
+     streamed_completion = client.chat.completions.create(
+         model="TheBloke/dolphin-2.2.1-mistral-7B-GGUF/dolphin-2.2.1-mistral-7b.Q4_K_S.gguf",
+         messages=conversation_history,
+         stream=True  # Enable streaming
+     )
+
+     full_response = ""
+     line_buffer = ""
+
+     # Accumulate streamed tokens; flush completed lines into full_response
+     for chunk in streamed_completion:
+         delta_content = chunk.choices[0].delta.content
+         if delta_content:
+             line_buffer += delta_content
+             if '\n' in line_buffer:
+                 lines = line_buffer.split('\n')
+                 # Re-append the newline consumed by split() so line breaks survive
+                 full_response += '\n'.join(lines[:-1]) + '\n'
+                 line_buffer = lines[-1]
+
+     # Flush anything left in the buffer
+     if line_buffer:
+         full_response += line_buffer
+
+     full_response = format_response_text(full_response)
+
+     # Add the model's response to the conversation history
+     # ("assistant" is the correct role for model replies)
+     conversation_history.append({"role": "assistant", "content": full_response})
+
+     return full_response, conversation_history
+
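+ # Illustrative usage (assumes an LM Studio server is reachable at base_url):
+ #   reply, history = mistral_streamed_interaction("Hello!", [])
+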
+ def clear_conversation_history():
+     """
+     Clears the global conversation history.
+     """
+     global conversation_history
+     conversation_history = []
+     print("Conversation history cleared.")
+
+
+ def gradio_interface_interaction(user_input):
+     """
+     Acts as the bridge between the Gradio interface and the chat logic:
+     processes the user input via the existing chat logic and returns the response.
+
+     :param user_input: User input from the Gradio interface.
+     :return: Response text to be displayed in the Gradio interface.
+     """
+     # Call the chat interaction function with the global conversation history
+     response, _ = mistral_streamed_interaction(user_input, conversation_history)
+     return response
+
+
+ # Build the Gradio interface around the interaction function
+ iface = gr.Interface(
+     fn=gradio_interface_interaction,
+     inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here"),
+     outputs=gr.Textbox(),
+ )
+
+ # Launch the Gradio interface
+ iface.launch()
+
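+ # Optional (illustrative): Gradio's standard launch parameters can expose the
+ # app beyond localhost, e.g. iface.launch(server_name="0.0.0.0", server_port=7860)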