aifeifei798
committed on
Commit
•
79bf268
1
Parent(s):
fac3515
Upload test_openai_api_lmstudio.py
Browse files- test_openai_api_lmstudio.py +35 -0
test_openai_api_lmstudio.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Batch-test a local LM Studio model through its OpenAI-compatible API.

Reads test prompts (one per line) from ``Model_Test_Issues_zh_en_jp.txt``,
sends each as a user message together with the accumulated chat history,
and streams the assistant's reply to stdout.
"""

from openai import OpenAI

# Point to the local LM Studio server. The api_key value is a placeholder;
# LM Studio's server does not validate it, but the client requires one.
client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")

history = [
    {"role": "system", "content": "You are an intelligent assistant. You always provide well-reasoned answers that are both correct and helpful."},
]

# Read each test prompt (one per line) from the file.
with open('Model_Test_Issues_zh_en_jp.txt', 'r', encoding='utf-8') as file:
    for line in file:
        prompt = line.strip()
        if not prompt:
            # Skip blank lines so we never send an empty user message.
            continue
        # Echo the prompt being tested (stripped, to avoid double newlines).
        print(prompt)
        # Add the prompt as the user's content to the history.
        history.append({"role": "user", "content": prompt})

        # Generate the response, streamed token-by-token.
        completion = client.chat.completions.create(
            model="mod/Repository",
            messages=history,
            temperature=0.7,
            stream=True,
        )

        new_message = {"role": "assistant", "content": ""}

        for chunk in completion:
            # Bind once: delta.content may be None for role/stop chunks.
            delta = chunk.choices[0].delta.content
            if delta:
                print(delta, end="", flush=True)
                new_message["content"] += delta

        # Keep the full assistant reply in the history so later prompts
        # see the whole conversation.
        history.append(new_message)

        print()