# llama3-8B-aifeifei-1.3 / test_openai_api_lmstudio.py
import random
from openai import OpenAI
# Point to the local server
client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")
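# Note: the API key is a placeholder; LM Studio's local server typically accepts
# any value here, and port 1234 is its default server port.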
history = [
    {"role": "system", "content": "You are an intelligent assistant. You always provide well-reasoned answers that are both correct and helpful."},
]
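# history holds the running conversation: the loop below appends the user prompt
# and the assistant reply on every turn, so the model sees the full context.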
# Open the file in read mode
with open('Model_Test_Issues_zh_en_jp.txt', 'r', encoding='utf-8') as file:
    # Read all lines from the file
    lines = file.readlines()
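# lines now holds one candidate test prompt per line of the input file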
# Loop indefinitely
while True:
    # Choose a random line from the file
    line = random.choice(lines).strip()
    print(line)
    # Add the line as the user's content to the history
    history.append({"role": "user", "content": line})
    # Generate the response
    completion = client.chat.completions.create(
        model="mod/Repository",
        messages=history,
        temperature=0.7,
        stream=True,
        stop=["### Evaluation:", "<|end_of_text|>", "Translation:"],
    )
    new_message = {"role": "assistant", "content": ""}
    for chunk in completion:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)
            new_message["content"] += chunk.choices[0].delta.content
    history.append(new_message)
    print()
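# Optional note (not part of the original script): because two messages are appended
# per iteration, history grows without bound and a long run can eventually exceed the
# model's context window. A minimal, hypothetical sketch of one way to cap it, keeping
# the system prompt plus only the most recent exchanges (MAX_TURNS is an assumed
# setting, not defined above), would be to add inside the loop:
#
#     MAX_TURNS = 20
#     if len(history) > 1 + 2 * MAX_TURNS:
#         history = [history[0]] + history[-2 * MAX_TURNS:]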