File size: 13,375 Bytes
e3596d7
a503c73
096dbd4
 
d3cddbc
096dbd4
ae95de2
 
 
548dd69
10d3a03
9e02e2e
 
 
 
 
 
 
 
 
 
ae95de2
 
096dbd4
b73bb4c
ae95de2
b73bb4c
 
ae95de2
 
b73bb4c
ae95de2
b73bb4c
 
 
 
ae95de2
 
 
 
 
 
 
b73bb4c
 
 
 
 
 
ae95de2
 
 
 
 
 
 
 
 
e3596d7
ae95de2
096dbd4
ae95de2
096dbd4
ae95de2
 
 
 
 
 
 
3eec23c
b73bb4c
 
 
 
3eec23c
 
 
b73bb4c
3eec23c
 
b73bb4c
3eec23c
 
b73bb4c
3eec23c
 
f203c17
b73bb4c
f203c17
548dd69
b73bb4c
9e02e2e
 
 
6232328
ae95de2
 
 
 
a503c73
 
 
 
 
 
 
096dbd4
a503c73
 
 
 
2ceda68
096dbd4
a503c73
096dbd4
 
a503c73
ae95de2
 
 
 
 
 
e3596d7
ae95de2
b73bb4c
 
ee90599
b73bb4c
45eb20f
b73bb4c
548dd69
f203c17
10d3a03
4a5e212
 
da88775
4a5e212
 
b73bb4c
10d3a03
4a5e212
b73bb4c
f30c15c
 
ae95de2
 
096dbd4
 
 
 
 
 
b73bb4c
096dbd4
 
b73bb4c
ae95de2
 
 
 
 
568f598
ae95de2
995e6d1
ae95de2
 
 
 
 
 
 
 
 
 
 
 
995e6d1
 
ae95de2
568f598
f2a8ecd
b73bb4c
 
 
 
 
 
 
 
 
 
 
 
ae95de2
568f598
ae95de2
 
 
568f598
ae95de2
ec67b57
0081309
568f598
 
 
b73bb4c
568f598
 
ae95de2
 
 
ec67b57
 
 
 
 
b73bb4c
ec67b57
 
 
b73bb4c
ec67b57
ae95de2
 
 
 
 
 
 
 
 
 
 
548dd69
f203c17
7164dd1
d3cddbc
 
 
 
b73bb4c
d3cddbc
b73bb4c
ae95de2
d3cddbc
096dbd4
a503c73
ae95de2
 
 
 
10d3a03
b73bb4c
 
 
4a5e212
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b73bb4c
 
 
 
 
 
 
 
 
4a5e212
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b73bb4c
4a5e212
 
548dd69
f203c17
7164dd1
b73bb4c
4a5e212
b73bb4c
4a5e212
b73bb4c
8eb01dc
4a5e212
 
8eb01dc
4a5e212
b73bb4c
 
4a5e212
 
f935da6
4a5e212
 
 
 
 
 
 
 
ae95de2
 
 
 
096dbd4
ae95de2
 
 
b9c5d81
 
ae95de2
 
b73bb4c
 
 
096dbd4
 
ae95de2
 
faf18e1
ae95de2
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
import os
import json
import uuid
from datetime import datetime
from flask import Flask, request, Response, jsonify
import socketio
import requests
import logging
from threading import Event
import tiktoken  # tiktoken library, used for token counting

def local_encoding_for_model(model_name: str):
    """Build the cl100k_base tokenizer from a bundled local file.

    Installed as a monkey-patch over ``tiktoken.encoding_for_model`` (see
    below) so the container never downloads the encoding at runtime. The
    ``model_name`` argument is kept for signature compatibility but ignored:
    the local file is always the cl100k_base vocabulary.

    Returns:
        tiktoken.Encoding: a usable encoder exposing ``.encode()``.

    Raises:
        FileNotFoundError: if the bundled encoding file is missing.
    """
    local_encoding_path = '/app/cl100k_base.tiktoken'
    if not os.path.exists(local_encoding_path):
        raise FileNotFoundError(f"Local encoding file not found at {local_encoding_path}")
    # BUG FIX: the previous implementation returned the raw bytes of the
    # .tiktoken file, but callers do ``encoding.encode(text)`` — raw bytes
    # have no ``.encode`` method, so every token count crashed. Build a real
    # Encoding object from the local BPE rank file instead.
    from tiktoken.load import load_tiktoken_bpe  # local import: only needed here
    mergeable_ranks = load_tiktoken_bpe(local_encoding_path)
    return tiktoken.Encoding(
        name="cl100k_base",
        # Pattern and special tokens mirror tiktoken's own cl100k_base definition.
        pat_str=r"""'(?i:[sdmt]|ll|ve|re)|[^\r\n\p{L}\p{N}]?+\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]++[\r\n]*|\s*[\r\n]|\s+(?!\S)|\s+""",
        mergeable_ranks=mergeable_ranks,
        special_tokens={
            "<|endoftext|>": 100257,
            "<|fim_prefix|>": 100258,
            "<|fim_middle|>": 100259,
            "<|fim_suffix|>": 100260,
            "<|endofprompt|>": 100276,
        },
    )

# Route all tokenizer lookups through the local file loader above.
tiktoken.encoding_for_model = local_encoding_for_model

app = Flask(__name__)
logging.basicConfig(level=logging.INFO)

# API key clients must present in the x-api-key header (from the environment)
API_KEY = os.environ.get('PPLX_KEY')

# Optional outbound HTTP(S) proxy
proxy_url = os.environ.get('PROXY_URL')

# When a proxy is configured, give the Socket.IO client a requests.Session
# pre-wired with it; otherwise let the client create its own session.
if proxy_url:
    proxies = {
        'http': proxy_url,
        'https': proxy_url
    }
    transport = requests.Session()
    transport.proxies.update(proxies)
else:
    transport = None

# NOTE(review): a single module-level Socket.IO client is shared by every
# request and its event handlers are re-registered per request — concurrent
# requests would race on it; confirm the deployment serves one request at a time.
sio = socketio.Client(http_session=transport, logger=True, engineio_logger=True)

# Connection options
connect_opts = {
    'transports': ['websocket', 'polling'],  # allow falling back to polling
}

# Extra handshake headers (cookie-based Perplexity session auth)
sio_opts = {
    'extraHeaders': {
        'Cookie': os.environ.get('PPLX_COOKIE'),
        'User-Agent': os.environ.get('USER_AGENT'),
        'Accept': '*/*',
        'priority': 'u=1, i',
        'Referer': 'https://www.perplexity.ai/',
    }
}

def log_request(ip, route, status):
    """Emit one access-log line: ISO timestamp, client IP, route, HTTP status."""
    logging.info(f"{datetime.now().isoformat()} - {ip} - {route} - {status}")

def validate_api_key():
    """Check the request's x-api-key header against the configured API_KEY.

    Returns None when the key matches; otherwise logs the rejection and
    returns a (JSON error body, 401) pair for the view to return as-is.
    """
    supplied_key = request.headers.get('x-api-key')
    if supplied_key == API_KEY:
        return None
    log_request(request.remote_addr, request.path, 401)
    return jsonify({"error": "Invalid API key"}), 401

def normalize_content(content):
    """Recursively flatten a message's ``content`` field into a plain string.

    Strings pass through unchanged; dicts become their JSON text (non-ASCII
    preserved); lists are flattened element-by-element and joined with single
    spaces; any other type collapses to the empty string.
    """
    if isinstance(content, str):
        return content
    if isinstance(content, dict):
        return json.dumps(content, ensure_ascii=False)
    if isinstance(content, list):
        return " ".join(normalize_content(part) for part in content)
    return ""

def calculate_tokens_via_tiktoken(text, model="gpt-3.5-turbo"):
    """Count tokens in *text* with tiktoken's encoder for *model*.

    Claude's tokenization is close enough to GPT's for usage reporting
    purposes, so the GPT encoder serves as the approximation here.
    """
    encoder = tiktoken.encoding_for_model(model)
    return len(encoder.encode(text))

@app.route('/')
def root():
    """Describe the proxy's API surface; doubles as a health-check endpoint."""
    log_request(request.remote_addr, request.path, 200)
    messages_endpoint = {
        "method": "POST",
        "description": "Send a message to the AI",
        "headers": {
            "x-api-key": "Your API key (required)",
            "Content-Type": "application/json"
        },
        "body": {
            "messages": "Array of message objects",
            "stream": "Boolean (true for streaming response)",
            "model": "Model to be used (optional, defaults to claude-3-opus-20240229)"
        }
    }
    return jsonify({
        "message": "Welcome to the Perplexity AI Proxy API",
        "endpoints": {"/ai/v1/messages": messages_endpoint}
    })

@app.route('/ai/v1/messages', methods=['POST'])
def messages():
    """Anthropic-compatible /messages endpoint proxied to Perplexity AI.

    Validates the API key, flattens the incoming message history into one
    prompt string, then either streams the answer back as Anthropic-style
    SSE events or delegates to handle_non_stream() for a one-shot JSON body.
    """
    auth_error = validate_api_key()
    if auth_error:
        return auth_error

    try:
        json_body = request.json
        model = json_body.get('model', 'claude-3-opus-20240229')  # model is echoed back to the client; defaults to claude-3-opus-20240229
        stream = json_body.get('stream', True)  # streaming is the default

        # Recursively flatten each msg['content'] (str/dict/list) via normalize_content
        previous_messages = "\n\n".join([normalize_content(msg['content']) for msg in json_body['messages']])

        # Approximate the input token count with tiktoken's GPT encoder
        input_tokens = calculate_tokens_via_tiktoken(previous_messages, model="gpt-3.5-turbo")

        msg_id = str(uuid.uuid4())
        response_event = Event()  # set once the upstream answer is final (or errors)
        response_text = []        # chunks received from Perplexity, drained by the generator

        if not stream:
            # Non-streaming mode: collect the full answer, then reply once
            return handle_non_stream(previous_messages, msg_id, model, input_tokens)

        # Log now, while the Flask request context is still active — the
        # generator below runs after this view function has returned.
        log_request(request.remote_addr, request.path, 200)

        def generate():
            # Emit the Anthropic SSE preamble before contacting the upstream.
            yield create_event("message_start", {
                "type": "message_start",
                "message": {
                    "id": msg_id,
                    "type": "message",
                    "role": "assistant",
                    "content": [],
                    "model": model,  # dynamic model
                    "stop_reason": None,
                    "stop_sequence": None,
                    "usage": {"input_tokens": input_tokens, "output_tokens": 1},  # dynamic input_tokens
                },
            })
            yield create_event("content_block_start", {"type": "content_block_start", "index": 0, "content_block": {"type": "text", "text": ""}})
            yield create_event("ping", {"type": "ping"})

            def on_connect():
                # Once connected, fire the single upstream query.
                logging.info("Connected to Perplexity AI")
                emit_data = {
                    "version": "2.9",
                    "source": "default",
                    "attachments": [],
                    "language": "en-GB",
                    "timezone": "Europe/London",
                    "mode": "concise",
                    "is_related_query": False,
                    "is_default_related_query": False,
                    "visitor_id": str(uuid.uuid4()),
                    "frontend_context_uuid": str(uuid.uuid4()),
                    "prompt_source": "user",
                    "query_source": "home"
                }
                sio.emit('perplexity_ask', (previous_messages, emit_data))

            def on_query_progress(data):
                # Each progress event carries the full chunk list; only the
                # newest chunk is appended for streaming out.
                nonlocal response_text
                if 'text' in data:
                    text = json.loads(data['text'])
                    chunk = text['chunks'][-1] if text['chunks'] else None
                    if chunk:
                        response_text.append(chunk)

                # Check whether this is the final response
                if data.get('final', False):
                    response_event.set()

            def on_query_complete(data):
                response_event.set()

            def on_disconnect():
                logging.info("Disconnected from Perplexity AI")
                response_event.set()

            def on_connect_error(data):
                # Surface the connection failure to the client as stream text.
                logging.error(f"Connection error: {data}")
                response_text.append(f"Error connecting to Perplexity AI: {data}")
                response_event.set()

            # NOTE(review): handlers are registered on the shared module-level
            # client each request — previous registrations are replaced.
            sio.on('connect', on_connect)
            sio.on('query_progress', on_query_progress)
            sio.on('query_complete', on_query_complete)
            sio.on('disconnect', on_disconnect)
            sio.on('connect_error', on_connect_error)

            try:
                sio.connect('wss://www.perplexity.ai/', **connect_opts, headers=sio_opts['extraHeaders'])
                
                # Poll until the final event fires, draining buffered chunks
                # into SSE delta events as they arrive.
                while not response_event.is_set():
                    sio.sleep(0.1)
                    while response_text:
                        chunk = response_text.pop(0)
                        yield create_event("content_block_delta", {
                            "type": "content_block_delta",
                            "index": 0,
                            "delta": {"type": "text_delta", "text": chunk},
                        })
                
            except Exception as e:
                logging.error(f"Error during socket connection: {str(e)}")
                yield create_event("content_block_delta", {
                    "type": "content_block_delta",
                    "index": 0,
                    "delta": {"type": "text_delta", "text": f"Error during socket connection: {str(e)}"},
                })
            finally:
                if sio.connected:
                    sio.disconnect()

            # Dynamically count output tokens with tiktoken.
            # NOTE(review): chunks already streamed were pop()ed from
            # response_text above, so this counts only leftover chunks — TODO confirm intended.
            output_tokens = calculate_tokens_via_tiktoken(''.join(response_text), model="gpt-3.5-turbo")

            yield create_event("content_block_stop", {"type": "content_block_stop", "index": 0})
            yield create_event("message_delta", {
                "type": "message_delta",
                "delta": {"stop_reason": "end_turn", "stop_sequence": None},
                "usage": {"input_tokens": input_tokens, "output_tokens": output_tokens},  # dynamic output_tokens
            })
            yield create_event("message_stop", {"type": "message_stop"})  # always finish with message_stop

        return Response(generate(), content_type='text/event-stream')

    except Exception as e:
        logging.error(f"Request error: {str(e)}")
        log_request(request.remote_addr, request.path, 400)
        return jsonify({"error": str(e)}), 400

def handle_non_stream(previous_messages, msg_id, model, input_tokens):
    """
    Handle the stream=false case: run the upstream query to completion and
    return one complete Anthropic-style JSON message.

    Args:
        previous_messages: the flattened prompt string sent upstream.
        msg_id: UUID string used as the response message id.
        model: model name echoed back to the client.
        input_tokens: pre-computed input token count for the usage block.
    """
    try:
        response_event = Event()  # set when the final chunk arrives or on error
        response_text = []        # accumulated answer chunks

        def on_connect():
            # Once connected, fire the single upstream query.
            logging.info("Connected to Perplexity AI")
            emit_data = {
                "version": "2.9",
                "source": "default",
                "attachments": [],
                "language": "en-GB",
                "timezone": "Europe/London",
                "mode": "concise",
                "is_related_query": False,
                "is_default_related_query": False,
                "visitor_id": str(uuid.uuid4()),
                "frontend_context_uuid": str(uuid.uuid4()),
                "prompt_source": "user",
                "query_source": "home"
            }
            sio.emit('perplexity_ask', (previous_messages, emit_data))

        def on_query_progress(data):
            # Each progress event carries the full chunk list; keep only the newest.
            nonlocal response_text
            if 'text' in data:
                text = json.loads(data['text'])
                chunk = text['chunks'][-1] if text['chunks'] else None
                if chunk:
                    response_text.append(chunk)

            # Check whether this is the final response
            if data.get('final', False):
                response_event.set()

        def on_disconnect():
            logging.info("Disconnected from Perplexity AI")
            response_event.set()

        def on_connect_error(data):
            # Surface connection failures in the response body text.
            logging.error(f"Connection error: {data}")
            response_text.append(f"Error connecting to Perplexity AI: {data}")
            response_event.set()

        # NOTE(review): unlike the streaming path, no 'query_complete' handler
        # is registered here — completion relies on data['final'] — TODO confirm.
        sio.on('connect', on_connect)
        sio.on('query_progress', on_query_progress)
        sio.on('disconnect', on_disconnect)
        sio.on('connect_error', on_connect_error)

        sio.connect('wss://www.perplexity.ai/', **connect_opts, headers=sio_opts['extraHeaders'])
        
        # Block (up to 30s) until the answer is complete; a timeout falls
        # through and returns whatever has accumulated so far.
        response_event.wait(timeout=30)

        # Dynamically count output tokens with tiktoken
        output_tokens = calculate_tokens_via_tiktoken(''.join(response_text), model="gpt-3.5-turbo")

        # Assemble the complete Anthropic-style message
        full_response = {
            "content": [{"text": ''.join(response_text), "type": "text"}],  # all chunks merged
            "id": msg_id,
            "model": model,  # dynamic model
            "role": "assistant",
            "stop_reason": "end_turn",
            "stop_sequence": None,
            "type": "message",
            "usage": {
                "input_tokens": input_tokens,  # dynamic input_tokens
                "output_tokens": output_tokens,  # dynamic output_tokens
            },
        }
        return Response(json.dumps(full_response, ensure_ascii=False), content_type='application/json')

    except Exception as e:
        logging.error(f"Error during socket connection: {str(e)}")
        return jsonify({"error": str(e)}), 500
    finally:
        if sio.connected:
            sio.disconnect()

@app.errorhandler(404)
def not_found(error):
    """Flask 404 handler: record the miss, then answer with plain text."""
    status = 404
    log_request(request.remote_addr, request.path, status)
    return "Not Found", status

@app.errorhandler(500)
def server_error(error):
    """Flask 500 handler: log the failure and reply with a generic message."""
    status = 500
    logging.error(f"Server error: {str(error)}")
    log_request(request.remote_addr, request.path, status)
    return "Something broke!", status

def create_event(event, data):
    """Format one Server-Sent-Events frame; dict payloads become JSON text
    (non-ASCII characters are kept unescaped)."""
    payload = json.dumps(data, ensure_ascii=False) if isinstance(data, dict) else data
    return f"event: {event}\ndata: {payload}\n\n"

if __name__ == '__main__':
    # Entry point: bind to all interfaces on $PORT (default 8081).
    listen_port = int(os.environ.get('PORT', 8081))
    logging.info(f"Perplexity proxy listening on port {listen_port}")
    if not API_KEY:
        logging.warning("Warning: PPLX_KEY environment variable is not set. API key validation will fail.")
    app.run(host='0.0.0.0', port=listen_port)