File size: 6,770 Bytes
0e7aa13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c2b2482
0e7aa13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c2b2482
0e7aa13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c2b2482
 
 
 
 
0e7aa13
 
 
 
 
 
 
 
c2b2482
 
 
 
 
0e7aa13
 
 
 
 
 
c2b2482
0e7aa13
 
c2b2482
0e7aa13
 
c2b2482
0e7aa13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c2b2482
0e7aa13
c2b2482
0e7aa13
 
 
 
 
c2b2482
 
0e7aa13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c2b2482
 
0e7aa13
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
[General]
# If you deploy this service on a remote server,
# it is recommended to set mode to "remote",
# and you **should** set host to "0.0.0.0", or you can't access the service from the outside
mode = "remote"
port = 3000
host = "0.0.0.0"
# If there are some problems, you can set debug & logger to true
debug = false
# Fastify Logger
logger = false
# If you want the service to listen to the changes of the configuration file and update automatically,
# you can set watch to true
watch = false

[General.Https]
# If you want to use HTTPS, you can set the following configuration
# enabled = true
# # You can specify the host to the certificate file (auto generate mode)
# host = '192.168.110.254'
# # You can also specify the path to your existing certificate file
# key = "path"
# cert = "path"
# ca = "path"

[AI]
default = "openai"
# If the parameter is not set in the specific AI service,
# this value will be used
# For example:
#    If I don't set the temperature parameter in AI.OpenAI, this value will be used
#    But if I set the temperature parameter in AI.Gemini, the temperature parameter in AI.Gemini will be used
# temperature = 0.5
# max_tokens = 100

[AI.Functions]
# Plugin IDs to enable for function calling.
# Supported plugins: serp, web_search
# See src/features/ai/functionsCall for more details.
plugins = ["serp", "web_search"]

[AI.Functions.Serp]
# SECURITY: a live-looking APYHub API key was committed here in plaintext.
# Rotate that key immediately and never commit real secrets to version control;
# supply the key at deploy time instead.
apiHub_api_key = ""
# tavily_ai_api_key = "" # Tavily currently doesn't support.

[AI.OpenAI]
# If the default model is not set,
# or...
# if the default model is set,
# but the specific AI service's model is not set,
# the default model written in the code will be used
# default = "gpt-3.5-turbo-16k.legacy"
# You can edit the base_url if you want to use the custom OpenAI server
base_url = "https://api.smgc.cc/v1"
# SECURITY: a live-looking "sk-..." API key was committed here in plaintext.
# Rotate that key immediately and never commit real secrets to version control;
# supply the key at deploy time instead.
api_key = ""

# if you'd like to use azure openai
# is_azure = true
# base_url = "https://<resource_name>.openai.azure.com"
# azure_deployment_name = "YOUR_AZURE_DEPLOYMENT_ID" # if not provided, use req.body.model

# If the parameter is set in the specific AI service
# this value will be used, and it has the highest priority
# temperature = 0.5
# max_tokens = 100

# Custom OpenAI Model
# You can add your own OpenAI model just like the following:
# # [NOTICE] You shouldn't use the dot in the model name. It will be parsed as a section
[AI.OpenAI.Models.GPT4P]
id = "openai-gpt-4-preview" # if it's not provided, it will be derived from the Object key ("GPT4P" here).
model = "gpt-4-preview" # if it's not provided, it will be derived from the Object key.
name = "GPT-4 Preview" # if it's not provided, it will be derived from the Object key.
description = "GPT-4 Preview from OpenAI has a big context window that fits hundreds of pages of text, making it a great choice for workloads that involve longer prompts.\n"
# speed = 3  # if it's not provided, the default value will be used.
# intelligence = 3  # if it's not provided, the default value will be used.
# context = 8  # if it's not provided, the default value will be used.
# status = "beta"
# [AI.OpenAI.Models.GPT4P.Capabilities] # Features control
# image_generation = true # Not supported yet
# web_search = true # The premise is that the model needs to support Function Call.

[AI.OpenAI.Models.GPT4T]
id = "openai-gpt-4-turbo" # if it's not provided, it will be derived from the Object key ("GPT4T" here).
model = "gpt-4-turbo" # if it's not provided, it will be derived from the Object key.
name = "GPT-4 Turbo" # if it's not provided, it will be derived from the Object key.
description = "GPT-4 Turbo from OpenAI has a big context window that fits hundreds of pages of text, making it a great choice for workloads that involve longer prompts.\n"
# speed = 3  # if it's not provided, the default value will be used.
# intelligence = 3  # if it's not provided, the default value will be used.
# context = 8  # if it's not provided, the default value will be used.
# status = "beta"
# [AI.OpenAI.Models.GPT4T.Capabilities] # Features control
# image_generation = true # Not supported yet
# web_search = true # The premise is that the model needs to support Function Call.

[AI.OpenAI.Models.GPT4o]
id = "openai-gpt-4o" # if it's not provided, it will be derived from the Object key ("GPT4o" here).
model = "gpt-4o" # if it's not provided, it will be derived from the Object key.
name = "GPT-4o" # if it's not provided, it will be derived from the Object key.
description = "GPT-4o is the most advanced and fastest model from OpenAI, making it a great choice for complex everyday problems and deeper conversations.\n"
# speed = 3  # if it's not provided, the default value will be used.
# intelligence = 3  # if it's not provided, the default value will be used.
# context = 8  # if it's not provided, the default value will be used.
# status = "beta"
# [AI.OpenAI.Models.GPT4o.Capabilities] # Features control
# image_generation = true # Not supported yet
# web_search = true # The premise is that the model needs to support Function Call.

[AI.Groq]
# refresh_token = '<your refresh token>'
# temperature = 0.5
# max_tokens = 100

[AI.Gemini]
api_key = ""
# temperature = 0.5
# max_tokens = 100

[AI.Cohere]
email = ""
password = ""

[Translate]
# You can choose the default translation service from the following:
# shortcut, deeplx, ai, libretranslate
# Default: deeplx
default = "ai"

# Maybe one day there will be a [Translate.Shortcuts] configuration here...
# [Translate.Shortcuts]

[Translate.DeepLX]
# proxy_endpoint = ""
# access_token = ""

[Translate.AI]
# If the default model is not set,
# or...
# if the default model is set,
# but the specific AI service's model is not set,
# the default model written in the code will be used
# Default: openai
default = "openai"
# The model used by the AI service
# (only effective for openai, groq)
# Default: gpt-3.5-turbo
model = "gpt-3.5-turbo"

[Translate.LibreTranslate]
base_url = "https://libretranslate.com"
# You can choose the type from the following:
# reserve, api
# Default: reserve
type = "reserve"
# If you choose api, you should set the api_key
api_key = ""
# The following is legacy configuration;
# it will be removed in a future release.

[Sync]
# The location of the sync file: "icloud" (the default) or "local".
# The iCloud storage option only takes effect when deployed on the macOS client.
type = "icloud" # icloud / local