smgc committed on
Commit
0e7aa13
1 Parent(s): d315e99

Create config.toml

Browse files
Files changed (1) hide show
  1. config.toml +179 -0
config.toml ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ##
2
+ #
3
+ # [DANGER]
4
+ # The example file here is outdated,
5
+ # please visit the official documentation to get the latest configuration file example.
6
+ #
7
+ # 🔗 https://wibus-wee.github.io/raycast-unblock/
8
+ #
9
+ # [WARNING]
10
+ # The example file here only keeps the minimum available configuration,
11
+ # please refer to the official documentation for more configuration items.
12
+ # ##
13
+
14
+ [General]
15
+ # If you deploy this service on a remote server,
16
+ # it is recommended to set mode to "remote", maybe one day this program will be used
17
+ # and you **should** set host to "0.0.0.0", or you can't access the service from the outside
18
+ mode = "remote"
19
+ port = 3000
20
+ host = "0.0.0.0"
21
+ # If there are some problems, you can set debug & logger to true
22
+ debug = false
23
+ # Fastify Logger
24
+ logger = false
25
+ # If you want the service to listen to the changes of the configuration file and update automatically,
26
+ # you can set watch to true
27
+ watch = false
28
+
29
+ [General.Https]
30
+ # If you want to use HTTPS, you can set the following configuration
31
+ # enabled = true
32
+ # # You can specify the host to the certificate file (auto generate mode)
33
+ # host = '192.168.3.2'
34
+ # # You can also specify the path to your existing certificate file
35
+ # key = "path
36
+ # cert = "path"
37
+ # ca = "path"
38
+
39
+ [AI]
40
+ default = "openai"
41
+ # If the parameter is not set in the specific AI service,
42
+ # this value will be used
43
+ # For example:
44
+ # If I don't set the temperature parameter in AI.OpenAI, this value will be used
45
+ # But if I set the temperature parameter in AI.Gemini, the temperature parameter in AI.Gemini will be used
46
+ # temperature = 0.5
47
+ # max_tokens = 100
48
+
49
+ [AI.Functions]
50
+ # You should enter plugin IDs that you want to enable here.
51
+ # The following plugins are supported:
52
+ # serp, web_search
53
+ # You can go to src/features/ai/functionsCall to see more details.
54
+ plugins = [
55
+ 'serp',
56
+ 'web_search'
57
+ ]
58
+
59
+ [AI.Functions.Serp]
60
+ apyHub_api_key = "APY04V154epVA7X87TSe6OmYnPTMr58CyTExbPzUAnaFg7t2YcY46YqRMfNPDZAJQ0V5CQbuGilK6r"
61
+ # tavily_ai_api_key = "" # Tavily currently doesn't support.
62
+
63
+ [AI.OpenAI]
64
+ # If the default model is not set,
65
+ # or...
66
+ # if the default model is set,
67
+ # but the specific AI service's model is not set,
68
+ # the default model written in the code will be used
69
+ # default = "gpt-3.5-turbo-16k.legacy"
70
+ # You can edit the base_url if you want to use the custom OpenAI server
71
+ base_url = "https://api.smgc.cc/v1"
72
+ api_key = "sk-uFCx2OYWLKJaHtfy7b0cBc517f3c4cF5A1279c855f9f6aE6"
73
+
74
+ # if you'd like to use azure openai
75
+ # is_azure = true
76
+ # base_url = "https://<resource_name>.openai.azure.com"
77
+ # azure_deployment_name = "YOUR_AZURE_DEPLOYMENT_ID" # if not provided, use req.body.model
78
+
79
+ # If the parameter is set in the specific AI service
80
+ # this value will be used, and it has the highest priority
81
+ # temperature = 0.5
82
+ # max_tokens = 100
83
+
84
+ # Custom OpenAI Model
85
+ # You can add your own OpenAI model just like the following:
86
+ # # [NOTICE] You shouldn't use the dot in the model name. It will be parsed as a section
87
+ [AI.OpenAI.Models.GPT4]
88
+ id = "gpt-4-0125-preview" # if it's not provided, it will be generated from the Object key. For example, here it will be "gpt4"
89
+ model = "gpt-4-0125-preview" # if it's not provided, it will be generated from the Object key.
90
+ name = "GPT-4 (Preview)" # if it's not provided, it will be generated from the Object key.
91
+ description = "GPT-4 is OpenAI’s most capable model with broad general knowledge, allowing it to follow complex instructions and solve difficult problems.\n"
92
+ # speed = 3 # if it's not provided, the default value will be used.
93
+ # intelligence = 3 # if it's not provided, the default value will be used.
94
+ # context = 8 # if it's not provided, the default value will be used.
95
+ # status = "beta"
96
+ # [AI.OpenAI.Models.GPT4.Capabilities] # Features control
97
+ # image_generation = true # Not supported yet
98
+ # web_search = true # The premise is that the model needs to support Function Call.
99
+
100
+ [AI.OpenAI.Models.GPT4-TURBO]
101
+ id = "gpt-4-turbo"
102
+ model = "gpt-4-turbo"
103
+ name = "GPT-4 (Turbo)"
104
+ description = "GPT-4-Trubo is OpenAI’s most capable model with broad general knowledge, allowing it to follow complex instructions and solve difficult problems.\n"
105
+ # speed = 3 # if it's not provided, the default value will be used.
106
+ # intelligence = 3 # if it's not provided, the default value will be used.
107
+ # context = 8 # if it's not provided, the default value will be used.
108
+ # status = "beta"
109
+ # [AI.OpenAI.Models.GPT4-TURBO.Capabilities] # Features control
110
+ # image_generation = true # Not supported yet
111
+ web_search = true
112
+
113
+ [AI.OpenAI.Models.GPT4o]
114
+ id = "gpt-4o" # if it's not provided, it will be generated from the Object key. For example, here it will be "gpt4"
115
+ model = "gpt-4o" # if it's not provided, it will be generated from the Object key.
116
+ name = "GPT-4o" # if it's not provided, it will be generated from the Object key.
117
+ description = "GPT-4o is OpenAI’s most capable model with broad general knowledge, allowing it to follow complex instructions and solve difficult problems.\n"
118
+ # speed = 3 # if it's not provided, the default value will be used.
119
+ # intelligence = 3 # if it's not provided, the default value will be used.
120
+ # context = 8 # if it's not provided, the default value will be used.
121
+ # status = "beta"
122
+ # [AI.OpenAI.Models.GPT4o.Capabilities] # Features control
123
+ # image_generation = true # Not supported yet
124
+ # web_search = true # The premise is that the model needs to support Function Call.
125
+
126
+ [AI.Groq]
127
+ # refresh_token = '<your refresh token>'
128
+ # temperature = 0.5
129
+ # max_tokens = 100
130
+
131
+ [AI.Gemini]
132
+ api_key = ""
133
+ # temperature = 0.5
134
+ # max_tokens = 100
135
+
136
+ [AI.Cohere]
137
+ email = ""
138
+ password = ""
139
+
140
+ [Translate]
141
+ # You can choose the default translation service from the following:
142
+ # shortcut, deeplx, ai, libretranslate, google
143
+ # Default: deeplx
144
+ default = "deeplx"
145
+
146
+ # Maybe one day there will be a [Translate.Shortcuts] configuration here...
147
+ # [Translate.Shortcuts]
148
+
149
+ [Translate.DeepLX]
150
+ # proxy_endpoints = []
151
+ # access_tokens = []
152
+
153
+ [Translate.AI]
154
+ # If the default model is not set,
155
+ # or...
156
+ # if the default model is set,
157
+ # but the specific AI service's model is not set,
158
+ # the default model written in the code will be used
159
+ # Default: openai
160
+ default = "openai"
161
+ # The model used by the AI service
162
+ # (only effective for openai, groq)
163
+ # Default: gpt-3.5-turbo
164
+ model = "gpt-3.5-turbo"
165
+
166
+ [Translate.LibreTranslate]
167
+ base_url = "https://libretranslate.com"
168
+ # You can choose the type from the following:
169
+ # reserve, api
170
+ # Default: reserve
171
+ type = "reserve"
172
+ # If you choose api, you should set the api_key
173
+ api_key = ""
174
+
175
+ [Sync]
176
+ # The location of the sync file, default is "icloud", and you can also set it to "local"
177
+ # # The iCloud storage solution is only effective when deployed on the macOS client
178
+ # # **iCloud storage solution** is the *default* option in *macOS* deployments
179
+ type = "icloud" # icloud / local