# Configuration version (required)
version: 1.0.2

# Cache settings: Set to true to enable caching
cache: true

# Example Registration Object Structure (optional)
registration:
  socialLogins: ["github", "google", "discord", "openid", "facebook"]
  # allowedDomains:
    # - "gmail.com"

# Definition of custom endpoints
endpoints:
  custom:
    # Mistral AI API
    - name: "Mistral"  # Unique name for the endpoint
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # Recommended environment variables:
      apiKey: "${MISTRAL_API_KEY}"
      baseURL: "https://api.mistral.ai/v1"

      # Models configuration
      models: 
        # List of default models to use. At least one value is required.
        default: ["mistral-tiny", "mistral-small", "mistral-medium"]
        # Fetch option: Set to true to fetch models from API.
        fetch: true  # Defaults to false.

      # Optional configurations
      
      # Title Conversation setting
      titleConvo: true  # Set to true to enable automatic title generation for conversations

      # Title Method: Choose between "completion" or "functions".
      titleMethod: "completion"  # Defaults to "completion" if omitted.

      # Title Model: Specify the model to use for titles.
      titleModel: "mistral-tiny"  # Defaults to "gpt-3.5-turbo" if omitted.

      # Summarize setting: Set to true to enable summarization.
      summarize: false

      # Summary Model: Specify the model to use if summarization is enabled.
      summaryModel: "mistral-tiny"  # Defaults to "gpt-3.5-turbo" if omitted.

      # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
      forcePrompt: false

      # The label displayed for the AI model in messages.
      modelDisplayLabel: "Mistral"  # Default is "AI" when not set.

      # Additional parameters to add to the request. Default params will be overwritten.
      addParams:
        safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
        
      # Default parameters to drop from the request. See the default params in the guide linked below.
      # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]
      
    # OpenRouter.ai Example
    - name: "OpenRouter"
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # Recommended environment variables:
      # Known issue: do not use `OPENROUTER_API_KEY`, as it will override the `openAI` endpoint to use OpenRouter as well.
      apiKey: "${OPENROUTER_KEY}"
      baseURL: "https://openrouter.ai/api/v1"
      models:
        default: ["gpt-3.5-turbo"]
        fetch: true
      titleConvo: true
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
      modelDisplayLabel: "OpenRouter"

# See the Custom Configuration Guide for more information:
# https://docs.librechat.ai/install/configuration/custom_config.html