File size: 3,591 Bytes
c37b750
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67c68b5
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
import tiktoken

# Mapping of model names to their respective encodings.
# Models are grouped by the tiktoken base encoding they share, then the
# flat model -> Encoding dict is expanded from that table.
_BASE_ENCODING_MODELS = (
    ("cl100k_base", ("gpt-4", "gpt-3.5-turbo", "gpt-3.5-turbo-0301")),
    ("p50k_base", ("text-davinci-003", "text-davinci-002")),
    ("r50k_base", (
        "text-davinci-001", "text-curie-001", "text-babbage-001",
        "text-ada-001", "davinci", "curie", "babbage", "ada",
    )),
)
ENCODINGS = {
    model: tiktoken.get_encoding(base)
    for base, models in _BASE_ENCODING_MODELS
    for model in models
}

# Mapping of model names to their respective maximum context lengths.
# Models sharing a context window are grouped by limit; the flat
# model -> int dict is expanded from that table (insertion order preserved).
_CONTEXT_LIMITS = (
    (8192, ("gpt-4",)),
    (4096, ("gpt-3.5-turbo", "gpt-3.5-turbo-0301",
            "text-davinci-003", "text-davinci-002")),
    (2049, ("text-davinci-001", "text-curie-001", "text-babbage-001",
            "text-ada-001", "davinci", "curie", "babbage", "ada")),
)
MAX_LENGTH = {
    model: limit
    for limit, models in _CONTEXT_LIMITS
    for model in models
}

def count_tokens(model_name, text):
    """
    Count the number of tokens for a given model and text.

    Parameters:
    - model_name (str): The name of the model.
    - text (str): The input text.

    Returns:
    - int: The number of tokens.

    Raises:
    - ValueError: If the model name has no registered encoding.
    """
    encoding = ENCODINGS.get(model_name)
    if encoding is None:
        raise ValueError(f"Model name '{model_name}' not found in encodings.")
    return len(encoding.encode(text))

def get_max_context_length(model_name):
    """
    Get the maximum context length for a given model.

    Parameters:
    - model_name (str): The name of the model.

    Returns:
    - int: The maximum context length.

    Raises:
    - ValueError: If the model name has no registered context length.
    """
    # Happy path first; unknown models fall through to the error.
    if model_name in MAX_LENGTH:
        return MAX_LENGTH[model_name]
    raise ValueError(f"Model name '{model_name}' not found in max length dictionary.")

def get_token_ids_for_text(model_name, text):
    """
    Get unique token IDs for a given text using the specified model's encoding.

    Parameters:
    - model_name (str): The name of the model.
    - text (str): The input text.

    Returns:
    - list: A list of unique token IDs.

    Raises:
    - ValueError: If the model name has no registered encoding.
    """
    if model_name not in ENCODINGS:
        raise ValueError(f"Model name '{model_name}' not found in encodings.")
    # Deduplicate via a set before materializing the list.
    unique_ids = set(ENCODINGS[model_name].encode(text))
    return list(unique_ids)

def get_token_ids_for_task_parsing(model_name):
    """
    Get unique token IDs for task parsing.

    Parameters:
    - model_name (str): The name of the model.

    Returns:
    - list: A list of unique token IDs for task parsing.
    """
    # Fixed vocabulary of task-name strings (JSON-like fragment); presumably
    # the caller wants these tokenized so task names can be recognized in
    # model output — verify against callers.
    text = '''{"task": "text-classification", "token-classification", "text2text-generation", "summarization", "translation", "question-answering", "conversational", "text-generation", "sentence-similarity", "tabular-classification", "object-detection", "image-classification", "image-to-image", "image-to-text", "text-to-image", "visual-question-answering", "document-question-answering", "image-segmentation", "text-to-speech", "text-to-video", "automatic-speech-recognition", "audio-to-audio", "audio-classification", "canny-control", "hed-control", "mlsd-control", "normal-control", "openpose-control", "canny-text-to-image", "depth-text-to-image", "hed-text-to-image", "mlsd-text-to-image", "normal-text-to-image", "openpose-text-to-image"}'''
    
    # NOTE(review): implementation is unfinished as visible here — the function
    # currently falls through and returns None, contradicting the documented
    # return value. It likely should encode `text` with the model's encoding
    # and return the unique token IDs (mirroring get_token_ids_for_text), but
    # the remainder of the body may lie outside this chunk — confirm before use.
    # Continue with your function logic here...