{ "add_bos_token": false, "add_prefix_space": false, "added_tokens_decoder": { "0": { "content": "<|end_of_text|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "1": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "2": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "3": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "4": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "5": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "6": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "7": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "8": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "9": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "10": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "11": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "12": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "13": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "14": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "15": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "16": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "17": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "18": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "49152": { "content": "<|start_of_role|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "49153": { "content": "<|end_of_role|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "49154": { "content": "<|tool_call|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true } }, "additional_special_tokens": [ "<|start_of_role|>", "<|end_of_role|>", "<|tool_call|>" ], "bos_token": "<|end_of_text|>", "chat_template": "{%- if tools %}\n {{- '<|start_of_role|>available_tools<|end_of_role|>\n' }}\n {%- for tool in tools %}\n {{- tool | tojson(indent=4) }}\n {%- if not loop.last %}\n {{- '\n\n' }}\n {%- endif %}\n {%- endfor %}\n {{- '<|end_of_text|>\n' }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{- '<|start_of_role|>system<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- elif message['role'] == 'user' %}\n {{- '<|start_of_role|>user<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- elif message['role'] == 'assistant' %}\n {{- '<|start_of_role|>assistant<|end_of_role|>' + 
message['content'] + '<|end_of_text|>\n' }}\n {%- elif message['role'] == 'assistant_tool_call' %}\n {{- '<|start_of_role|>assistant<|end_of_role|><|tool_call|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- elif message['role'] == 'tool_response' %}\n {{- '<|start_of_role|>tool_response<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- endif %}\n {%- if loop.last and add_generation_prompt %}\n {{- '<|start_of_role|>assistant<|end_of_role|>' }}\n {%- endif %}\n{%- endfor %}", "clean_up_tokenization_spaces": true, "eos_token": "<|end_of_text|>", "errors": "replace", "model_max_length": 9223372036854775807, "pad_token": "<|end_of_text|>", "padding_side": "left", "tokenizer_class": "GPT2Tokenizer", "unk_token": "<|end_of_text|>", "vocab_size": 49152 }
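For reference, a minimal sketch of how the chat_template above is rendered at inference time through Hugging Face transformers' standard apply_chat_template API. The model id, messages, and tool schema below are illustrative assumptions, not part of this config; any checkpoint shipping this tokenizer_config.json behaves the same way.

from transformers import AutoTokenizer

# Assumed model id for illustration only.
tokenizer = AutoTokenizer.from_pretrained("ibm-granite/granite-3.0-2b-instruct")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]

# add_generation_prompt=True triggers the template's final branch, appending
# '<|start_of_role|>assistant<|end_of_role|>' so the model continues as the assistant.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|start_of_role|>system<|end_of_role|>You are a helpful assistant.<|end_of_text|>
# <|start_of_role|>user<|end_of_role|>What is 2 + 2?<|end_of_text|>
# <|start_of_role|>assistant<|end_of_role|>

# Passing tools= renders the leading '<|start_of_role|>available_tools<|end_of_role|>'
# block, with each tool serialized by tojson(indent=4). The schema is hypothetical.
tools = [{"name": "get_weather",
          "description": "Look up the current weather for a city.",
          "parameters": {"city": {"type": "string"}}}]
prompt = tokenizer.apply_chat_template(messages, tools=tools, tokenize=False,
                                       add_generation_prompt=True)

Note that add_bos_token is false and every role turn is closed by the eos_token <|end_of_text|>, so turn boundaries come entirely from the template rather than from tokenizer-level BOS insertion.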