minyichen committed on
Commit
18ccf4e
1 Parent(s): 609a0eb

Upload tokenizer_config.json

Browse files

The current chat_template appends an extra ChatML-style EOS token (`<|im_end|>`) to the end of the prompt when `add_generation_prompt=False`.
Please replace it with the correct chat_template to fix this behavior.
```
from transformers import AutoTokenizer
message = [{"role": "user" , "content": 'How are you?'}]
tame_tokenizer = AutoTokenizer.from_pretrained("yentinglin/Llama-3-Taiwan-8B-Instruct")
tame_tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=False)
```
You can see an extra `<|im_end|>` token in the output:
```
<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\nHow are you?<|eot_id|><|im_end|>
```

Files changed (1) hide show
  1. tokenizer_config.json +2 -19
tokenizer_config.json CHANGED
@@ -2047,33 +2047,16 @@
2047
  "rstrip": false,
2048
  "single_word": false,
2049
  "special": true
2050
- },
2051
- "128256": {
2052
- "content": "<|im_end|>",
2053
- "lstrip": false,
2054
- "normalized": false,
2055
- "rstrip": false,
2056
- "single_word": false,
2057
- "special": true
2058
- },
2059
- "128257": {
2060
- "content": "<|im_start|>",
2061
- "lstrip": false,
2062
- "normalized": false,
2063
- "rstrip": false,
2064
- "single_word": false,
2065
- "special": false
2066
  }
2067
  },
2068
  "bos_token": "<|begin_of_text|>",
2069
- "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% else %}{{ eos_token }}{% endif %}",
2070
  "clean_up_tokenization_spaces": true,
2071
- "eos_token": "<|im_end|>",
2072
  "model_input_names": [
2073
  "input_ids",
2074
  "attention_mask"
2075
  ],
2076
  "model_max_length": 1000000000000000019884624838656,
2077
- "pad_token": "<|end_of_text|>",
2078
  "tokenizer_class": "PreTrainedTokenizerFast"
2079
  }
 
2047
  "rstrip": false,
2048
  "single_word": false,
2049
  "special": true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2050
  }
2051
  },
2052
  "bos_token": "<|begin_of_text|>",
2053
+ "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
2054
  "clean_up_tokenization_spaces": true,
2055
+ "eos_token": "<|eot_id|>",
2056
  "model_input_names": [
2057
  "input_ids",
2058
  "attention_mask"
2059
  ],
2060
  "model_max_length": 1000000000000000019884624838656,
 
2061
  "tokenizer_class": "PreTrainedTokenizerFast"
2062
  }