TrgTuan10 committed on
Commit f46378e
1 Parent(s): 2c34229

add new tokenizer

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "[PAD]": 49152
+}
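
This file maps the new token text to its id. A change like this is typically produced with transformers' add_special_tokens followed by save_pretrained; the sketch below is an assumed workflow, not taken from the commit, and the base repo id and output path are placeholders.

# Minimal sketch (assumed workflow): register [PAD] as the pad token and
# save the resulting tokenizer files.
from transformers import AutoTokenizer

# Placeholder base checkpoint; the actual source repo is not shown in the commit.
tokenizer = AutoTokenizer.from_pretrained("org/base-model")

# [PAD] receives the next free id; here that is 49152, one past the old vocabulary.
tokenizer.add_special_tokens({"pad_token": "[PAD]"})

# Writes added_tokens.json, special_tokens_map.json, tokenizer.json and
# tokenizer_config.json (the exact file set depends on the tokenizer class
# and transformers version).
tokenizer.save_pretrained("./tokenizer-with-pad")

Pushing such a save directory to the Hub would yield a commit touching the same four files changed here.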
special_tokens_map.json CHANGED
@@ -32,6 +32,13 @@
     "rstrip": false,
     "single_word": false
   },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
tokenizer.json CHANGED
@@ -155,6 +155,15 @@
       "rstrip": false,
       "normalized": false,
       "special": true
+    },
+    {
+      "id": 49152,
+      "content": "[PAD]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
     }
   ],
   "normalizer": null,
tokenizer_config.json CHANGED
@@ -136,6 +136,14 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "49152": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
   "additional_special_tokens": [
@@ -161,6 +169,7 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
   "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "[PAD]",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>",
   "vocab_size": 49152