Xenova committed
Commit 1b694f5
Parent: 1ca7b34

Update tokenizer files (#7)


- Update tokenizer files (81b2391592079a59cae85f823149cb45e4380c59)
- Update tokenizer_config.json (ac50dcf8bba20f9325f890131c15624f6389d0af)

Files changed (2)
  1. tokenizer.json +1 -1
  2. tokenizer_config.json +1 -1
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9cf8cddb82b68ab495546855952d707acfa17f23c97031e0cf346b207d73faae
+oid sha256:3f289bc05132635a8bc7aca7aa21255efd5e18f3710f43e3cdb96bcd41be4922
 size 17525357
tokenizer_config.json CHANGED
@@ -1996,7 +1996,7 @@
   }
 },
 "bos_token": "<bos>",
-"chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
+"chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
 "clean_up_tokenization_spaces": false,
 "eos_token": "<eos>",
 "model_max_length": 1000000000000000019884624838656,