Text Generation
Transformers
PyTorch
Safetensors
English
llama
conversational
text-generation-inference
Inference Endpoints
hamishivi chujiezheng committed on
Commit 8eaeb5c
1 Parent(s): 60c3bde

Update tokenizer_config.json (#4)

- Update tokenizer_config.json (8055c3e0aa6766f24d7cdeb4ed6a60d48fc97b0f)


Co-authored-by: Chujie Zheng <chujiezheng@users.noreply.huggingface.co>

Files changed (1)
  1. tokenizer_config.json +36 -1
tokenizer_config.json CHANGED
@@ -1 +1,36 @@
- {"add_bos_token": true, "add_eos_token": false, "model_max_length": 2048, "pad_token": null, "sp_model_kwargs": {}, "tokenizer_class": "LlamaTokenizer", "clean_up_tokenization_spaces": false, "bos_token": {"__type": "AddedToken", "content": "<s>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}, "eos_token": {"__type": "AddedToken", "content": "</s>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}, "unk_token": {"__type": "AddedToken", "content": "<unk>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}}
+ {
+    "add_bos_token":true,
+    "add_eos_token":false,
+    "model_max_length":2048,
+    "pad_token":null,
+    "sp_model_kwargs":{
+
+    },
+    "tokenizer_class":"LlamaTokenizer",
+    "clean_up_tokenization_spaces":false,
+    "bos_token":{
+       "__type":"AddedToken",
+       "content":"<s>",
+       "lstrip":false,
+       "normalized":true,
+       "rstrip":false,
+       "single_word":false
+    },
+    "eos_token":{
+       "__type":"AddedToken",
+       "content":"</s>",
+       "lstrip":false,
+       "normalized":true,
+       "rstrip":false,
+       "single_word":false
+    },
+    "unk_token":{
+       "__type":"AddedToken",
+       "content":"<unk>",
+       "lstrip":false,
+       "normalized":true,
+       "rstrip":false,
+       "single_word":false
+    },
+    "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"
+ }
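
The added chat_template formats conversations with <|user|> and <|assistant|> turn markers. Below is a minimal sketch of rendering a prompt with this template via transformers' apply_chat_template; the repository id is a placeholder, not taken from this commit.

# Minimal sketch: render a conversation with the chat_template added in this
# commit. The repo id is a placeholder -- substitute the model repository that
# this tokenizer_config.json belongs to.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("org/model-with-this-tokenizer")  # placeholder repo id

messages = [
    {"role": "user", "content": "What is the capital of France?"},
]

# add_generation_prompt=True triggers the template's final branch, appending a
# trailing '<|assistant|>' marker so generation continues as the assistant.
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# Roughly:
# <|user|>
# What is the capital of France?
# <|assistant|>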