Lowin committed
Commit 621f3fa
0 Parent(s)

init model

.gitattributes ADDED
@@ -0,0 +1,17 @@
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,33 @@
+ ---
+ language:
+ - zh
+ license:
+ - apache-2.0
+ ---
+
+ ```python
+ import jieba_fast
+ from transformers import BertTokenizer
+ from transformers import BigBirdModel
+
+ class JiebaTokenizer(BertTokenizer):
+     def __init__(
+         self, pre_tokenizer=lambda x: jieba_fast.cut(x, HMM=False), *args, **kwargs
+     ):
+         super().__init__(*args, **kwargs)
+         self.pre_tokenizer = pre_tokenizer
+
+     def _tokenize(self, text, *arg, **kwargs):
+         split_tokens = []
+         for text in self.pre_tokenizer(text):
+             if text in self.vocab:
+                 split_tokens.append(text)
+             else:
+                 split_tokens.extend(super()._tokenize(text))
+         return split_tokens
+
+ model = BigBirdModel.from_pretrained('Lowin/chinese-bigbird-small')
+ tokenizer = JiebaTokenizer.from_pretrained('Lowin/chinese-bigbird-small')
+ ```
+
+ https://github.com/LowinLi/chinese-bigbird
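
A quick smoke test of the README snippet, once `model` and `tokenizer` are defined as above (the sample sentence is an illustrative assumption; the hidden size of 512 follows from config.json below):

```python
import torch

# Continuing from the README snippet above, which defines `model` and `tokenizer`.
inputs = tokenizer("北京是中国的首都", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 512])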
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "/home/lilongwei/chinese-bigbird-small/",
+   "architectures": [
+     "BigBirdModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "attention_type": "block_sparse",
+   "block_size": 64,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "gradient_checkpointing": true,
+   "hidden_act": "gelu_fast",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 512,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 1024,
+   "model_type": "big_bird",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 6,
+   "num_random_blocks": 3,
+   "pad_token_id": 0,
+   "rescale_embeddings": false,
+   "sep_token_id": 66,
+   "torch_dtype": "float32",
+   "transformers_version": "4.9.0",
+   "type_vocab_size": 2,
+   "use_bias": true,
+   "use_cache": true,
+   "vocab_size": 30000
+ }
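
Two of these fields matter most in practice: `attention_type` is `block_sparse` (with `block_size` 64 and `num_random_blocks` 3), and `max_position_embeddings` caps inputs at 1024 tokens. Block-sparse attention only pays off for long inputs; for short ones, transformers falls back to full attention anyway, and it can be requested explicitly at load time. A minimal sketch, using the standard `from_pretrained` config-override kwargs:

```python
from transformers import BigBirdModel

# Any config field, such as attention_type, can be overridden via
# from_pretrained keyword arguments; "original_full" selects ordinary
# quadratic attention instead of BigBird's block-sparse pattern.
model = BigBirdModel.from_pretrained(
    "Lowin/chinese-bigbird-small",
    attention_type="original_full",
)
```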
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8478dd9309c3268a0f3f5d1c741aecf10f6068c715f5843659acca6f073b4f78
+ size 165491767
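
This is a Git LFS pointer, not the weights themselves; the `oid` is the SHA-256 of the real ~165 MB file. A minimal integrity check for a downloaded copy (the local path is an assumption):

```python
import hashlib

# Hash the downloaded checkpoint in 1 MiB chunks and compare
# against the LFS oid in the pointer file above.
h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == "8478dd9309c3268a0f3f5d1c741aecf10f6068c715f5843659acca6f073b4f78"
```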
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "/home/lilongwei/chinese-bigbird-small/", "tokenizer_class": "JiebaTokenizer"}
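
Note that `tokenizer_class` names `JiebaTokenizer`, a custom subclass that does not ship with transformers, so `AutoTokenizer` cannot resolve it; the class from the README has to be defined or imported first. A sketch of inspecting the resulting tokenization (the sample phrase is an illustrative assumption):

```python
# JiebaTokenizer is the custom subclass defined in the README above,
# not a class from the transformers package itself.
tokenizer = JiebaTokenizer.from_pretrained("Lowin/chinese-bigbird-small")

# Words segmented by jieba that exist in vocab.txt stay whole; anything
# else falls back to WordPiece via BertTokenizer._tokenize.
print(tokenizer.tokenize("今天天气很好"))
```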
vocab.txt ADDED
The diff for this file is too large to render. See raw diff