import datasets
from datasets.download.download_manager import DownloadManager
import pyarrow.parquet as pq

_DESCRIPTION = """\
The Weibo NER dataset is a Chinese Named Entity Recognition dataset 
drawn from the social media website Sina Weibo.
"""

_CITATION = """\
@inproceedings{peng-dredze-2015-named,
    title = "Named Entity Recognition for {C}hinese 
        Social Media with Jointly Trained Embeddings",
    author = "Peng, Nanyun  and Dredze, Mark",
    booktitle = "Proceedings of the 2015 Conference on 
        Empirical Methods in Natural Language Processing",
    month = sep,
    year = "2015",
    address = "Lisbon, Portugal",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D15-1064",
    doi = "10.18653/v1/D15-1064",
    pages = "548--554",
}
"""

_URL = "https://huggingface.co/datasets/minskiter/weibo/resolve/main/"
_URLS = {
    "train": _URL + "data/train.parquet",
    "validation": _URL + "data/validation.parquet",
    "test": _URL + "data/test.parquet",
}

class WeiboNamedEntities(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Weibo NER corpus (Peng & Dredze, 2015)."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "labels": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                'O',
                                'B-PER.NAM',
                                'I-PER.NAM',
                                'E-PER.NAM',
                                'S-PER.NAM',
                                'B-ORG.NAM',
                                'I-ORG.NAM',
                                'E-ORG.NAM',
                                'S-ORG.NAM',
                                'B-LOC.NAM',
                                'I-LOC.NAM',
                                'E-LOC.NAM',
                                'S-LOC.NAM',
                                'B-GPE.NAM',
                                'I-GPE.NAM',
                                'E-GPE.NAM',
                                'S-GPE.NAM',
                                'B-PER.NOM',
                                'I-PER.NOM',
                                'E-PER.NOM',
                                'S-PER.NOM',
                                'B-ORG.NOM',
                                'I-ORG.NOM',
                                'E-ORG.NOM',
                                'S-ORG.NOM',
                                'B-LOC.NOM',
                                'I-LOC.NOM',
                                'E-LOC.NOM',
                                'S-LOC.NOM',
                                'B-GPE.NOM',
                                'I-GPE.NOM',
                                'E-GPE.NOM',
                                'S-GPE.NOM',
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://aclanthology.org/D15-1064/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        urls_to_download = _URLS
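        # download_and_extract caches each remote file locally and returns
        # the local paths; Parquet files are not archives, so this is
        # effectively just a cached download.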
        download_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": download_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": download_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": download_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        # Stream the Parquet file in small batches to keep memory bounded,
        # yielding one (id, example) pair per row.
        parquet_file = pq.ParquetFile(filepath)
        _id = -1
        for batch in parquet_file.iter_batches(batch_size=64):
            for row in batch.to_pylist():
                _id += 1
                yield _id, row
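

# A minimal usage sketch (assumption: this script lives in the
# `minskiter/weibo` dataset repository referenced by _URL above, so loading
# by that id runs this builder; recent `datasets` releases may also require
# trust_remote_code=True for script-based datasets).
if __name__ == "__main__":
    ds = datasets.load_dataset("minskiter/weibo")
    example = ds["train"][0]
    # ClassLabel stores integer ids; int2str recovers the tag strings.
    label_feature = ds["train"].features["labels"].feature
    print(example["text"])
    print([label_feature.int2str(i) for i in example["labels"]])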