minskiter committed
Commit 7b60efa
1 Parent(s): ba186f4

feat(data): change bmes to parquet

data/{test.char.bmes → test.parquet} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:54d4eae480d0175b08c2967d7b325471789038948f2b43bac21900b66b8a4960
-size 93851
+oid sha256:516e640ed45b46302dbe2adf133596e52b764f5de05d8a9da603b2b4db7ce202
+size 47490
data/{train.char.bmes → train.parquet} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:64a6cddf1cc3e0238aae5da70e50b95ef85d236235562ddad42e1ebe1bb9d7fe
-size 461990
+oid sha256:c94ad632fd3f9b1c32a42c55d443cba30cd2ada3d9940684326eb1ea1abfceea
+size 214937
data/{validation.char.bmes → validation.parquet} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9515ea6d85c9304b7f172f8b352b462b15d80d369a8d9607a8dfda1599740a92
-size 91251
+oid sha256:2f95b23904d28860c1703fd8a3649d64a320011e5f4076a078c9680f4414cf95
+size 46872
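The parquet shards are roughly half the size of the BMES sources they replace. The conversion script itself is not part of this commit; a minimal sketch of how each *.char.bmes file could have been rewritten as a parquet shard, assuming the same per-sentence "text"/"labels" schema that the old loader produced, might look like this (bmes_to_parquet is a hypothetical helper, not from the repository):

# Hypothetical conversion sketch (not part of this commit): turn one *.char.bmes
# file into a parquet shard with list-valued "text" and "labels" columns,
# mirroring the sentence-splitting logic of the old _generate_examples.
import pyarrow as pa
import pyarrow.parquet as pq

def bmes_to_parquet(src_path, dst_path):
    columns = {"text": [], "labels": []}
    chars, labels = [], []
    with open(src_path, "r", encoding="utf-8") as f:
        for line in f:
            if not line.strip():               # blank line closes a sentence
                if chars:
                    columns["text"].append(chars)
                    columns["labels"].append(labels)
                    chars, labels = [], []
                continue
            char, label = line.split(" ")      # "<char> <BMES label>"
            chars.append(char.strip() or " ")  # keep a literal space character
            labels.append(label.strip())
    if chars:                                  # flush the final sentence
        columns["text"].append(chars)
        columns["labels"].append(labels)
    pq.write_table(pa.table(columns), dst_path)

# e.g. bmes_to_parquet("data/train.char.bmes", "data/train.parquet")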
weibo.py CHANGED
@@ -1,5 +1,6 @@
 import datasets
 from datasets.download.download_manager import DownloadManager
+import pyarrow.parquet as pq
 
 _DESCRIPTION = """\
 The Weibo NER dataset is a Chinese Named Entity Recognition dataset
@@ -25,9 +26,9 @@ _CITATION = """\
 
 _URL = "https://huggingface.co/datasets/minskiter/weibo/resolve/main/"
 _URLS = {
-    "train": _URL + "data/train.char.bmes",
-    "validation": _URL + "data/validation.char.bmes",
-    "test": _URL + "data/test.char.bmes",
+    "train": _URL + "data/train.parquet",
+    "validation": _URL + "data/validation.parquet",
+    "test": _URL + "data/test.parquet",
 }
 
 class WeiboNamedEntities(datasets.GeneratorBasedBuilder):
@@ -103,26 +104,13 @@ class WeiboNamedEntities(datasets.GeneratorBasedBuilder):
             ),
         ]
 
-    def _default_example(self):
-        return {"text": [], "labels": []}
-
     def _generate_examples(self, filepath):
-        with open(filepath, "r", encoding="utf-8") as f:
-            example = self._default_example()
-            _id = 0
-            for line in f:
-                if len(line.strip()) == 0:
-                    if len(example["text"]) > 0:
-                        yield _id, example
-                        example = self._default_example()
-                        _id += 1
-                    continue
-                char, label = line.split(" ")
-                char = char.strip()
-                if char == "":
-                    char = " "
-                label = label.strip()
-                example["text"].append(char)
-                example["labels"].append(label)
-            if len(example["text"]) > 0:
-                yield _id, example
+        file = pq.ParquetFile(filepath)
+        _id = -1
+        for i in file.iter_batches(batch_size=64):
+            rows = i.to_pylist()
+            for row in rows:
+                _id+=1
+                yield _id, row
+
+
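After this change the loader no longer parses BMES lines; it streams rows straight out of the parquet shards in batches of 64 and yields each row as an example. A quick way to sanity-check the result, as a sketch (split names come from _URLS above; newer datasets releases may also require trust_remote_code=True for script-based datasets):

# Hypothetical quick check (not part of the commit): load the dataset through
# the updated script and inspect one example's "text"/"labels" columns.
from datasets import load_dataset

ds = load_dataset("minskiter/weibo", trust_remote_code=True)
print(ds)              # DatasetDict with train / validation / test splits
print(ds["train"][0])  # e.g. {"text": [...], "labels": [...]}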