whyen-wang committed on
Commit
56a0506
1 Parent(s): c5c3e8b

Convert dataset to Parquet (#1)

Browse files

- Convert dataset to Parquet (6129f4ee0a2a8d07dc595171ed0e1583698a6601)
- Delete loading script (fd76f4ba4eaac7926d38acda4303b68423c16219)
- Delete data file (8e6f2247a3d140ea67d2caad94a869329693ed93)
- Delete data file (53444c520554488947c4e92c57bb36620e92e04d)
- Delete loading script auxiliary file (3cb87bc458705410bf77255a00f1cd617b09de48)
- Delete data file (b1a7d143d95ba59ea6d45c2f5fa8020dc43081a9)

.gitignore DELETED
@@ -1 +0,0 @@
1
- __pycache__/
 
 
README.md CHANGED
@@ -1,7 +1,43 @@
1
  ---
 
 
2
  task_categories:
3
  - image-classification
4
  pretty_name: MNIST
5
- size_categories:
6
- - n<1K
7
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ size_categories:
3
+ - n<1K
4
  task_categories:
5
  - image-classification
6
  pretty_name: MNIST
7
+ dataset_info:
8
+ features:
9
+ - name: image
10
+ dtype:
11
+ image:
12
+ mode: L
13
+ - name: label
14
+ dtype:
15
+ class_label:
16
+ names:
17
+ '0': '0'
18
+ '1': '1'
19
+ '2': '2'
20
+ '3': '3'
21
+ '4': '4'
22
+ '5': '5'
23
+ '6': '6'
24
+ '7': '7'
25
+ '8': '8'
26
+ '9': '9'
27
+ splits:
28
+ - name: train
29
+ num_bytes: 17223300.0
30
+ num_examples: 60000
31
+ - name: test
32
+ num_bytes: 2875182.0
33
+ num_examples: 10000
34
+ download_size: 18157556
35
+ dataset_size: 20098482.0
36
+ configs:
37
+ - config_name: default
38
+ data_files:
39
+ - split: train
40
+ path: data/train-*
41
+ - split: test
42
+ path: data/test-*
43
+ ---
__init__.py DELETED
File without changes
data/{test.csv → test-00000-of-00001.parquet} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:eff039c809f429c2de65bc69adc3f7a6ebd2ecd286a7e7e6ec39f114b64e7443
3
- size 18299664
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ba50678b3c74a32dbda3e0b7aa2bc8ddd0c772a62e1533d4a540b1caa6dc999
3
+ size 2595915
data/{train.csv → train-00000-of-00001.parquet} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:71f0e2ff351343e4d1217bc2e73c3127e219789f4df0fafa08ea19ad38057da4
3
- size 109636215
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c190ff735dd00eb84abf76313046e87998677797c4ae8e0c9628ee071687e875
3
+ size 15561641
mnist.py DELETED
@@ -1,86 +0,0 @@
1
- import csv
2
- import datasets
3
- import numpy as np
4
-
5
- _HOMEPAGE = 'http://yann.lecun.com/exdb/mnist/'
6
- _LICENSE = '-'
7
- _DESCRIPTION = '''\
8
- The MNIST database of handwritten digits, available from this page, has a training set of 60,000 examples, and a test set of 10,000 examples.
9
- It is a subset of a larger set available from NIST.
10
- The digits have been size-normalized and centered in a fixed-size image.
11
- '''
12
- _CITATION = '''-'''
13
- _NAMES = list('0123456789')
14
-
15
-
16
- class MNISTConfig(datasets.BuilderConfig):
17
- '''Builder Config for MNIST'''
18
-
19
- def __init__(
20
- self, description, homepage, **kwargs
21
- ):
22
- super(MNISTConfig, self).__init__(
23
- version=datasets.Version('1.0.0', ''),
24
- **kwargs
25
- )
26
- self.description = description
27
- self.homepage = homepage
28
- self.train_image_url = 'data/train.csv'
29
- self.test_image_url = 'data/test.csv'
30
-
31
-
32
- class MNIST(datasets.GeneratorBasedBuilder):
33
- BUILDER_CONFIGS = [
34
- MNISTConfig(
35
- description=_DESCRIPTION,
36
- homepage=_HOMEPAGE
37
- )
38
- ]
39
-
40
- def _info(self):
41
- features = datasets.Features({
42
- 'image': datasets.Image(mode='L', decode=True, id=None),
43
- 'label': datasets.ClassLabel(names=_NAMES)
44
- })
45
- return datasets.DatasetInfo(
46
- description=_DESCRIPTION,
47
- features=features,
48
- homepage=_HOMEPAGE,
49
- license=_LICENSE,
50
- citation=_CITATION
51
- )
52
-
53
- def _split_generators(self, dl_manager):
54
- train_image_path = dl_manager.download(
55
- self.config.train_image_url
56
- )
57
- test_image_path = dl_manager.download(
58
- self.config.test_image_url
59
- )
60
- return [
61
- datasets.SplitGenerator(
62
- name=datasets.Split.TRAIN,
63
- gen_kwargs={
64
- 'data_path': f'{train_image_path}'
65
- }
66
- ),
67
- datasets.SplitGenerator(
68
- name=datasets.Split.TEST,
69
- gen_kwargs={
70
- 'data_path': f'{test_image_path}'
71
- }
72
- )
73
- ]
74
-
75
- def _generate_examples(self, data_path):
76
- idx = 0
77
- with open(data_path, newline='', encoding='utf-8') as csvfile:
78
- csvreader = csv.reader(csvfile)
79
- next(csvreader)
80
- for row in csvreader:
81
- example = {
82
- 'image': np.array(row[1:], np.uint8).reshape(28, 28),
83
- 'label': row[0]
84
- }
85
- yield idx, example
86
- idx += 1