File size: 2,632 Bytes
1ea4ef8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2c4bf8a
 
 
 
 
1ea4ef8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1154c87
1ea4ef8
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
# Lint as: python3
"""SST-5 (Stanford Sentiment Treebank) sentiment classification dataset."""


import csv

import datasets
from datasets.tasks import TextClassification


_DESCRIPTION = """\
This data collection contains all the data used in our learning question classification experiments(see [1]), which has question class definitions, the training and testing question sets, examples of preprocessing the questions, feature definition scripts and examples of semantically related word features. 
This work has been done by Xin Li and Dan Roth and supported by [2].
"""

_CITATION = """"""

_TRAIN_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/SST-5/raw/main/train.csv"
_TEST_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/SST-5/raw/main/test.csv"
_VALID_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/SST-5/raw/main/validation.csv"


CATEGORY_MAPPING = {'0': 0,
 '1': 1,
 '2': 2,
 '3': 3,
 '4': 4
}

class SST5(datasets.GeneratorBasedBuilder):
    """SST-5 five-class sentiment classification dataset builder."""

    def _info(self):
        """Return dataset metadata (features, description, task template)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    # Class names are the raw label strings "0".."4" taken
                    # from CATEGORY_MAPPING's keys, in insertion order.
                    "label": datasets.features.ClassLabel(names=list(CATEGORY_MAPPING.keys())),
                }
            ),
            homepage="",
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Download the three split files and wire each to a SplitGenerator."""
        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
        valid_path = dl_manager.download_and_extract(_VALID_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_path}),
        ]

    def _generate_examples(self, filepath):
        """Yield (id, example) pairs from one split file.

        Each row is expected to hold exactly two fields, `label` then `text`;
        the label string is mapped to its integer class index via
        CATEGORY_MAPPING. Rows with a different field count raise ValueError.
        """
        with open(filepath, encoding="utf-8") as csv_file:
            # NOTE(review): the files have a .csv extension but are parsed as
            # tab-separated — presumably the hosted files really are TSV;
            # confirm against the data on the Hub. There is no header row
            # (a former header-skip line was dead code and has been removed).
            csv_reader = csv.reader(
                csv_file, quotechar='"', delimiter="\t", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            for id_, row in enumerate(csv_reader):
                label, text = row
                yield id_, {"text": text, "label": CATEGORY_MAPPING[label]}