albertvillanova (HF staff) committed
Commit cab853a (1 parent: 9ce6303)

Convert dataset to Parquet (#12)


- Convert dataset to Parquet (cd6ec7974b8bfd3b0180aaec4217ef430a15acce)
- Add 'unsplit' config data files (4b41f73c72b4ca6e24a641b551ac8f3b1090ed57)
- Delete loading script (0f2013aff0f120885b4892bddfe59faafa13a310)
- Delete data file (6d44c341e752cddbb545a0e4f4a41ecb09da5744)
- Delete legacy dataset_infos.json (23abdc09990f9cc02a701b82af668d46d8235b77)
- Delete data file (52bb8b6ef67e33229acd43b70874d585cbd45c95)
- Delete data file (b3ebb3b4fd4045c7d98862853d63c5b19582c254)
- Delete data file (106c232c0a63f3f06ae5d80d141114a00604e969)
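A conversion like this one can be reproduced with the `datasets` library itself: load each config from the last script-based revision and push it back, which writes Parquet shards and regenerates the size metadata shown in the README diff below. The snippet is a minimal sketch rather than the exact tooling behind this commit; the repository id is an assumption, and the same call would be repeated for the `unsplit` config.

from datasets import load_dataset

# Repository id is assumed for illustration; "9ce6303" is the abbreviated parent
# revision shown in the commit header above (recent `datasets` versions may also
# need trust_remote_code=True to run the old loading script).
ds = load_dataset("dair-ai/emotion", "split", revision="9ce6303")

# push_to_hub() writes one Parquet shard per split (e.g. split/train-*.parquet)
# and refreshes num_bytes / download_size / dataset_size in the README metadata.
ds.push_to_hub("dair-ai/emotion", config_name="split")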

README.md CHANGED
@@ -38,16 +38,16 @@ dataset_info:
           '5': surprise
   splits:
   - name: train
-    num_bytes: 1741597
+    num_bytes: 1741533
     num_examples: 16000
   - name: validation
-    num_bytes: 214703
+    num_bytes: 214695
     num_examples: 2000
   - name: test
-    num_bytes: 217181
+    num_bytes: 217173
     num_examples: 2000
-  download_size: 740883
-  dataset_size: 2173481
+  download_size: 1287193
+  dataset_size: 2173401
 - config_name: unsplit
   features:
   - name: text
@@ -64,10 +64,24 @@ dataset_info:
           '5': surprise
   splits:
   - name: train
-    num_bytes: 45445685
+    num_bytes: 45444017
     num_examples: 416809
-  download_size: 15388281
-  dataset_size: 45445685
+  download_size: 26888538
+  dataset_size: 45444017
+configs:
+- config_name: split
+  data_files:
+  - split: train
+    path: split/train-*
+  - split: validation
+    path: split/validation-*
+  - split: test
+    path: split/test-*
+  default: true
+- config_name: unsplit
+  data_files:
+  - split: train
+    path: unsplit/train-*
 train-eval-index:
 - config: default
   task: text-classification
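The added `configs` block is what lets `load_dataset` resolve the Parquet shards from the `data_files` patterns directly, with no loading script. A minimal usage sketch (the repository id is assumed for illustration):

from datasets import load_dataset

# "split" is declared default above, so the config name could be omitted;
# "unsplit" selects the single 416,809-example train split instead.
ds = load_dataset("dair-ai/emotion", "split")
print(ds)              # DatasetDict with train / validation / test
print(ds["train"][0])  # {'text': ..., 'label': ...}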
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. For more detailed information please refer to the paper.\n", "citation": "@inproceedings{saravia-etal-2018-carer,\n title = \"{CARER}: Contextualized Affect Representations for Emotion Recognition\",\n author = \"Saravia, Elvis and\n Liu, Hsien-Chi Toby and\n Huang, Yen-Hao and\n Wu, Junlin and\n Chen, Yi-Shin\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\",\n month = oct # \"-\" # nov,\n year = \"2018\",\n address = \"Brussels, Belgium\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/D18-1404\",\n doi = \"10.18653/v1/D18-1404\",\n pages = \"3687--3697\",\n abstract = \"Emotions are expressed in nuanced ways, which varies by collective or individual experiences, knowledge, and beliefs. Therefore, to understand emotion, as conveyed through text, a robust mechanism capable of capturing and modeling different linguistic nuances and phenomena is needed. We propose a semi-supervised, graph-based algorithm to produce rich structural descriptors which serve as the building blocks for constructing contextualized affect representations from text. The pattern-based representations are further enriched with word embeddings and evaluated through several emotion recognition tasks. Our experimental results demonstrate that the proposed method outperforms state-of-the-art techniques on emotion recognition tasks.\",\n}\n", "homepage": "https://github.com/dair-ai/emotion_dataset", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 6, "names": ["sadness", "joy", "love", "anger", "fear", "surprise"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": {"input": "text", "output": "label"}, "task_templates": [{"task": "text-classification", "text_column": "text", "label_column": "label", "labels": ["anger", "fear", "joy", "love", "sadness", "surprise"]}], "builder_name": "emotion", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1741541, "num_examples": 16000, "dataset_name": "emotion"}, "validation": {"name": "validation", "num_bytes": 214699, "num_examples": 2000, "dataset_name": "emotion"}, "test": {"name": "test", "num_bytes": 217177, "num_examples": 2000, "dataset_name": "emotion"}}, "download_checksums": {"https://www.dropbox.com/s/1pzkadrvffbqw6o/train.txt?dl=1": {"num_bytes": 1658616, "checksum": "3ab03d945a6cb783d818ccd06dafd52d2ed8b4f62f0f85a09d7d11870865b190"}, "https://www.dropbox.com/s/2mzialpsgf9k5l3/val.txt?dl=1": {"num_bytes": 204240, "checksum": "34faaa31962fe63cdf5dbf6c132ef8ab166c640254ab991af78f3aea375e79ef"}, "https://www.dropbox.com/s/ikkqxfdbdec3fuj/test.txt?dl=1": {"num_bytes": 206760, "checksum": "60f531690d20127339e7f054edc299a82c627b5ec0dd5d552d53d544e0cfcc17"}}, "download_size": 2069616, "post_processing_size": null, "dataset_size": 2173417, "size_in_bytes": 4243033}}
 
 
emotion.py DELETED
@@ -1,88 +0,0 @@
-import json
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_CITATION = """\
-@inproceedings{saravia-etal-2018-carer,
-    title = "{CARER}: Contextualized Affect Representations for Emotion Recognition",
-    author = "Saravia, Elvis and
-      Liu, Hsien-Chi Toby and
-      Huang, Yen-Hao and
-      Wu, Junlin and
-      Chen, Yi-Shin",
-    booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
-    month = oct # "-" # nov,
-    year = "2018",
-    address = "Brussels, Belgium",
-    publisher = "Association for Computational Linguistics",
-    url = "https://www.aclweb.org/anthology/D18-1404",
-    doi = "10.18653/v1/D18-1404",
-    pages = "3687--3697",
-    abstract = "Emotions are expressed in nuanced ways, which varies by collective or individual experiences, knowledge, and beliefs. Therefore, to understand emotion, as conveyed through text, a robust mechanism capable of capturing and modeling different linguistic nuances and phenomena is needed. We propose a semi-supervised, graph-based algorithm to produce rich structural descriptors which serve as the building blocks for constructing contextualized affect representations from text. The pattern-based representations are further enriched with word embeddings and evaluated through several emotion recognition tasks. Our experimental results demonstrate that the proposed method outperforms state-of-the-art techniques on emotion recognition tasks.",
-}
-"""
-
-_DESCRIPTION = """\
-Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. For more detailed information please refer to the paper.
-"""
-
-_HOMEPAGE = "https://github.com/dair-ai/emotion_dataset"
-
-_LICENSE = "The dataset should be used for educational and research purposes only"
-
-_URLS = {
-    "split": {
-        "train": "data/train.jsonl.gz",
-        "validation": "data/validation.jsonl.gz",
-        "test": "data/test.jsonl.gz",
-    },
-    "unsplit": {
-        "train": "data/data.jsonl.gz",
-    },
-}
-
-
-class Emotion(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version("1.0.0")
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="split", version=VERSION, description="Dataset split in train, validation and test"
-        ),
-        datasets.BuilderConfig(name="unsplit", version=VERSION, description="Unsplit dataset"),
-    ]
-    DEFAULT_CONFIG_NAME = "split"
-
-    def _info(self):
-        class_names = ["sadness", "joy", "love", "anger", "fear", "surprise"]
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {"text": datasets.Value("string"), "label": datasets.ClassLabel(names=class_names)}
-            ),
-            supervised_keys=("text", "label"),
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-            license=_LICENSE,
-            task_templates=[TextClassification(text_column="text", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        paths = dl_manager.download_and_extract(_URLS[self.config.name])
-        if self.config.name == "split":
-            return [
-                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": paths["train"]}),
-                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": paths["validation"]}),
-                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": paths["test"]}),
-            ]
-        else:
-            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": paths["train"]})]
-
-    def _generate_examples(self, filepath):
-        """Generate examples."""
-        with open(filepath, encoding="utf-8") as f:
-            for idx, line in enumerate(f):
-                example = json.loads(line)
-                yield idx, example
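The deleted builder did nothing more than stream the gzipped JSONL shards listed in `_URLS` and yield each JSON object as a `{text, label}` example; those shards are replaced by the Parquet files renamed below. A rough per-file equivalent of the conversion, using pandas as an assumed tool (note that a plain conversion like this would not embed the ClassLabel feature metadata that `datasets` writes into its own Parquet shards):

import os

import pandas as pd

# Hypothetical one-off conversion mirroring the renames in this commit:
# each gzipped JSONL shard becomes a single Parquet shard.
for src, dst in [
    ("data/train.jsonl.gz", "split/train-00000-of-00001.parquet"),
    ("data/validation.jsonl.gz", "split/validation-00000-of-00001.parquet"),
    ("data/test.jsonl.gz", "split/test-00000-of-00001.parquet"),
    ("data/data.jsonl.gz", "unsplit/train-00000-of-00001.parquet"),
]:
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    df = pd.read_json(src, lines=True)  # gzip compression inferred from the extension
    df.to_parquet(dst, index=False)     # columns: text, label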
data/test.jsonl.gz → split/test-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4524468d0b7ee8eab07a088216cde7f9278f1c574669504a805ed172df6dad75
-size 74935
+oid sha256:6f8407fa1ca9c310f55781f082ed73812f6551e8dda2c61973123a121869245b
+size 128987
data/validation.jsonl.gz → split/train-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:50783464882f450f88e61ece964a200e492495eed1472ed520d013bbcd3049be
-size 74018
+oid sha256:10817f0f2ea42358bc62f69a09dfb8bd71701727df6d5a387bea742f3ea06417
+size 1030740
data/train.jsonl.gz → split/validation-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:757a0a73f1483f4b3f94783b774cdbf0831722a2b2c9abb5b820b4614ff6882a
-size 591930
+oid sha256:c70f0e660b5ebd1ea9a37d2a851f516f08a6d6477cdfc11be204e22a2f1102fd
+size 127466
data/data.jsonl.gz → unsplit/train-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8944e6b35cb42294769ac30cf17bd006231545b2eeecfa59324246e192564d1f
-size 15388281
+oid sha256:ba60fe890562b2770967d63f9d7eb104691e028ca68716cd4e926996ecb31441
+size 26888538
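As a quick sanity check, the new shards can be read back and compared against the split sizes declared in the README metadata above (pandas assumed again as the tool):

import pandas as pd

train = pd.read_parquet("split/train-00000-of-00001.parquet")
print(len(train))           # expected 16000 rows, per num_examples above
print(list(train.columns))  # expected ['text', 'label']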