Commit 06b8c32, committed by system (HF staff)
1 Parent(s): 269f614

Update files from the datasets library (from 1.1.3)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.1.3
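
With this update the loading script fetches the original BBC data directly, so the old manual-download step is gone. A minimal usage sketch, assuming `datasets` >= 1.1.3 is installed and resolves the script version shipped here (1.2.0):

    from datasets import load_dataset

    # Downloads the raw BBC archive and the official 90/5/5 split file,
    # then builds the train/validation/test splits defined in xsum.py below.
    dataset = load_dataset("xsum")

    print(dataset)                    # DatasetDict with train/validation/test
    print(dataset["train"][0]["id"])  # `id` is the new feature added in 1.2.0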

Files changed (3):
  1. dataset_infos.json +1 -1
  2. dummy/1.2.0/dummy_data.zip +3 -0
  3. xsum.py +67 -22
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"default": {"description": "\nExtreme Summarization (XSum) Dataset.\n\nThere are two features:\n - document: Input news article.\n - summary: One sentence summary of the article.\n\nThis data need to manaully downloaded and extracted as described in\nhttps://github.com/EdinburghNLP/XSum/blob/master/XSum-Dataset/README.md.\nThe folder 'xsum-extracts-from-downloads' need to be compressed as\n'xsum-extracts-from-downloads.tar.gz' and put in manually downloaded folder.\n", "citation": "\n@article{Narayan2018DontGM,\n title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},\n author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},\n journal={ArXiv},\n year={2018},\n volume={abs/1808.08745}\n}\n", "homepage": "https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset", "license": "", "features": {"document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": {"input": "document", "output": "summary"}, "builder_name": "xsum", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 474092909, "num_examples": 204017, "dataset_name": "xsum"}, "validation": {"name": "validation", "num_bytes": 26011730, "num_examples": 11327, "dataset_name": "xsum"}, "test": {"name": "test", "num_bytes": 26470484, "num_examples": 11333, "dataset_name": "xsum"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/summarization/xsum.tar.gz": {"num_bytes": 204844092, "checksum": "3daaea63a068ad9d9c250ca39fcfe1e985e08696984dfbc3274f6a4082a29f88"}}, "download_size": 204844092, "dataset_size": 526575123, "size_in_bytes": 731419215}}
+ {"default": {"description": "\nExtreme Summarization (XSum) Dataset.\n\nThere are three features:\n - document: Input news article.\n - summary: One sentence summary of the article.\n - id: BBC ID of the article.\n\n", "citation": "\n@article{Narayan2018DontGM,\n title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},\n author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},\n journal={ArXiv},\n year={2018},\n volume={abs/1808.08745}\n}\n", "homepage": "https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset", "license": "", "features": {"document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "document", "output": "summary"}, "builder_name": "xsum", "config_name": "default", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 479206608, "num_examples": 204045, "dataset_name": "xsum"}, "validation": {"name": "validation", "num_bytes": 26292901, "num_examples": 11332, "dataset_name": "xsum"}, "test": {"name": "test", "num_bytes": 26756165, "num_examples": 11334, "dataset_name": "xsum"}}, "download_checksums": {"http://bollin.inf.ed.ac.uk/public/direct/XSUM-EMNLP18-Summary-Data-Original.tar.gz": {"num_bytes": 254582292, "checksum": "10b48aa187fc9c904b30f76ca97e2da0de8d3a1238acc26acadef93e2001af90"}, "https://raw.githubusercontent.com/EdinburghNLP/XSum/master/XSum-Dataset/XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json": {"num_bytes": 2720574, "checksum": "9c0c5d8f048a90bd68b19a34e4c30577ed270d3247b2119fa06a04ef46292068"}}, "download_size": 257302866, "post_processing_size": null, "dataset_size": 532255674, "size_in_bytes": 789558540}}
dummy/1.2.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6108aa5ec21ccb6e18ef70868eab02b335d9b59f3ac73b542e5dd3aa48993bb6
+ size 2943
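
The added file is stored with Git LFS, so the repository holds a small pointer file rather than the zip bytes; the three `+` lines above are the entire pointer. A sketch of reading such a pointer (the helper name is illustrative, not part of any library):

    def parse_lfs_pointer(path):
        """Parse a Git LFS pointer file into its key/value fields."""
        fields = {}
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    pointer = parse_lfs_pointer("dummy/1.2.0/dummy_data.zip")
    print(pointer["oid"])   # sha256:6108aa5e... identifies the blob in LFS storage
    print(pointer["size"])  # "2943", size of the real zip in bytes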
xsum.py CHANGED
@@ -18,6 +18,7 @@

  from __future__ import absolute_import, division, print_function

+ import json
  import os

  import datasets
@@ -36,25 +37,45 @@ _CITATION = """
  _DESCRIPTION = """
  Extreme Summarization (XSum) Dataset.

- There are two features:
+ There are three features:
    - document: Input news article.
    - summary: One sentence summary of the article.
+   - id: BBC ID of the article.

  """

-
- _URL = "https://s3.amazonaws.com/datasets.huggingface.co/summarization/xsum.tar.gz"
+ # From https://github.com/EdinburghNLP/XSum/issues/12
+ _URL_DATA = "http://bollin.inf.ed.ac.uk/public/direct/XSUM-EMNLP18-Summary-Data-Original.tar.gz"
+ _URL_SPLITS = (
+     "https://raw.githubusercontent.com/EdinburghNLP/XSum/master/XSum-Dataset/XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json"
+ )

  _DOCUMENT = "document"
  _SUMMARY = "summary"
+ _ID = "id"
+
+ _REMOVE_LINES = set(
+     [
+         "Share this with\n",
+         "Email\n",
+         "Facebook\n",
+         "Messenger\n",
+         "Twitter\n",
+         "Pinterest\n",
+         "WhatsApp\n",
+         "Linkedin\n",
+         "LinkedIn\n",
+         "Copy this link\n",
+         "These are external links and will open in a new window\n",
+     ]
+ )


  class Xsum(datasets.GeneratorBasedBuilder):
      """Extreme Summarization (XSum) Dataset."""

-     # Version 1.1.0 removes web contents.
-     VERSION = datasets.Version("1.1.0")
-     SUPPORTED_VERSIONS = [datasets.Version("1.0.0", "Dataset without cleaning.")]
+     # Version 1.2.0 expands coverage, includes ids, and removes web contents.
+     VERSION = datasets.Version("1.2.0")

      def _info(self):
          return datasets.DatasetInfo(
@@ -63,6 +84,7 @@ class Xsum(datasets.GeneratorBasedBuilder):
              {
                  _DOCUMENT: datasets.Value("string"),
                  _SUMMARY: datasets.Value("string"),
+                 _ID: datasets.Value("string"),
              }
          ),
          supervised_keys=(_DOCUMENT, _SUMMARY),
@@ -73,39 +95,62 @@ class Xsum(datasets.GeneratorBasedBuilder):
      def _split_generators(self, dl_manager):
          """Returns SplitGenerators."""

-         dl_path = dl_manager.download_and_extract(_URL)
+         files_to_download = {"data": _URL_DATA, "splits": _URL_SPLITS}
+         downloaded_files = dl_manager.download_and_extract(files_to_download)

-         dl_path = os.path.join(dl_path, "xsum")
          return [
              datasets.SplitGenerator(
                  name=datasets.Split.TRAIN,
                  gen_kwargs={
-                     "source": os.path.join(dl_path, "train.source"),
-                     "target": os.path.join(dl_path, "train.target"),
+                     "split_path": downloaded_files["splits"],
+                     "split_name": "train",
+                     "data_dir": os.path.join(downloaded_files["data"], "bbc-summary-data"),
                  },
              ),
              datasets.SplitGenerator(
                  name=datasets.Split.VALIDATION,
                  gen_kwargs={
-                     "source": os.path.join(dl_path, "val.source"),
-                     "target": os.path.join(dl_path, "val.target"),
+                     "split_path": downloaded_files["splits"],
+                     "split_name": "validation",
+                     "data_dir": os.path.join(downloaded_files["data"], "bbc-summary-data"),
                  },
              ),
              datasets.SplitGenerator(
                  name=datasets.Split.TEST,
                  gen_kwargs={
-                     "source": os.path.join(dl_path, "test.source"),
-                     "target": os.path.join(dl_path, "test.target"),
+                     "split_path": downloaded_files["splits"],
+                     "split_name": "test",
+                     "data_dir": os.path.join(downloaded_files["data"], "bbc-summary-data"),
                  },
              ),
          ]

-     def _generate_examples(self, source, target):
+     def _generate_examples(self, split_path, split_name, data_dir):
          """Yields examples."""
-         with open(source, encoding="utf-8") as f1:
-             source = f1.readlines()
-         with open(target, encoding="utf-8") as f2:
-             target = f2.readlines()
-         assert len(source) == len(target)
-         for i in range(len(target)):
-             yield i, {_DOCUMENT: source[i], _SUMMARY: target[i]}
+
+         with open(split_path, "r", encoding="utf-8") as f:
+             split_ids = json.load(f)
+
+         for i in split_ids[split_name]:
+             with open(os.path.join(data_dir, i + ".summary"), "r", encoding="utf-8") as f:
+                 text = "".join([line for line in f.readlines() if line not in _REMOVE_LINES and line.strip()])
+                 # Each file follows this format:
+                 # [SN]URL[SN]
+                 # http://somelink
+                 #
+                 # [SN]TITLE[SN]
+                 # some intro
+                 #
+                 # [SN]FIRST-SENTENCE[SN]
+                 # some intro
+                 #
+                 # [SN]RESTBODY[SN]
+                 # text line.
+                 # another text line.
+                 # "another text line."
+
+                 # According to the following issue, FIRST-SENTENCE
+                 # is the reference summary and TITLE is unused:
+                 # https://github.com/EdinburghNLP/XSum/issues/22
+                 segs = text.split("[SN]")
+                 yield i, {_DOCUMENT: segs[8].strip(), _SUMMARY: segs[6].strip(), _ID: i}
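
For reference, the indexing in the final `yield` can be checked on a toy article: splitting the cleaned file text on "[SN]" leaves the section names at odd indices and their contents at even ones, so segs[6] is the FIRST-SENTENCE summary and segs[8] the RESTBODY document. A self-contained sketch (the article text is made up, not from the dataset):

    sample = (
        "[SN]URL[SN]\n"
        "http://somelink\n"
        "[SN]TITLE[SN]\n"
        "some title\n"
        "[SN]FIRST-SENTENCE[SN]\n"
        "One sentence summary of the article.\n"
        "[SN]RESTBODY[SN]\n"
        "text line.\n"
        "another text line.\n"
    )

    segs = sample.split("[SN]")
    # segs[1] = "URL"             segs[2] = the URL
    # segs[3] = "TITLE"           segs[4] = the title (unused)
    # segs[5] = "FIRST-SENTENCE"  segs[6] = the reference summary
    # segs[7] = "RESTBODY"        segs[8] = the article body
    assert segs[5] == "FIRST-SENTENCE" and segs[7] == "RESTBODY"
    print(segs[6].strip())  # -> One sentence summary of the article.
    print(segs[8].strip())  # -> text line. / another text line.

Note that this split assumes "[SN]" never occurs inside article text; a file that deviates from the four-section layout would make segs[6] and segs[8] point at the wrong fields.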