Datasets:

Languages:
English
ArXiv:
License:
polinaeterna HF staff committed on
Commit
9b364ba
1 Parent(s): 59a2ae6

specify paths to all archives

Browse files
Files changed (1) hide show
  1. peoples_speech.py +11 -6
peoples_speech.py CHANGED
@@ -62,8 +62,10 @@ _LICENSE = [
62
  ]
63
 
64
  # relative path to data inside dataset's repo
65
- # TODO: change according to the scheme of generating urls to the audio archives
66
- _DATA_URL = "train/{config}/{config}_00000{archive_id}.tar"
 
 
67
 
68
  # relative path to metadata inside dataset's repo
69
  _MANIFEST_URL = "train/{config}.json"
@@ -102,9 +104,12 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
102
  )
103
 
104
  def _split_generators(self, dl_manager):
105
- # TODO: for demo purposes I use just first 5 archives
106
- # TODO: this should be changed to the actual number of archives further
107
- urls = [_DATA_URL.format(config=self.config.name, archive_id=i) for i in range(5)]
 
 
 
108
  archive_paths = [dl_manager.download(url) for url in urls]
109
 
110
  # In non-streaming mode, we extract the archives to have the data locally:
@@ -131,7 +136,7 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
131
  "local_extracted_archive_paths": local_extracted_archive_paths,
132
  # use iter_archive here to access the files in the TAR archives:
133
  "archives": [dl_manager.iter_archive(path) for path in archive_paths],
134
- "manifest_path": manifest_path
135
  },
136
  ),
137
  ]
 
62
  ]
63
 
64
  # relative path to data inside dataset's repo
65
+ _DATA_URL = "train/{config}/{config}_{archive_id:06d}.tar"
66
+
67
+ # relative path to file containing number of audio archives inside dataset's repo
68
+ _N_FILES_URL = "train/{config}/n_files.txt"
69
 
70
  # relative path to metadata inside dataset's repo
71
  _MANIFEST_URL = "train/{config}.json"
 
104
  )
105
 
106
  def _split_generators(self, dl_manager):
107
+ n_files_url = _N_FILES_URL.format(config=self.config.name)
108
+ n_files_path = dl_manager.download_and_extract(n_files_url)
109
+ with open(n_files_path, encoding="utf-8") as f:
110
+ n_files = int(f.read().strip())
111
+
112
+ urls = [_DATA_URL.format(config=self.config.name, archive_id=i) for i in range(n_files)]
113
  archive_paths = [dl_manager.download(url) for url in urls]
114
 
115
  # In non-streaming mode, we extract the archives to have the data locally:
 
136
  "local_extracted_archive_paths": local_extracted_archive_paths,
137
  # use iter_archive here to access the files in the TAR archives:
138
  "archives": [dl_manager.iter_archive(path) for path in archive_paths],
139
+ "manifest_path": manifest_path,
140
  },
141
  ),
142
  ]