# coding=utf-8
# Copyright 2022 The PolyAI and HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EVI is a challenging spoken multilingual dataset with 5,506 dialogues in English, Polish, and French
that can be used for benchmarking and developing knowledge-based enrolment, verification, and identification
for spoken dialogue systems.
"""
import csv
import json
import os
import warnings
from datetime import datetime

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{Spithourakis2022evi,
author = {Georgios P. Spithourakis and Ivan Vuli\'{c} and Micha\l{} Lis and I\~{n}igo Casanueva and Pawe\l{} Budzianowski},
title = {{EVI}: Multilingual Spoken Dialogue Tasks and Dataset for Knowledge-Based Enrolment, Verification, and Identification},
year = {2022},
note = {Data available at https://github.com/PolyAI-LDN/evi-paper},
url = {https://arxiv.org/abs/2204.13496},
booktitle = {Findings of NAACL (publication pending)}
}
""" # noqa
_ALL_CONFIGS = sorted([
"en-GB", "fr-FR", "pl-PL"
])
_LANGS = sorted(["en", "fr", "pl"])
_DESCRIPTION = """
EVI is a challenging spoken multilingual dataset with 5,506 dialogues in English, Polish, and French
that can be used for benchmarking and developing knowledge-based enrolment, verification, and identification
for spoken dialogue systems.
"""  # noqa
_LICENSE = "CC-BY-4.0"
_HOMEPAGE = "https://github.com/PolyAI-LDN/evi-paper"
_BASE_URL = "https://huggingface.co/datasets/PolyAI/evi/resolve/main/data"
# Build URLs with "/" rather than os.path.join, which would insert "\" on Windows.
_TEXTS_URL = {
    lang: f"{_BASE_URL}/dialogues.{lang}.tsv" for lang in _LANGS
}
_RECORDS_URL = {
    lang: f"{_BASE_URL}/records.{lang}.csv" for lang in _LANGS
}
_BROKEN_URL = {
    "en": f"{_BASE_URL}/broken_en.txt"
}
_AUDIO_DATA_URL = "https://poly-public-data.s3.eu-west-2.amazonaws.com/evi-paper/audios.zip" # noqa
_VERSION = datasets.Version("0.0.1", "")


class EviConfig(datasets.BuilderConfig):
    """BuilderConfig for EVI."""

    def __init__(self, name, *args, **kwargs):
        super().__init__(*args, name=name, **kwargs)
        # Use all languages when the config is "all"; otherwise just the
        # language of the requested locale (e.g. "en" for "en-GB").
        self.languages = _LANGS if name == "all" else [name.split("-")[0]]


class Evi(datasets.GeneratorBasedBuilder):
    DEFAULT_WRITER_BATCH_SIZE = 512
    BUILDER_CONFIGS = [EviConfig(name) for name in _ALL_CONFIGS + ["all"]]

    def _info(self):
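        """Declare the feature schema: one example per dialogue turn, with
        8 kHz audio, the ASR transcription and n-best list, and the target
        speaker profile fields."""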
features = datasets.Features(
{
"language": datasets.ClassLabel(names=_LANGS),
"audio": datasets.Audio(sampling_rate=8_000),
"asr_transcription": datasets.Value("string"),
"dialogue_id": datasets.Value("string"),
"speaker_id": datasets.Value("string"),
"turn_id": datasets.Value("int32"),
"target_profile_id": datasets.Value("string"),
"asr_nbest": datasets.Sequence(datasets.Value("string")),
"path": datasets.Value("string"),
"postcode": datasets.Value("string"),
"name": datasets.Value("string"),
"dob": datasets.Value("date64"),
"name_first": datasets.Value("string"),
"name_last": datasets.Value("string"),
"sex": datasets.ClassLabel(names=["F", "M"]), # TODO: are there other genders or Nones?
"email": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
version=_VERSION,
description=_DESCRIPTION,
license=_LICENSE,
citation=_CITATION,
features=features,
homepage=_HOMEPAGE
        )

    def _split_generators(self, dl_manager):
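        """Download the per-language transcripts and profile records plus the
        shared audio archive, and wire everything into a single TEST split."""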
langs = self.config.languages
lang2records_urls = {
lang: _RECORDS_URL[lang] for lang in langs
}
lang2text_urls = {
lang: _TEXTS_URL[lang] for lang in langs
}
records_paths = dl_manager.download_and_extract(lang2records_urls)
text_paths = dl_manager.download_and_extract(lang2text_urls)
audio_data_path = dl_manager.download_and_extract(_AUDIO_DATA_URL)
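        # The list of known-broken audio files is published for English only,
        # so fetch it just when English is among the requested languages.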
broken_path = dl_manager.download_and_extract(_BROKEN_URL["en"]) if "en" in langs else None
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"audio_data_path": audio_data_path,
"text_paths": text_paths,
"records_paths": records_paths,
"broken_path": broken_path
},
)
        ]

    def _generate_examples(self, audio_data_path, text_paths, records_paths, broken_path=None):
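        """Yield one example per dialogue turn, keyed by its relative audio
        path, joining each turn with its target speaker profile."""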
if broken_path:
with open(broken_path, encoding="utf-8") as f:
                broken_samples = {line.strip() for line in f}
else:
broken_samples = None
for lang, text_path in text_paths.items():
records_path = records_paths[lang]
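            # Index the ground-truth speaker profiles by scenario_id so each
            # dialogue turn can be joined with its target profile below.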
            records = {}
with open(records_path, encoding="utf-8") as fin:
records_reader = csv.DictReader(
fin, delimiter=",", skipinitialspace=True
)
                for row in records_reader:
                    scenario_id = row.pop("scenario_id")
                    # Parse the date of birth into a datetime for the `date64` feature.
                    row["dob"] = datetime.strptime(row["dob"], "%Y-%m-%d")
                    records[scenario_id] = row
with open(text_path, encoding="utf-8") as fin:
texts_reader = csv.DictReader(
fin, delimiter="\t", skipinitialspace=True
)
for dictrow in texts_reader:
dialogue_id = dictrow["dialogue_id"]
turn_id = dictrow["turn_num"]
                    file_path = os.path.join(
                        "audios", lang, dialogue_id, f"{turn_id}.wav"
                    )
full_path = os.path.join(audio_data_path, file_path)
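                    # Skip turns whose audio is listed as broken or is missing on disk.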
if broken_samples and file_path in broken_samples:
warnings.warn(f"{full_path} is broken, skipping it.")
continue
if not os.path.isfile(full_path):
warnings.warn(f"{full_path} not found, skipping it.")
continue
                    target_profile_id = dictrow["scenario_id"]
                    if target_profile_id not in records:
                        warnings.warn(
                            f"Record with scenario_id {target_profile_id} not found, "
                            f"ignoring this dialogue. Full dialogue info: {dictrow}"
                        )
                        continue
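                    # Merge the turn-level fields with the matched target profile.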
yield file_path, {
"language": lang,
"audio": str(full_path),
"dialogue_id": dialogue_id,
"speaker_id": dictrow["speaker_id"],
"turn_id": turn_id,
"target_profile_id": target_profile_id,
"asr_transcription": dictrow["transcription"],
"asr_nbest": json.loads(dictrow["nbest"]),
"path": file_path,
**records[target_profile_id]
}