import json

import datasets


class TeleData(datasets.GeneratorBasedBuilder):
    """Tele-Data dataset with multiple subsets: arxiv, standard, web, and wiki."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="arxiv", version=datasets.Version("1.0.0"), description="ArXiv data"),
        datasets.BuilderConfig(name="standard", version=datasets.Version("1.0.0"), description="Standard data"),
        datasets.BuilderConfig(name="web", version=datasets.Version("1.0.0"), description="Web data"),
        datasets.BuilderConfig(name="wiki", version=datasets.Version("1.0.0"), description="Wiki data"),
        datasets.BuilderConfig(name="full", version=datasets.Version("1.0.0"), description="Full dataset"),
    ]

    DEFAULT_CONFIG_NAME = "full"

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "category": datasets.Value("string"),
                "content": datasets.Value("string"),
                "metadata": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        # The "full" config concatenates all four subsets; otherwise only the
        # selected subset's JSONL file is downloaded.
        if self.config.name == "full":
            urls = [f"{name}/{name}.jsonl" for name in ["arxiv", "standard", "web", "wiki"]]
        else:
            urls = [f"{self.config.name}/{self.config.name}.jsonl"]
        data_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": data_files if isinstance(data_files, list) else [data_files]},
            )
        ]

    def _generate_examples(self, filepaths):
        # Use one running key across all files so that the "full" config
        # (which reads several JSONL files) yields unique example keys.
        key = 0
        for filepath in filepaths:
            with open(filepath, "r", encoding="utf-8") as f:
                for line in f:
                    data = json.loads(line)
                    yield key, {
                        "id": data["id"],
                        "category": data["category"],
                        "content": data["content"],
                        # Metadata is stored as a JSON string to keep the feature schema flat.
                        "metadata": json.dumps(data["metadata"]),
                    }
                    key += 1
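

if __name__ == "__main__":
    # Usage sketch, not part of the builder itself: it assumes this script is saved
    # as "tele_data.py" with the subset directories (arxiv/, standard/, web/, wiki/)
    # next to it, and a `datasets` version that still supports script-based loading.
    # The filename and paths are assumptions, not confirmed by the dataset itself.
    from datasets import load_dataset

    # Default "full" config concatenates all four subsets into one train split.
    full = load_dataset("tele_data.py", split="train")
    print(full)

    # A single subset can be selected by config name.
    arxiv = load_dataset("tele_data.py", "arxiv", split="train")
    print(arxiv[0])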