"""ACP Bench dataset.""" import json import datasets import itertools _CITATION = """\ @article{kokel2024acp, title={ACPBench: Reasoning about Action, Change, and Planning}, author={Kokel, Harsha and Katz, Michael and Srinivas, Kavitha and Sohrabi, Shirin}, journal={arXiv}, year={2024} } """ _DESCRIPTION = """ACPBench consists of 7 reasoning tasks over 13 domains. The 13 domains include 11 classical planning domains, ALFWorld, and a novel Swap domain. The 7 tasks included in ACPBench are Action Applicability (app), Progression (prog), Atom Reachability (reach), Validation (val), Action Reachability (areach), Justification (just), and Landmarks (land).""" _HOMEPAGE = "https://ibm.github.io/ACPBench/" _LICENSE = "MIT" _BASE_URL = "https://raw.github.com/ibm/ACPBench/main/dataset" task_list = [ "app", "areach", "just", "land", "prog", "reach", "val" ] format_list = [ "bool", "mcq" ] class ACPConfig(datasets.BuilderConfig): def __init__(self, urls, **kwargs): """ urls: *dict[string]*, the urls for each split of the ACPBench set. """ super().__init__(version=datasets.Version("1.0.0"), **kwargs) self.urls = urls class ACP(datasets.GeneratorBasedBuilder): BUILDER_CONFIGS = [ ACPConfig( name=f"acp_{task_name}_{format_name}", urls={ "test": f"{_BASE_URL}/{task_name}/test.{format_name}.json.gz", "val": f"{_BASE_URL}/{task_name}/dev.{format_name}.json", }, ) for task_name, format_name in itertools.product(task_list,format_list) ] def _info(self): features = { "context": datasets.Value("string"), "question": datasets.Value("string"), "answer": datasets.Value("string"), "group": datasets.Value("string"), "id": datasets.Value("string") } if 'mcq' in self.config.name: features["query"]= datasets.Value("string") features["choices"]= datasets.features.Sequence(feature={'text': datasets.Value(dtype='string', id=None), 'label': datasets.Value(dtype='string', id=None)}, length=-1, id=None) return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features(features), homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION, ) def _split_generators(self, dl_manager): data_dir = dl_manager.download_and_extract(self.config.urls) return [ datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={ "filepath": data_dir["test"], }, ),datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={ "filepath": data_dir["val"], }, ) ] def _generate_examples(self, filepath): with open(filepath) as f: examples = json.load(f) for i, instance in enumerate(examples): yield i, instance