# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import os

import datasets


_DESCRIPTION = (
    "This labelled PII dataset consists of protocol traces (JSON, SQL (PostgreSQL, MySQL), HTML, and XML) "
    "generated from OpenAPI specifications and includes 60+ PII types."
)

_CITATION = """
@online{WinNT,
    author = {Benjamin Kilimnik},
    title = {{Privy} Synthetic PII Protocol Trace Dataset},
    year = 2022,
    url = {https://huggingface.co/datasets/beki/privy},
}
"""

_HOMEPAGE = "https://github.com/pixie-io/pixie/tree/main/src/datagen/pii/privy/privy"

_LICENSE = "MIT"

_URL = "https://huggingface.co/datasets/beki/privy/resolve/main/privy-dataset.zip"


class Privy(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    # You can load either configuration in the list below with
    # data = datasets.load_dataset('beki/privy', 'small')
    # data = datasets.load_dataset('beki/privy', 'large')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="small", version=VERSION, description="Privy small"),
        datasets.BuilderConfig(name="large", version=VERSION, description="Privy large"),
    ]

    DEFAULT_CONFIG_NAME = "small"

    def _info(self):
        if self.config.name == "large":
            features = datasets.Features(
                {
                    "full_text": datasets.Value("string"),
                    "masked": datasets.Value("string"),
                    "spans": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-O", "I-O", "L-O", "U-O",
                                "B-PERSON", "I-PERSON", "L-PERSON", "U-PERSON",
                                "B-LOCATION", "I-LOCATION", "L-LOCATION", "U-LOCATION",
                                "B-ORGANIZATION", "I-ORGANIZATION", "L-ORGANIZATION", "U-ORGANIZATION",
                                "B-NRP", "I-NRP", "L-NRP", "U-NRP",
                                "B-DATE_TIME", "I-DATE_TIME", "L-DATE_TIME", "U-DATE_TIME",
                                "B-CREDIT_CARD", "I-CREDIT_CARD", "L-CREDIT_CARD", "U-CREDIT_CARD",
                                "B-URL", "I-URL", "L-URL", "U-URL",
                                "B-IBAN_CODE", "I-IBAN_CODE", "L-IBAN_CODE", "U-IBAN_CODE",
                                "B-US_BANK_NUMBER", "I-US_BANK_NUMBER", "L-US_BANK_NUMBER", "U-US_BANK_NUMBER",
                                "B-PHONE_NUMBER", "I-PHONE_NUMBER", "L-PHONE_NUMBER", "U-PHONE_NUMBER",
                                "B-US_SSN", "I-US_SSN", "L-US_SSN", "U-US_SSN",
                                "B-US_PASSPORT", "I-US_PASSPORT", "L-US_PASSPORT", "U-US_PASSPORT",
                                "B-US_DRIVER_LICENSE", "I-US_DRIVER_LICENSE", "L-US_DRIVER_LICENSE", "U-US_DRIVER_LICENSE",
                                "B-US_LICENSE_PLATE", "I-US_LICENSE_PLATE", "L-US_LICENSE_PLATE", "U-US_LICENSE_PLATE",
                                "B-IP_ADDRESS", "I-IP_ADDRESS", "L-IP_ADDRESS", "U-IP_ADDRESS",
                                "B-US_ITIN", "I-US_ITIN", "L-US_ITIN", "U-US_ITIN",
                                "B-EMAIL_ADDRESS", "I-EMAIL_ADDRESS", "L-EMAIL_ADDRESS", "U-EMAIL_ADDRESS",
                                "B-TITLE", "I-TITLE", "L-TITLE", "U-TITLE",
                                "B-COORDINATE", "I-COORDINATE", "L-COORDINATE", "U-COORDINATE",
                                "B-IMEI", "I-IMEI", "L-IMEI", "U-IMEI",
                                "B-PASSWORD", "I-PASSWORD", "L-PASSWORD", "U-PASSWORD",
                                "B-LICENSE_PLATE", "I-LICENSE_PLATE", "L-LICENSE_PLATE", "U-LICENSE_PLATE",
                                "B-CURRENCY", "I-CURRENCY", "L-CURRENCY", "U-CURRENCY",
                                "B-FINANCIAL", "I-FINANCIAL", "L-FINANCIAL", "U-FINANCIAL",
                                "B-ROUTING_NUMBER", "I-ROUTING_NUMBER", "L-ROUTING_NUMBER", "U-ROUTING_NUMBER",
                                "B-SWIFT_CODE", "I-SWIFT_CODE", "L-SWIFT_CODE", "U-SWIFT_CODE",
                                "B-MAC_ADDRESS", "I-MAC_ADDRESS", "L-MAC_ADDRESS", "U-MAC_ADDRESS",
                                "B-AGE", "I-AGE", "L-AGE", "U-AGE",
                            ]
                        )
                    ),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "template_id": datasets.Value("int32"),
                    "metadata": datasets.Value("int32"),
                }
            )
        if self.config.name == "small":
            # The "small" configuration uses abbreviated entity names (PER, LOC, ORG).
            features = datasets.Features(
                {
                    "full_text": datasets.Value("string"),
                    "masked": datasets.Value("string"),
                    "spans": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-O", "I-O", "L-O", "U-O",
                                "B-PER", "I-PER", "L-PER", "U-PER",
                                "B-LOC", "I-LOC", "L-LOC", "U-LOC",
                                "B-ORG", "I-ORG", "L-ORG", "U-ORG",
                                "B-NRP", "I-NRP", "L-NRP", "U-NRP",
                                "B-DATE_TIME", "I-DATE_TIME", "L-DATE_TIME", "U-DATE_TIME",
                                "B-CREDIT_CARD", "I-CREDIT_CARD", "L-CREDIT_CARD", "U-CREDIT_CARD",
                                "B-URL", "I-URL", "L-URL", "U-URL",
                                "B-IBAN_CODE", "I-IBAN_CODE", "L-IBAN_CODE", "U-IBAN_CODE",
                                "B-US_BANK_NUMBER", "I-US_BANK_NUMBER", "L-US_BANK_NUMBER", "U-US_BANK_NUMBER",
                                "B-PHONE_NUMBER", "I-PHONE_NUMBER", "L-PHONE_NUMBER", "U-PHONE_NUMBER",
                                "B-US_SSN", "I-US_SSN", "L-US_SSN", "U-US_SSN",
                                "B-US_PASSPORT", "I-US_PASSPORT", "L-US_PASSPORT", "U-US_PASSPORT",
                                "B-US_DRIVER_LICENSE", "I-US_DRIVER_LICENSE", "L-US_DRIVER_LICENSE", "U-US_DRIVER_LICENSE",
                                "B-US_LICENSE_PLATE", "I-US_LICENSE_PLATE", "L-US_LICENSE_PLATE", "U-US_LICENSE_PLATE",
                                "B-IP_ADDRESS", "I-IP_ADDRESS", "L-IP_ADDRESS", "U-IP_ADDRESS",
                                "B-US_ITIN", "I-US_ITIN", "L-US_ITIN", "U-US_ITIN",
                                "B-EMAIL_ADDRESS", "I-EMAIL_ADDRESS", "L-EMAIL_ADDRESS", "U-EMAIL_ADDRESS",
                                "B-TITLE", "I-TITLE", "L-TITLE", "U-TITLE",
                                "B-COORDINATE", "I-COORDINATE", "L-COORDINATE", "U-COORDINATE",
                                "B-IMEI", "I-IMEI", "L-IMEI", "U-IMEI",
                                "B-PASSWORD", "I-PASSWORD", "L-PASSWORD", "U-PASSWORD",
                                "B-LICENSE_PLATE", "I-LICENSE_PLATE", "L-LICENSE_PLATE", "U-LICENSE_PLATE",
                                "B-CURRENCY", "I-CURRENCY", "L-CURRENCY", "U-CURRENCY",
                                "B-FINANCIAL", "I-FINANCIAL", "L-FINANCIAL", "U-FINANCIAL",
                                "B-ROUTING_NUMBER", "I-ROUTING_NUMBER", "L-ROUTING_NUMBER", "U-ROUTING_NUMBER",
                                "B-SWIFT_CODE", "I-SWIFT_CODE", "L-SWIFT_CODE", "U-SWIFT_CODE",
                                "B-MAC_ADDRESS", "I-MAC_ADDRESS", "L-MAC_ADDRESS", "U-MAC_ADDRESS",
                                "B-AGE", "I-AGE", "L-AGE", "U-AGE",
                            ]
                        )
                    ),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "template_id": datasets.Value("int32"),
                    "metadata": datasets.Value("int32"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URL)
        # self.config.name is the name of the configuration selected in BUILDER_CONFIGS above
        size = "small"
        if self.config.name == "large":
            size = "large"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"train-{size}.json"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"dev-{size}.json"),
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"test-{size}.json"),
                    "split": "test",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        # The `key` is for legacy reasons (tfds) and is not important in itself,
        # but must be unique for each example.
        with open(filepath, encoding="utf-8") as f:
            dataset = json.load(f)
            for key, row in enumerate(dataset):
                # Yields examples as (key, example) tuples
                yield key, {
                    "tokens": row["tokens"],
                    "tags": row["tags"],
                    "full_text": row["full_text"],
                    "spans": row["spans"],
                    "masked": row["masked"],
                    "template_id": row["template_id"],
                    "metadata": row["metadata"],
                }