# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lesion dataset - ISIC 2018 Task 2."""

import os
from urllib.parse import urlparse

import numpy as np
from PIL import Image

import datasets

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.},
year={2020}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
ISIC 2018 lesion images with Task 1 (lesion boundary) and Task 2 (attribute)
segmentation masks. This dataset has been modified for project use.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
# _URL = "https://storage.googleapis.com/lesion-dataset/"
_URLS = {
    "full": "https://storage.googleapis.com/lesion-dataset/dataset-images.zip",
    "globules": "https://storage.googleapis.com/lesion-dataset/dataset-globules.zip",
    "milia_like_cyst": "https://storage.googleapis.com/lesion-dataset/dataset-milia_like_cyst.zip",
    "negative_network": "https://storage.googleapis.com/lesion-dataset/dataset-negative_network.zip",
    "pigment_network": "https://storage.googleapis.com/lesion-dataset/dataset-pigment_network.zip",
    "streaks": "https://storage.googleapis.com/lesion-dataset/dataset-streaks.zip",
    "task1": "https://storage.googleapis.com/lesion-dataset/dataset-task1.zip",
}


# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class LesionDataset(datasets.GeneratorBasedBuilder):
    """ISIC 2018 lesion images with Task 1 and Task 2 segmentation masks."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="full", version=VERSION, description="The full dataset with all five attribute classes"),
        datasets.BuilderConfig(name="globules", version=VERSION, description="The dataset with only the globules class"),
        datasets.BuilderConfig(name="milia_like_cyst", version=VERSION, description="The dataset with only the milia_like_cyst class"),
        datasets.BuilderConfig(name="negative_network", version=VERSION, description="The dataset with only the negative_network class"),
        datasets.BuilderConfig(name="pigment_network", version=VERSION, description="The dataset with only the pigment_network class"),
        datasets.BuilderConfig(name="streaks", version=VERSION, description="The dataset with only the streaks class"),
        datasets.BuilderConfig(name="task1", version=VERSION, description="The dataset for Task 1 (lesion boundary segmentation)"),
    ]

    DEFAULT_CONFIG_NAME = "task1"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
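    # Usage sketch (illustrative, not part of the builder itself): the config name is
    # passed as the second argument to `datasets.load_dataset`, e.g.
    #
    #     from datasets import load_dataset
    #     ds = load_dataset("path/to/lesion-dataset.py", "globules")
    #
    # Omitting the name falls back to DEFAULT_CONFIG_NAME ("task1").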
    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which contains the
        # feature types and other metadata for the dataset.
        if self.config.name == "full":  # This is the name of the configuration selected in BUILDER_CONFIGS above
            # One image column plus one mask column per attribute class.
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "label0": datasets.Image(),
                    "label1": datasets.Image(),
                    "label2": datasets.Image(),
                    "label3": datasets.Image(),
                    "label4": datasets.Image(),
                }
            )
        else:
            # 'globules', 'milia_like_cyst', 'negative_network', 'pigment_network',
            # 'streaks' and 'task1' all pair each image with a single mask.
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.Image(),
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            # They are defined above because they differ between the configurations.
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment the
            # supervised_keys line below and specify them. They'll be used if
            # as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
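    # For reference (an illustrative note, not enforced anywhere): a decoded "full"
    # example looks like
    #     {"image": <PIL.Image>, "label0": <PIL.Image>, ..., "label4": <PIL.Image>}
    # where the labelN indices follow the label2id mapping defined in
    # _split_generators; every other config yields {"image": ..., "label": ...}.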
    def _split_generators(self, dl_manager):
        # This method is tasked with downloading/extracting the data and defining the
        # splits, depending on the configuration.
        # If several configurations are possible (listed in BUILDER_CONFIGS), the
        # configuration selected by the user is in self.config.name.
        # dl_manager is a datasets.download.DownloadManager that can be used to download
        # and extract URLs. It can accept any type or nested list/dict and will give back
        # the same structure with the URLs replaced with paths to local files.
        # By default the archives will be extracted and a path to a cached folder where
        # they are extracted is returned instead of the archive.
        url = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(url)

        # Parse the URL, get the base name (e.g. "dataset-task1.zip"), and remove the
        # extension so the extracted folder can be located.
        parsed_url = urlparse(url)
        base_name_with_extension = os.path.basename(parsed_url.path)
        base_name = os.path.splitext(base_name_with_extension)[0]

        # Label to ID mapping
        self.label2id = {
            "globules": 0,
            "milia_like_cyst": 1,
            "negative_network": 2,
            "pigment_network": 3,
            "streaks": 4,
        }

        # Task 2 configurations use the attribute ground-truth folders.
        if self.config.name in ["full", "globules", "milia_like_cyst", "negative_network", "pigment_network", "streaks"]:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task1-2_Training_Input"),
                        "labelpath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task2_Training_GroundTruth_v3"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task1-2_Validation_Input"),
                        "labelpath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task2_Validation_GroundTruth"),
                        "split": "validation",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task1-2_Test_Input"),
                        "labelpath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task2_Test_GroundTruth"),
                        "split": "test",
                    },
                ),
            ]

        # Task 1 configuration uses the lesion-boundary ground-truth folders.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task1-2_Training_Input"),
                    "labelpath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task1_Training_GroundTruth"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task1-2_Validation_Input"),
                    "labelpath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task1_Validation_GroundTruth"),
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task1-2_Test_Input"),
                    "labelpath": os.path.join(data_dir, f"{base_name}/ISIC2018_Task1_Test_GroundTruth"),
                    "split": "test",
                },
            ),
        ]
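    # Expected layout inside each extracted archive (inferred from the paths above;
    # the folder names come from this script, not from inspecting the archives):
    #
    #     <base_name>/ISIC2018_Task1-2_Training_Input/ISIC_XXXXXXX.jpg
    #     <base_name>/ISIC2018_Task2_Training_GroundTruth_v3/ISIC_XXXXXXX_attribute_<class>.png
    #     <base_name>/ISIC2018_Task1_Training_GroundTruth/ISIC_XXXXXXX_segmentation.png
    #
    # plus the matching Validation and Test folders.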
    @staticmethod
    def _ensure_mask(image_path, label_file_path):
        # If a label does not exist, create an all-black mask of the same size as the
        # input image so every example has a complete set of labels.
        if not os.path.exists(label_file_path):
            # Load the corresponding image to get its size
            width, height = Image.open(image_path).size
            # Create a black image of the same size and save it alongside the real masks
            Image.fromarray(np.zeros((height, width), dtype=np.uint8)).save(label_file_path)
        return label_file_path

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, labelpath, split):
        # This method handles input defined in _split_generators to yield (key, example)
        # tuples from the dataset. The `key` is for legacy reasons (tfds) and is not
        # important in itself, but must be unique for each example.
        # Directory listings are sorted so example order is deterministic across filesystems.
        if self.config.name == "full":
            # The "full" configuration returns all the attribute labels.
            for filename in sorted(os.listdir(filepath)):
                # Check if the file is an image
                if filename.endswith((".jpg", ".jpeg")):
                    # Base name of the image file without the extension (e.g. ISIC_0000000)
                    base_name = os.path.splitext(filename)[0]
                    yield_result = {"image": os.path.join(filepath, filename)}
                    for k, v in self.label2id.items():
                        label_filename = f"{base_name}_attribute_{k}.png"
                        label_file_path = os.path.join(labelpath, label_filename)
                        yield_result[f"label{v}"] = self._ensure_mask(yield_result["image"], label_file_path)
                    yield base_name, yield_result
        elif self.config.name in ["globules", "milia_like_cyst", "negative_network", "pigment_network", "streaks"]:
            # Single-class configurations return only the selected attribute's mask.
            for filename in sorted(os.listdir(filepath)):
                if filename.endswith((".jpg", ".jpeg")):
                    base_name = os.path.splitext(filename)[0]
                    yield_result = {"image": os.path.join(filepath, filename)}
                    label_filename = f"{base_name}_attribute_{self.config.name}.png"
                    label_file_path = os.path.join(labelpath, label_filename)
                    yield_result["label"] = self._ensure_mask(yield_result["image"], label_file_path)
                    yield base_name, yield_result
        elif self.config.name == "task1":
            # Task 1 pairs each image with its lesion-boundary segmentation mask.
            for filename in sorted(os.listdir(filepath)):
                if filename.endswith((".jpg", ".jpeg")):
                    base_name = os.path.splitext(filename)[0]
                    yield_result = {"image": os.path.join(filepath, filename)}
                    label_filename = f"{base_name}_segmentation.png"
                    label_file_path = os.path.join(labelpath, label_filename)
                    yield_result["label"] = self._ensure_mask(yield_result["image"], label_file_path)
                    yield base_name, yield_result

# datasets-cli test /Users/jon/code/school/t8/DeepLearning/proj/lesion-dataset.py --save_info --all_configs
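
# Minimal smoke test - a sketch, not part of the loading script proper. It assumes
# this file is run directly, the GCS archives above are reachable, and a recent
# `datasets` release (which requires trust_remote_code=True for script datasets).
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(__file__, name="task1", trust_remote_code=True)
    print(ds)  # split names and sizes
    first = ds["train"][0]
    print({k: type(v) for k, v in first.items()})  # expect PIL images for image and label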