IndoToxic2024.py
from pathlib import Path
from typing import Dict, Iterator, List, Tuple
import datasets
import pandas as pd
_URLS = {
"annotated_data": "https://huggingface.co/datasets/Exqrch/IndoToxic2024/resolve/main/indotoxic2024_annotated_data.csv"
}
_DESCRIPTION = """\
IndoToxic2024 is an Indonesian dataset collected before and during Indonesia's 2024 presidential election.
The data were obtained from social media and annotated by 19 annotators from diverse backgrounds.
The dataset supports text classification tasks around hate speech and toxic content.
"""
_HOMEPAGE = """\
https://github.com/izzako/IndoToxic2024
"""
_LICENSE = """\
Apache License 2.0
"""
_CITATION = """\
Susanto, L., Wijanarko, M. I., Pratama, P. A., Hong, T., Idris, I., Aji, A. F., & Wijaya, D. (2024, June 27). IndoToxic2024: A Demographically-Enriched Dataset of Hate Speech and Toxicity Types for Indonesian Language. arXiv.org. https://arxiv.org/abs/2406.19349
"""
class IndoToxic2024(datasets.GeneratorBasedBuilder):
    """Builder for IndoToxic2024, an annotated Indonesian hate speech and toxicity dataset hosted on the Hugging Face Hub."""
def _info(self) -> datasets.DatasetInfo:
        # Per-annotation schema: identifiers, text fields, and integer label columns.
        features = {
'batch_id': datasets.Value("string"),
'batch_text_id': datasets.Value("string"),
'text_id': datasets.Value("string"),
'metadata_id': datasets.Value("string"),
'annotator_id': datasets.Value("string"),
'text': datasets.Value("string"),
'initial_paragraph': datasets.Value("string"),
'topic': datasets.Value("string"),
'is_noise_or_spam_text': datasets.Value("int32"),
'related_to_election_2024': datasets.Value("int32"),
'toxicity': datasets.Value('int32'),
'profanity_obscenity': datasets.Value('int32'),
'threat_incitement_to_violence': datasets.Value('int32'),
'insults': datasets.Value('int32'),
'identity_attack': datasets.Value('int32'),
'sexually_explicit': datasets.Value('int32')
}
features = datasets.Features(features)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
"""Returns SplitGenerators."""
train_csv_path = Path(dl_manager.download_and_extract(_URLS["annotated_data"]))
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": train_csv_path},
),
]
    def _generate_examples(self, filepath: Path) -> Iterator[Tuple[str, Dict]]:
        """Yields one (key, example) pair per annotation row in the downloaded CSV."""
df = pd.read_csv(filepath).reset_index()
for index, row in df.iterrows():
ex = {
'batch_id': row['batch_id'],
'batch_text_id': row['batch_text_id'],
'text_id': row['text_id'],
'metadata_id': row['metadata_id'],
'annotator_id': row['annotator_id'],
'text': row['text'],
'initial_paragraph': row['initial_paragraph'],
'topic': row['topic'],
'is_noise_or_spam_text': row['is_noise_or_spam_text'],
'related_to_election_2024': row['related_to_election_2024'],
'toxicity': row['toxicity'],
'profanity_obscenity': row['profanity_obscenity'],
'threat_incitement_to_violence': row['threat_incitement_to_violence'],
'insults': row['insults'],
'identity_attack': row['identity_attack'],
'sexually_explicit': row['sexually_explicit']
}
yield str(index), ex
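
For context, here is a minimal usage sketch of how this loading script is typically consumed through the `datasets` library, assuming it is hosted in the Exqrch/IndoToxic2024 repository referenced in _URLS; on recent `datasets` releases, trust_remote_code=True is required to execute a repository-hosted script like this one.

from datasets import load_dataset

# Minimal usage sketch (assumes the Exqrch/IndoToxic2024 Hub repository referenced in _URLS above).
# Recent `datasets` releases require trust_remote_code=True to run a repository-hosted loading script.
ds = load_dataset("Exqrch/IndoToxic2024", split="train", trust_remote_code=True)

print(ds.features)                 # schema declared in _info()
first = ds[0]                      # one annotation row, as a plain dict
print(first["text"], first["toxicity"])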