julianrisch committed on
Commit
84cc8cd
1 Parent(s): dd60ab6

Update germanquad.py

Browse files
Files changed (1) hide show
  1. germanquad.py +11 -30
germanquad.py CHANGED
@@ -10,7 +10,7 @@ import datasets
10
  logger = datasets.logging.get_logger(__name__)
11
 
12
 
13
- _CITATION = """\
14
  @misc{möller2021germanquad,
15
  title={GermanQuAD and GermanDPR: Improving Non-English Question Answering and Passage Retrieval},
16
  author={Timo Möller and Julian Risch and Malte Pietsch},
@@ -21,7 +21,7 @@ _CITATION = """\
21
  }
22
  """
23
 
24
- _DESCRIPTION = """\
25
  In order to raise the bar for non-English QA, we are releasing a high-quality, human-labeled German QA dataset consisting of 13 722 questions, incl. a three-way annotated test set.
26
  The creation of GermanQuAD is inspired by insights from existing datasets as well as our labeling experience from several industry projects. We combine the strengths of SQuAD, such as high out-of-domain performance, with self-sufficient questions that contain all relevant information for open-domain QA as in the NaturalQuestions dataset. Our training and test datasets do not overlap like other popular datasets and include complex questions that cannot be answered with a single entity or only a few words.
27
  """
@@ -57,29 +57,15 @@ class GermanDPR(datasets.GeneratorBasedBuilder):
57
  description=_DESCRIPTION,
58
  features=datasets.Features(
59
  {
 
 
60
  "question": datasets.Value("string"),
61
- "answers": datasets.features.Sequence(datasets.Value("string")),
62
- "positive_ctxs": datasets.features.Sequence(
63
  {
64
- "title": datasets.Value("string"),
65
- "text": datasets.Value("string"),
66
- "passage_id": datasets.Value("string"),
67
  }
68
- ),
69
- "negative_ctxs": datasets.features.Sequence(
70
- {
71
- "title": datasets.Value("string"),
72
- "text": datasets.Value("string"),
73
- "passage_id": datasets.Value("string"),
74
- }
75
- ),
76
- "hard_negative_ctxs": datasets.features.Sequence(
77
- {
78
- "title": datasets.Value("string"),
79
- "text": datasets.Value("string"),
80
- "passage_id": datasets.Value("string"),
81
- }
82
- ),
83
  }
84
  ),
85
  # No default supervised_keys (as we have to pass both question
@@ -109,9 +95,7 @@ class GermanDPR(datasets.GeneratorBasedBuilder):
109
  for qa in paragraph["qas"]:
110
  question = qa["question"]
111
  id_ = qa["id"]
112
-
113
- answer_starts = [answer["answer_start"] for answer in qa["answers"]]
114
- answers = [answer["text"] for answer in qa["answers"]]
115
 
116
  # Features currently used are "context", "question", and "answers".
117
  # Others are extracted here for the ease of future expansions.
@@ -119,9 +103,6 @@ class GermanDPR(datasets.GeneratorBasedBuilder):
119
  "context": context,
120
  "question": question,
121
  "id": id_,
122
- "answers": {
123
- "answer_start": answer_starts,
124
- "text": answers,
125
- },
126
  }
127
-
 
10
  logger = datasets.logging.get_logger(__name__)
11
 
12
 
13
+ _CITATION = """
14
  @misc{möller2021germanquad,
15
  title={GermanQuAD and GermanDPR: Improving Non-English Question Answering and Passage Retrieval},
16
  author={Timo Möller and Julian Risch and Malte Pietsch},
 
21
  }
22
  """
23
 
24
+ _DESCRIPTION = """
25
  In order to raise the bar for non-English QA, we are releasing a high-quality, human-labeled German QA dataset consisting of 13 722 questions, incl. a three-way annotated test set.
26
  The creation of GermanQuAD is inspired by insights from existing datasets as well as our labeling experience from several industry projects. We combine the strengths of SQuAD, such as high out-of-domain performance, with self-sufficient questions that contain all relevant information for open-domain QA as in the NaturalQuestions dataset. Our training and test datasets do not overlap like other popular datasets and include complex questions that cannot be answered with a single entity or only a few words.
27
  """
 
57
  description=_DESCRIPTION,
58
  features=datasets.Features(
59
  {
60
+ "id": datasets.Value("int32"),
61
+ "context": datasets.Value("string"),
62
  "question": datasets.Value("string"),
63
+ "answers": datasets.features.Sequence(
 
64
  {
65
+ "text": datasets.Value("string"),
66
+ "answer_start": datasets.Value("int32"),
 
67
  }
68
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  }
70
  ),
71
  # No default supervised_keys (as we have to pass both question
 
95
  for qa in paragraph["qas"]:
96
  question = qa["question"]
97
  id_ = qa["id"]
98
+ answers = [{"answer_start": answer["answer_start"], "text": answer["text"]} for answer in qa["answers"]]
 
 
99
 
100
  # Features currently used are "context", "question", and "answers".
101
  # Others are extracted here for the ease of future expansions.
 
103
  "context": context,
104
  "question": question,
105
  "id": id_,
106
+ "answers": answers,
 
 
 
107
  }
108
+