Muennighoff committed
Commit f65642d
1 Parent(s): c9b4dc1
Files changed (2)
  1. data/xwinograd.tsv +0 -0
  2. xwinograd.py +147 -0
data/xwinograd.tsv ADDED
The diff for this file is too large to render. See raw diff
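Since the TSV diff is not rendered, a quick local sanity check is to read the file with pandas using the column layout the loader below assumes. This is a hedged sketch, not part of the commit: it assumes the newly added data/xwinograd.tsv shares the seven-column schema that xwinograd.py uses for the TSV it downloads, and that the snippet is run from the repository root.

import pandas as pd

# Column names taken from _generate_examples in xwinograd.py (assumption: the
# local TSV uses the same layout as the downloaded dataset.tsv).
cols = ["lang", "type", "original", "sent", "toks", "pronoun", "solution"]
df = pd.read_csv("data/xwinograd.tsv", sep="\t", header=None, names=cols)
print(df["lang"].value_counts())  # number of rows per language code
print(df.head(1).T)               # first row, transposed for readability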
 
xwinograd.py ADDED
@@ -0,0 +1,147 @@
+ # coding=utf-8
+
+ # Lint as: python3
+ """XWinograd"""
+
+
+ import json
+ import random
+
+ import pandas as pd
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @misc{tikhonov2021heads,
+       title={It's All in the Heads: Using Attention Heads as a Baseline for Cross-Lingual Transfer in Commonsense Reasoning},
+       author={Alexey Tikhonov and Max Ryabinin},
+       year={2021},
+       eprint={2106.12066},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """\
+ A multilingual collection of Winograd Schemas in six languages \
+ that can be used for evaluation of cross-lingual commonsense reasoning capabilities.
+ """
+
+
+ # _URL = "https://github.com/yandex-research/crosslingual_winograd/blob/main/dataset.tsv"
+ _URL = "https://huggingface.co/datasets/muennighoff/xwinograd/resolve/main/data/mt/dataset.tsv"
+
+
+ def winogrande_format(row):
+     """Convert a raw TSV row into the Winogrande format: a sentence with the
+     pronoun replaced by "_", two candidate referents, and the correct index."""
+     # "pronoun" is a JSON-encoded pair whose second element holds the token
+     # index of the pronoun to blank out.
+     array = row["pronoun"]
+     position_idx = json.loads(array)[1][0]
+     # Turn unicode into proper Chinese characters
+     sent = str(u"{}".format(row["sent"]))
+     # Walk the tokens to find the character offset of the pronoun token.
+     start_idx = 0
+     for i, tok in enumerate(json.loads(row["toks"])):
+         tok = str(u"{}".format(tok))
+         cur_start_idx = sent.find(tok)
+         if i == position_idx:
+             break
+         sent = sent[cur_start_idx + len(tok):]
+         start_idx += cur_start_idx + len(tok)
+     # Replace the pronoun with "_"; +1 to give room for an optional space
+     row["sentence"] = (
+         row["sent"][:start_idx]
+         + row["sent"][start_idx:start_idx + len(tok) + 1].replace(tok, "_")
+         + row["sent"][start_idx + len(tok) + 1:]
+     )
+
+     # "solution" is a JSON-encoded list of [candidate, ..., is_correct] entries.
+     sol = json.loads(row["solution"])
+
+     # Randomly assign the correct candidate to option1 or option2.
+     cor_answer_idx = random.choice([1, 2])
+     incor_answer_idx = 2 if cor_answer_idx == 1 else 1
+
+     cor_answer = str(u"{}".format(sol[0][0])) if sol[0][-1] == True else str(u"{}".format(sol[1][0]))
+     incor_answer = str(u"{}".format(sol[0][0])) if sol[0][-1] == False else str(u"{}".format(sol[1][0]))
+
+     row[f"option{cor_answer_idx}"] = cor_answer
+     row[f"option{incor_answer_idx}"] = incor_answer
+     row["answer"] = cor_answer_idx
+     return row
+
+
+ class XWinograd(datasets.GeneratorBasedBuilder):
+     """XWinograd"""
+
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="en",
+             version=VERSION,
+             description="X",
+         ),
+         datasets.BuilderConfig(
+             name="fr",
+             version=VERSION,
+             description="X",
+         ),
+         datasets.BuilderConfig(
+             name="jp",
+             version=VERSION,
+             description="X",
+         ),
+         datasets.BuilderConfig(
+             name="pt",
+             version=VERSION,
+             description="X",
+         ),
+         datasets.BuilderConfig(
+             name="ru",
+             version=VERSION,
+             description="X",
+         ),
+         datasets.BuilderConfig(
+             name="zh",
+             version=VERSION,
+             description="X",
+         ),
+     ]
+     DEFAULT_CONFIG_NAME = "en"
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "sentence": datasets.Value("string"),
+                     "option1": datasets.Value("string"),
+                     "option2": datasets.Value("string"),
+                     "answer": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_files = dl_manager.download_and_extract(_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """This function returns the examples in the raw (text) form."""
+         logger.info("generating examples from = %s", filepath)
+         ds = pd.read_csv(
+             filepath, sep='\t', header=None,
+             names=["lang", "type", "original", "sent", "toks", "pronoun", "solution"]
+         )
+         # Keep only the rows for the selected language config.
+         if self.config.name:
+             ds = ds[ds["lang"] == self.config.name]
+         ds = ds.apply(winogrande_format, axis=1)
+
+         for idx, row in ds.iterrows():
+             yield idx, {
+                 "sentence": row["sentence"],
+                 "option1": row["option1"],
+                 "option2": row["option2"],
+                 "answer": row["answer"],
+             }
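For orientation, here is a small sketch of what winogrande_format produces and how the script can be exercised. The example row is hypothetical: the JSON layouts of the pronoun and solution fields are inferred from how the function reads them (only [1][0] of pronoun and [0][0]/[0][-1] of each solution entry are used), so real TSV rows may carry additional elements. The load_dataset call assumes the snippet runs next to xwinograd.py; newer versions of the datasets library may additionally require trust_remote_code=True for script-based datasets.

import json
import random

import pandas as pd

from xwinograd import winogrande_format  # the function defined in this commit

random.seed(0)  # only to make the random option order reproducible in this demo

# Hypothetical row mimicking one TSV line (field layout inferred from the loader).
row = pd.Series({
    "sent": "The trophy does not fit in the suitcase because it is too big.",
    "toks": json.dumps(["The", "trophy", "does", "not", "fit", "in", "the",
                        "suitcase", "because", "it", "is", "too", "big", "."]),
    "pronoun": json.dumps(["it", [9]]),  # [surface form, [token index of the pronoun]]
    "solution": json.dumps([["The trophy", True], ["the suitcase", False]]),  # [candidate, ..., is_correct]
})
out = winogrande_format(row)
print(out["sentence"])  # "The trophy does not fit in the suitcase because _ is too big."
print(out["option1"], out["option2"], out["answer"])

# End-to-end: load the English config from the local script; only a validation split is defined.
from datasets import load_dataset
xwinograd_en = load_dataset("xwinograd.py", "en")
print(xwinograd_en["validation"][0])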