Datasets:
Tasks:
Token Classification
Sub-tasks:
word-sense-disambiguation
Languages:
Polish
Size:
1M<n<10M
License:
Update wsd_plwordnet_glex.py
Browse files
Fix loading for usage_examples
- wsd_plwordnet_glex.py +68 -9
wsd_plwordnet_glex.py
CHANGED
|
@@ -42,30 +42,78 @@ class WsdPlwordnetGlex(datasets.GeneratorBasedBuilder):
|
|
| 42 |
|
| 43 |
def _info(self):
|
| 44 |
features = datasets.Features()
|
| 45 |
-
if self.config.name == "
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
features = datasets.Features(
|
| 47 |
{
|
| 48 |
"pl_sense": datasets.Value("string"),
|
| 49 |
"text": datasets.Value("string"),
|
| 50 |
-
"tokens": datasets.
|
| 51 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
"plWN_lex_id": datasets.Value("string"),
|
| 53 |
"plWN_syn_id": datasets.Value("string"),
|
| 54 |
-
"
|
| 55 |
}
|
| 56 |
)
|
| 57 |
if self.config.name == "lemma_candidates":
|
| 58 |
features = datasets.Features(
|
| 59 |
{
|
| 60 |
"lemma": datasets.Value("string"),
|
| 61 |
-
"candidates": datasets.Value("string"),
|
| 62 |
}
|
| 63 |
)
|
| 64 |
return datasets.DatasetInfo(
|
| 65 |
description=_DESCRIPTION,
|
| 66 |
features=features,
|
| 67 |
)
|
| 68 |
-
|
| 69 |
def _split_generators(self, dl_manager):
|
| 70 |
dl_dir = dl_manager.download_and_extract(self.config.data_dir)
|
| 71 |
return [
|
|
@@ -79,7 +127,18 @@ class WsdPlwordnetGlex(datasets.GeneratorBasedBuilder):
|
|
| 79 |
"""Generate examples."""
|
| 80 |
|
| 81 |
with open(path, encoding="utf-8") as f:
|
| 82 |
-
if self.config.name == "
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
for k,line in enumerate(f):
|
| 84 |
data = json.loads(line)
|
| 85 |
yield k, {
|
|
@@ -89,10 +148,10 @@ class WsdPlwordnetGlex(datasets.GeneratorBasedBuilder):
|
|
| 89 |
"phrases": data["phrases"],
|
| 90 |
"plWN_lex_id": data["plWN_lex_id"],
|
| 91 |
"plWN_syn_id": data["plWN_syn_id"],
|
| 92 |
-
"
|
| 93 |
}
|
| 94 |
|
| 95 |
if self.config.name == "lemma_candidates":
|
| 96 |
data = json.loads(f.read())
|
| 97 |
processed = ((ind,{"lemma": k, "candidates": v}) for ind,(k,v) in enumerate(data.items()))
|
| 98 |
-
yield from processed
|
|
|
|
| 42 |
|
| 43 |
def _info(self):
|
| 44 |
features = datasets.Features()
|
| 45 |
+
if self.config.name == "usage_examples":
|
| 46 |
+
features = features = datasets.Features(
|
| 47 |
+
{
|
| 48 |
+
|
| 49 |
+
"text": datasets.Value("string"),
|
| 50 |
+
"tokens": datasets.features.Sequence(
|
| 51 |
+
dict(
|
| 52 |
+
{
|
| 53 |
+
"index": datasets.Value("int32"),
|
| 54 |
+
"position": datasets.features.Sequence(
|
| 55 |
+
length=2,
|
| 56 |
+
feature=datasets.Value("int32"),
|
| 57 |
+
),
|
| 58 |
+
"orth": datasets.Value("string"),
|
| 59 |
+
"lemma": datasets.Value("string"),
|
| 60 |
+
"pos": datasets.Value("string"),
|
| 61 |
+
"ctag": datasets.Value("string"),
|
| 62 |
+
}
|
| 63 |
+
),
|
| 64 |
+
),
|
| 65 |
+
"phrases": datasets.features.Sequence(datasets.Value("string")),
|
| 66 |
+
"wsd": datasets.features.Sequence(
|
| 67 |
+
dict({
|
| 68 |
+
"pl_sense": datasets.Value("string"),
|
| 69 |
+
"plWN_lex_id": datasets.Value("string"),
|
| 70 |
+
"plWN_syn_id": datasets.Value("string"),
|
| 71 |
+
"plWN_lex_legacy_id": datasets.Value("string"),
|
| 72 |
+
"plWN_syn_legacy_id": datasets.Value("string"),
|
| 73 |
+
"index": datasets.Value("int32"),
|
| 74 |
+
})
|
| 75 |
+
),
|
| 76 |
+
'context_file': datasets.Value('string'),
|
| 77 |
+
}
|
| 78 |
+
)
|
| 79 |
+
if self.config.name == "definitions":
|
| 80 |
features = datasets.Features(
|
| 81 |
{
|
| 82 |
"pl_sense": datasets.Value("string"),
|
| 83 |
"text": datasets.Value("string"),
|
| 84 |
+
"tokens": datasets.features.Sequence(
|
| 85 |
+
dict(
|
| 86 |
+
{
|
| 87 |
+
"index": datasets.Value("int32"),
|
| 88 |
+
"position": datasets.features.Sequence(
|
| 89 |
+
length=2,
|
| 90 |
+
feature=datasets.Value("int32"),
|
| 91 |
+
),
|
| 92 |
+
"orth": datasets.Value("string"),
|
| 93 |
+
"lemma": datasets.Value("string"),
|
| 94 |
+
"pos": datasets.Value("string"),
|
| 95 |
+
"ctag": datasets.Value("string"),
|
| 96 |
+
}
|
| 97 |
+
),
|
| 98 |
+
),
|
| 99 |
+
"phrases": datasets.features.Sequence(datasets.Value("string")),
|
| 100 |
"plWN_lex_id": datasets.Value("string"),
|
| 101 |
"plWN_syn_id": datasets.Value("string"),
|
| 102 |
+
"wsd_indices": datasets.features.Sequence(datasets.Value("int32")),
|
| 103 |
}
|
| 104 |
)
|
| 105 |
if self.config.name == "lemma_candidates":
|
| 106 |
features = datasets.Features(
|
| 107 |
{
|
| 108 |
"lemma": datasets.Value("string"),
|
| 109 |
+
"candidates": datasets.features.Sequence(datasets.Value("string")),
|
| 110 |
}
|
| 111 |
)
|
| 112 |
return datasets.DatasetInfo(
|
| 113 |
description=_DESCRIPTION,
|
| 114 |
features=features,
|
| 115 |
)
|
| 116 |
+
|
| 117 |
def _split_generators(self, dl_manager):
|
| 118 |
dl_dir = dl_manager.download_and_extract(self.config.data_dir)
|
| 119 |
return [
|
|
|
|
| 127 |
"""Generate examples."""
|
| 128 |
|
| 129 |
with open(path, encoding="utf-8") as f:
|
| 130 |
+
if self.config.name == "usage_examples":
|
| 131 |
+
for k,line in enumerate(f):
|
| 132 |
+
data = json.loads(line)
|
| 133 |
+
yield k, {
|
| 134 |
+
"text": data["text"],
|
| 135 |
+
"tokens": data["tokens"],
|
| 136 |
+
"phrases": data["phrases"],
|
| 137 |
+
"wsd": data["wsd"],
|
| 138 |
+
'context_file': data['context_file'],
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
if self.config.name == "definitions":
|
| 142 |
for k,line in enumerate(f):
|
| 143 |
data = json.loads(line)
|
| 144 |
yield k, {
|
|
|
|
| 148 |
"phrases": data["phrases"],
|
| 149 |
"plWN_lex_id": data["plWN_lex_id"],
|
| 150 |
"plWN_syn_id": data["plWN_syn_id"],
|
| 151 |
+
"wsd_indices": data["wsd_indices"],
|
| 152 |
}
|
| 153 |
|
| 154 |
if self.config.name == "lemma_candidates":
|
| 155 |
data = json.loads(f.read())
|
| 156 |
processed = ((ind,{"lemma": k, "candidates": v}) for ind,(k,v) in enumerate(data.items()))
|
| 157 |
+
yield from processed
|