Update t_rex_relational_similarity.py
t_rex_relational_similarity.py  CHANGED

@@ -7,7 +7,7 @@ import datasets
 logger = datasets.logging.get_logger(__name__)
 _DESCRIPTION = """T-Rex dataset."""
 _NAME = "t_rex_relational_similarity"
-_VERSION = "0.0.
+_VERSION = "0.0.3"
 _CITATION = """
 @inproceedings{elsahar2018t,
   title={T-rex: A large scale alignment of natural language with knowledge base triples},
@@ -19,9 +19,6 @@ _CITATION = """
 
 _HOME_PAGE = "https://github.com/asahi417/relbert"
 _URL = f'https://huggingface.co/datasets/relbert/{_NAME}/resolve/main/data'
-MIN_ENTITY_FREQ = [1, 2, 3, 4]
-MAX_PREDICATE_FREQ = [100, 50, 25, 10]
-
 _TYPES = [f"filter_unified.min_entity_{a}_max_predicate_{b}" for a, b in product(MIN_ENTITY_FREQ, MAX_PREDICATE_FREQ)]
 _URLS = {i: {
     str(datasets.Split.TRAIN): [f'{_URL}/{i}.train.jsonl'],
@@ -44,13 +41,10 @@ class TREXRelationalSimilarityConfig(datasets.BuilderConfig):
 class TREXRelationalSimilarity(datasets.GeneratorBasedBuilder):
     """Dataset."""
 
-    BUILDER_CONFIGS = [
-        TREXRelationalSimilarityConfig(name=i, version=datasets.Version(_VERSION), description=_DESCRIPTION)
-        for i in sorted(_TYPES)
-    ]
+    BUILDER_CONFIGS = [TREXRelationalSimilarityConfig(version=datasets.Version(_VERSION), description=_DESCRIPTION)]
 
     def _split_generators(self, dl_manager):
-        downloaded_file = dl_manager.download_and_extract(_URLS
+        downloaded_file = dl_manager.download_and_extract(_URLS)
         return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
                 for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
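
For context, this change bumps the script version to 0.0.3 and collapses the per-filter BUILDER_CONFIGS list comprehension into a single default config, so the loader no longer needs a config name. Below is a minimal usage sketch, not part of the commit; the repository id "relbert/t_rex_relational_similarity" is inferred from _URL above and should be treated as an assumption.

    # Minimal usage sketch (assumed repository id, not part of the commit).
    import datasets

    # With a single unnamed BuilderConfig, no config name is passed here.
    # Newer versions of the datasets library may additionally require
    # trust_remote_code=True for script-based datasets like this one.
    dataset = datasets.load_dataset("relbert/t_rex_relational_similarity")

    # _split_generators above defines train/validation/test splits.
    for split in ("train", "validation", "test"):
        print(split, dataset[split].num_rows)

As a note on the _split_generators change: dl_manager.download_and_extract accepts a nested dict of URLs and returns the same structure with local file paths, which is why downloaded_file[str(i)] can be indexed by split name afterwards.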
|