|
|
import datasets |
|
|
import os |
|
|
import pandas as pd |
|
|
from huggingface_hub import list_repo_files |
|
|
import glob |
|
|
|
|
|
class MERFISHConfig(datasets.BuilderConfig):
    """BuilderConfig for the MERFISH dataset.

    Extends the standard builder config with an optional ``gene_subset``
    attribute (``None`` means "all genes").
    """

    def __init__(self, gene_subset=None, **kwargs):
        # Forward the standard fields (name, description, ...) to the base
        # class and keep the MERFISH-specific option on the instance.
        super().__init__(**kwargs)
        self.gene_subset = gene_subset
|
|
|
|
|
class MERFISH(datasets.GeneratorBasedBuilder):
    """Builder for the MERFISH mouse-brain dataset hosted on the HF Hub.

    The repo (``data4science/merfish``) is laid out as, per config name::

        <config>/expression/*.parquet   # cells x genes count shards
        <config>/gene_metadata.parquet  # one row per gene
        <config>/cell_metadata.parquet  # one row per cell

    Each generated example is one cell: its identifier, its expression
    vector, and the ordered list of gene names.
    """

    BUILDER_CONFIGS = [
        MERFISHConfig(name="raw", description="Raw MERFISH counts per gene"),
        MERFISHConfig(name="processed", description="Processed MERFISH data"),
    ]

    def _info(self):
        """Return dataset metadata; features describe one cell per example."""
        return datasets.DatasetInfo(
            description="MERFISH dataset of mouse brain slices",
            features=datasets.Features({
                "cell_identifier": datasets.Value("string"),
                "expression": datasets.Sequence(datasets.Value("float32")),
                "gene_names": datasets.Sequence(datasets.Value("string")),
            }),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Enumerate the parquet shards on the Hub and resolve them.

        Bug fixes versus the previous implementation:

        * The old streaming branch downloaded a glob *pattern* and then ran
          ``glob.glob`` on the returned URL — ``glob`` only matches local
          filesystem paths, so streaming always produced an empty shard
          list.  Both modes now enumerate concrete files via
          ``list_repo_files`` first; ``dl_manager.download`` then yields
          local paths (regular mode) or streamable URLs (streaming mode),
          both of which ``pd.read_parquet`` can open.
        * Repo paths are joined with ``/`` instead of ``os.path.join``,
          which would produce backslashes on Windows and break Hub lookups.
        * The old ``"fs": dl_manager.fs if dl_manager.is_streaming else
          None`` kwarg lived in the non-streaming branch and therefore was
          always ``None``; it has been dropped (``_generate_examples``
          keeps an ``fs=None`` default for compatibility).
        """
        repo_id = "data4science/merfish"
        expression_prefix = f"{self.config.name}/expression"

        all_files = list_repo_files(repo_id, repo_type="dataset")
        # Sort for a deterministic shard (and therefore example) order.
        expression_files = sorted(
            f for f in all_files
            if f.startswith(expression_prefix) and f.endswith(".parquet")
        )

        expression_files = dl_manager.download(expression_files)
        gene_metadata = dl_manager.download(f"{self.config.name}/gene_metadata.parquet")
        cell_metadata = dl_manager.download(f"{self.config.name}/cell_metadata.parquet")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "expression_files": expression_files,
                    "gene_metadata_path": gene_metadata,
                    "cell_metadata_path": cell_metadata,
                },
            ),
        ]

    def _generate_examples(self, expression_files, gene_metadata_path, cell_metadata_path, fs=None):
        """Yield ``(key, example)`` pairs, one per cell row per shard.

        Args:
            expression_files: Resolved paths/URLs of the expression shards.
            gene_metadata_path: Resolved path/URL of the gene metadata table.
            cell_metadata_path: Resolved path/URL of the cell metadata table.
            fs: Optional fsspec-like filesystem; kept for backward
                compatibility with previously-cached split arguments.
                ``pd.read_parquet`` handles plain paths and URLs directly.
        """

        def _read(path):
            # Route through the explicit filesystem only when one was given.
            if fs is not None:
                with fs.open(path, "rb") as handle:
                    return pd.read_parquet(handle)
            return pd.read_parquet(path)

        gene_df = _read(gene_metadata_path)
        # Read cell metadata as before (parity with the original, which
        # loaded it but did not consume it downstream — it at least
        # validates the file is present and readable).
        _read(cell_metadata_path)

        # Prefer an explicit identifier column; fall back to the index.
        if "gene_identifier" in gene_df.columns:
            gene_names = gene_df["gene_identifier"].tolist()
        else:
            gene_names = gene_df.index.tolist()

        # Honor the (previously unused) ``gene_subset`` config option.
        # ``None`` — the default — preserves the original behavior exactly.
        gene_subset = getattr(self.config, "gene_subset", None)

        key = 0
        for shard_path in expression_files:
            df = _read(shard_path)
            if gene_subset is not None:
                kept = [g for g in gene_subset if g in df.columns]
                df = df[kept]
                shard_gene_names = kept
            else:
                shard_gene_names = gene_names
            for cell_id, row in df.iterrows():
                yield key, {
                    "cell_identifier": str(cell_id),
                    "expression": row.to_numpy(dtype="float32").tolist(),
                    "gene_names": shard_gene_names,
                }
                key += 1
|
|
|