mwirth7 commited on
Commit
316f64d
·
verified ·
1 Parent(s): 5da72cc

Update BirdSet.py

Browse files
Files changed (1) hide show
  1. BirdSet.py +129 -133
BirdSet.py CHANGED
@@ -11,26 +11,27 @@
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
- # TODO: Address all TODOs and remove all explanatory comments
15
  """BirdSet: The General Avian Monitoring Evaluation Benchmark"""
16
 
17
  import os
18
  import datasets
19
  import pandas as pd
20
- import numpy as np
 
 
 
21
 
22
  from .classes import BIRD_NAMES_NIPS4BPLUS, BIRD_NAMES_AMAZON_BASIN, BIRD_NAMES_HAWAII, \
23
  BIRD_NAMES_HIGH_SIERRAS, BIRD_NAMES_SIERRA_NEVADA, BIRD_NAMES_POWDERMILL_NATURE, BIRD_NAMES_SAPSUCKER, \
24
  BIRD_NAMES_COLUMBIA_COSTA_RICA, BIRD_NAMES_XENOCANTO, BIRD_NAMES_XENOCANTO_M
25
 
26
- from . import classes
27
-
28
- from .descriptions import _BIRD_DB_CITATION, _NIPS4BPLUS_CITATION, _NIPS4BPLUS_DESCRIPTION, \
29
  _HIGH_SIERRAS_DESCRIPTION, _HIGH_SIERRAS_CITATION, _SIERRA_NEVADA_DESCRIPTION, _SIERRA_NEVADA_CITATION, \
30
  _POWDERMILL_NATURE_DESCRIPTION, _POWDERMILL_NATURE_CITATION, _AMAZON_BASIN_DESCRIPTION, _AMAZON_BASIN_CITATION, \
31
  _SAPSUCKER_WOODS_DESCRIPTION, _SAPSUCKER_WOODS_CITATION, _COLUMBIA_COSTA_RICA_CITATION, \
32
  _COLUMBIA_COSTA_RICA_DESCRIPTION, _HAWAIIAN_ISLANDS_CITATION, _HAWAIIAN_ISLANDS_DESCRIPTION
33
 
 
34
  #############################################
35
  _BIRDSET_CITATION = """\
36
  @article{birdset,
@@ -50,6 +51,44 @@ _BIRDSET_DESCRIPTION = """\
50
  base_url = "https://huggingface.co/datasets/DBD-research-group/BirdSet/resolve/data"
51
 
52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  class BirdSetConfig(datasets.BuilderConfig):
54
  def __init__(
55
  self,
@@ -375,6 +414,7 @@ class BirdSet(datasets.GeneratorBasedBuilder):
375
 
376
  def _split_generators(self, dl_manager):
377
  ds_name = self.config.name
 
378
  train_files = {"PER": 11,
379
  "NES": 13,
380
  "UHH": 5,
@@ -395,7 +435,7 @@ class BirdSet(datasets.GeneratorBasedBuilder):
395
  "SSW": 36,
396
  "SNE": 5}
397
 
398
- test5s_files = {"PER": 1,
399
  "NES": 1,
400
  "UHH": 1,
401
  "HSN": 1,
@@ -404,160 +444,116 @@ class BirdSet(datasets.GeneratorBasedBuilder):
404
  "SSW": 4,
405
  "SNE": 1}
406
 
 
407
  if self.config.name.endswith("_xc"):
408
  ds_name = ds_name[:-3]
409
  dl_dir = dl_manager.download({
410
  "train": [os.path.join(self.config.data_dir, f"{ds_name}_train_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
411
- "metadata": os.path.join(self.config.data_dir, f"{ds_name}_metadata_train.parquet"),
412
  })
413
 
414
  elif self.config.name.endswith("_scape"):
415
  ds_name = ds_name[:-6]
416
  dl_dir = dl_manager.download({
417
  "test": [os.path.join(self.config.data_dir, f"{ds_name}_test_shard_{n:04d}.tar.gz") for n in range(1, test_files[ds_name] + 1)],
418
- "test_5s": [os.path.join(self.config.data_dir, f"{ds_name}_test5s_shard_{n:04d}.tar.gz") for n in range(1, test5s_files[ds_name] + 1)],
419
- "metadata": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test.parquet"),
420
- "metadata_5s": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test_5s.parquet"),
421
  })
422
 
423
  # use POW for XCM/XCL validation
424
  elif self.config.name.startswith("XC"):
425
  dl_dir = dl_manager.download({
426
  "train": [os.path.join(self.config.data_dir, f"{ds_name}_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
427
- "valid": [os.path.join(self.config.data_dir[:-3] + "POW", f"POW_test5s_shard_{n:04d}.tar.gz") for n in range(1, test5s_files["POW"] + 1)],
428
- "metadata": os.path.join(self.config.data_dir, f"{ds_name}_metadata.parquet"),
429
- "meta_test_5s": os.path.join(self.config.data_dir[:-3] + "POW", f"POW_metadata_test_5s.parquet"),
430
  })
431
 
432
- elif self.config.name in train_files.keys():
433
  dl_dir = dl_manager.download({
434
  "train": [os.path.join(self.config.data_dir, f"{ds_name}_train_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
435
  "test": [os.path.join(self.config.data_dir, f"{ds_name}_test_shard_{n:04d}.tar.gz") for n in range(1, test_files[ds_name] + 1)],
436
- "test_5s": [os.path.join(self.config.data_dir, f"{ds_name}_test5s_shard_{n:04d}.tar.gz") for n in range(1, test5s_files[ds_name] + 1)],
437
  "meta_train": os.path.join(self.config.data_dir, f"{ds_name}_metadata_train.parquet"),
438
  "meta_test": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test.parquet"),
439
  "meta_test_5s": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test_5s.parquet"),
440
  })
441
 
442
- # TODO no more extraction
443
- #local_audio_archives_paths = dl_manager.extract(dl_dir) if not dl_manager.is_streaming else None
444
- #local_audio_archives_paths = dl_manager.iter_archive(dl_dir)
445
-
446
- if self.config.name.startswith("XC") or self.config.name.endswith("_xc"):
447
- return [
448
- datasets.SplitGenerator(
449
- name=datasets.Split.TRAIN,
450
- gen_kwargs={
451
- "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["train"]],
452
- #"local_audio_archives_paths": local_audio_archives_paths["train"] if local_audio_archives_paths else None,
453
- "metapath": dl_dir["metadata"],
454
- "split": datasets.Split.TRAIN,
455
- },
456
- ),
457
- datasets.SplitGenerator(
458
- name="valid",
459
- gen_kwargs={
460
- "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["valid"]],
461
- #"local_audio_archives_paths": local_audio_archives_paths["valid"] if local_audio_archives_paths else None,
462
- "metapath": dl_dir["meta_test_5s"],
463
- "split": "valid",
464
- },
465
- ),
466
- ]
467
 
468
- elif self.config.name.endswith("_scape"):
469
- return [
470
- datasets.SplitGenerator(
471
- name=datasets.Split.TEST,
472
- gen_kwargs={
473
- "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["test"]],
474
- #"local_audio_archives_paths": local_audio_archives_paths["test"] if local_audio_archives_paths else None,
475
- "metapath": dl_dir["metadata"],
476
- "split": datasets.Split.TEST,
477
- },
478
- ),
479
- datasets.SplitGenerator(
480
- name="test_5s",
481
- gen_kwargs={
482
- "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["test_5s"]],
483
- #"local_audio_archives_paths": local_audio_archives_paths["test_5s"] if local_audio_archives_paths else None,
484
- "metapath": dl_dir["metadata_5s"],
485
- "split": "test_multilabel"
486
- },
487
- ),
488
- ]
489
-
490
- return [
491
- datasets.SplitGenerator(
492
- name=datasets.Split.TRAIN,
493
- gen_kwargs={
494
- "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["train"]],
495
- #"local_audio_archives_paths": local_audio_archives_paths["train"] if local_audio_archives_paths else None,
496
- "metapath": dl_dir["meta_train"],
497
- "split": datasets.Split.TRAIN,
498
- },
499
- ),
500
- datasets.SplitGenerator(
501
- name=datasets.Split.TEST,
502
- gen_kwargs={
503
- "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["test"]],
504
- #"local_audio_archives_paths": local_audio_archives_paths["test"] if local_audio_archives_paths else None,
505
- "metapath": dl_dir["meta_test"],
506
- "split": datasets.Split.TEST,
507
- },
508
- ),
509
- datasets.SplitGenerator(
510
- name="test_5s",
511
- gen_kwargs={
512
- "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["test_5s"]],
513
- #"local_audio_archives_paths": local_audio_archives_paths["test_5s"] if local_audio_archives_paths else None,
514
- "metapath": dl_dir["meta_test_5s"],
515
- "split": "test_multilabel"
516
- },
517
- ),
518
- ]
519
-
520
- def _generate_examples(self, audio_archive_iterators, metapath, split):
521
- metadata = pd.read_parquet(metapath)
522
  idx = 0
523
- for i, audio_archive_iterator in enumerate(audio_archive_iterators):
 
524
  for audio_path_in_archive, audio_file in audio_archive_iterator:
525
- id = os.path.split(audio_path_in_archive)[-1]
526
- rows = metadata[metadata.index == (int(id[2:].split(".")[0]) if split == "train" else id)]
527
- audio_path = audio_path_in_archive
528
-
529
  for _, row in rows.iterrows():
530
- idx += 1
531
- yield id if split == "train" else idx, {
532
- "audio": audio_file.read(),
533
- "filepath": audio_path,
534
- "start_time": row["start_time"],
535
- "end_time": row["end_time"],
536
- "low_freq": row["low_freq"],
537
- "high_freq": row["high_freq"],
538
- "ebird_code": row["ebird_code"] if split != "test_multilabel" else None,
539
- "ebird_code_multilabel": row.get("ebird_code_multilabel", None) if "no_call" not in row.get("ebird_code_multilabel", []) else [],
540
- "ebird_code_secondary": row.get("ebird_code_secondary", None),
541
- "call_type": row["call_type"],
542
- "sex": row["sex"],
543
- "lat": row["lat"],
544
- "long": row["long"],
545
- "length": row.get("length", None),
546
- "microphone": row["microphone"],
547
- "license": row.get("license", None),
548
- "source": row["source"],
549
- "local_time": row["local_time"],
550
- "detected_events": row.get("detected_events", None),
551
- "event_cluster": row.get("event_cluster", None),
552
- "peaks": row.get("peaks", None),
553
- "quality": row.get("quality", None),
554
- "recordist": row.get("recordist", None),
555
- "genus": row.get("genus", None),
556
- "species_group": row.get("species_group", None),
557
- "order": row.get("order", None),
558
- "genus_multilabel": row.get("genus_multilabel", None),
559
- "species_group_multilabel": row.get("species_group_multilabel", None),
560
- "order_multilabel": row.get("order_multilabel", None),
561
- }
562
-
563
- os.remove(audio_archive_iterator.args[0])
 
 
 
 
 
 
 
 
 
 
 
 
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
 
14
  """BirdSet: The General Avian Monitoring Evaluation Benchmark"""
15
 
16
  import os
17
  import datasets
18
  import pandas as pd
19
+ from tqdm.auto import tqdm
20
+ import tarfile
21
+
22
+ from . import classes
23
 
24
  from .classes import BIRD_NAMES_NIPS4BPLUS, BIRD_NAMES_AMAZON_BASIN, BIRD_NAMES_HAWAII, \
25
  BIRD_NAMES_HIGH_SIERRAS, BIRD_NAMES_SIERRA_NEVADA, BIRD_NAMES_POWDERMILL_NATURE, BIRD_NAMES_SAPSUCKER, \
26
  BIRD_NAMES_COLUMBIA_COSTA_RICA, BIRD_NAMES_XENOCANTO, BIRD_NAMES_XENOCANTO_M
27
 
28
+ from .descriptions import _NIPS4BPLUS_CITATION, _NIPS4BPLUS_DESCRIPTION, \
 
 
29
  _HIGH_SIERRAS_DESCRIPTION, _HIGH_SIERRAS_CITATION, _SIERRA_NEVADA_DESCRIPTION, _SIERRA_NEVADA_CITATION, \
30
  _POWDERMILL_NATURE_DESCRIPTION, _POWDERMILL_NATURE_CITATION, _AMAZON_BASIN_DESCRIPTION, _AMAZON_BASIN_CITATION, \
31
  _SAPSUCKER_WOODS_DESCRIPTION, _SAPSUCKER_WOODS_CITATION, _COLUMBIA_COSTA_RICA_CITATION, \
32
  _COLUMBIA_COSTA_RICA_DESCRIPTION, _HAWAIIAN_ISLANDS_CITATION, _HAWAIIAN_ISLANDS_DESCRIPTION
33
 
34
+
35
  #############################################
36
  _BIRDSET_CITATION = """\
37
  @article{birdset,
 
51
  base_url = "https://huggingface.co/datasets/DBD-research-group/BirdSet/resolve/data"
52
 
53
 
54
+ def _extract_all_to_same_folder(tar_path, output_dir):
55
+ """Custom extraction for tar.gz archives: extracts all member files directly into output_dir, flattening any subfolders."""
56
+ # check if data already exists
57
+ if not os.path.isfile(output_dir) and os.path.isdir(output_dir) and os.listdir(output_dir):
58
+ return output_dir
59
+ os.makedirs(output_dir, exist_ok=True)
60
+
61
+ with tarfile.open(tar_path, "r:gz") as tar:
62
+ for member in tar.getmembers():
63
+ if member.isfile():
64
+ member.name = os.path.basename(member.name)
65
+ tar.extract(member, path=output_dir)
66
+
67
+ return output_dir
68
+
69
+
70
+ def _extract_and_delete(dl_dir: dict) -> dict:
71
+ """Extract the downloaded archives and delete each archive immediately after extraction, showing a progress bar.
72
+ At any point in time, at most one archive and its extracted contents occupy disk space simultaneously."""
73
+ audio_paths = {name: [] for name, data in dl_dir.items() if isinstance(data, list)}
74
+ for name, data in dl_dir.items():
75
+ if not isinstance(data, list):
76
+ continue
77
+
78
+ # extract and immediately delete archives
79
+ for path in tqdm(data, f"Extracting {name} split"):
80
+ head, tail = os.path.split(path)
81
+ output_dir = os.path.join(head, "extracted", tail)
82
+ #audio_path = dl_manager.extract(path) # if all archive files are without subfolders this works just fine
83
+ audio_path = _extract_all_to_same_folder(path, output_dir)
84
+ os.remove(path)
85
+ os.remove(f"{path}.lock")
86
+ os.remove(f"{path}.json")
87
+ audio_paths[name].append(audio_path)
88
+
89
+ return audio_paths
90
+
91
+
92
  class BirdSetConfig(datasets.BuilderConfig):
93
  def __init__(
94
  self,
 
414
 
415
  def _split_generators(self, dl_manager):
416
  ds_name = self.config.name
417
+ # number of archive (tar.gz) shard files uploaded for each dataset
418
  train_files = {"PER": 11,
419
  "NES": 13,
420
  "UHH": 5,
 
435
  "SSW": 36,
436
  "SNE": 5}
437
 
438
+ test_5s_files = {"PER": 1,
439
  "NES": 1,
440
  "UHH": 1,
441
  "HSN": 1,
 
444
  "SSW": 4,
445
  "SNE": 1}
446
 
447
+ # the config name determines which files need to be downloaded
448
  if self.config.name.endswith("_xc"):
449
  ds_name = ds_name[:-3]
450
  dl_dir = dl_manager.download({
451
  "train": [os.path.join(self.config.data_dir, f"{ds_name}_train_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
452
+ "meta_train": os.path.join(self.config.data_dir, f"{ds_name}_metadata_train.parquet"),
453
  })
454
 
455
  elif self.config.name.endswith("_scape"):
456
  ds_name = ds_name[:-6]
457
  dl_dir = dl_manager.download({
458
  "test": [os.path.join(self.config.data_dir, f"{ds_name}_test_shard_{n:04d}.tar.gz") for n in range(1, test_files[ds_name] + 1)],
459
+ "test_5s": [os.path.join(self.config.data_dir, f"{ds_name}_test5s_shard_{n:04d}.tar.gz") for n in range(1, test_5s_files[ds_name] + 1)],
460
+ "meta_test": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test.parquet"),
461
+ "meta_test_5s": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test_5s.parquet"),
462
  })
463
 
464
  # use POW for XCM/XCL validation
465
  elif self.config.name.startswith("XC"):
466
  dl_dir = dl_manager.download({
467
  "train": [os.path.join(self.config.data_dir, f"{ds_name}_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
468
+ "valid": [os.path.join(self.config.data_dir[:-3] + "POW", f"POW_test5s_shard_{n:04d}.tar.gz") for n in range(1, test_5s_files["POW"] + 1)],
469
+ "meta_train": os.path.join(self.config.data_dir, f"{ds_name}_metadata.parquet"),
470
+ "meta_valid": os.path.join(self.config.data_dir[:-3] + "POW", f"POW_metadata_test_5s.parquet"),
471
  })
472
 
473
+ else:
474
  dl_dir = dl_manager.download({
475
  "train": [os.path.join(self.config.data_dir, f"{ds_name}_train_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
476
  "test": [os.path.join(self.config.data_dir, f"{ds_name}_test_shard_{n:04d}.tar.gz") for n in range(1, test_files[ds_name] + 1)],
477
+ "test_5s": [os.path.join(self.config.data_dir, f"{ds_name}_test5s_shard_{n:04d}.tar.gz") for n in range(1, test_5s_files[ds_name] + 1)],
478
  "meta_train": os.path.join(self.config.data_dir, f"{ds_name}_metadata_train.parquet"),
479
  "meta_test": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test.parquet"),
480
  "meta_test_5s": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test_5s.parquet"),
481
  })
482
 
483
+ # custom extraction that deletes archives right after extraction
484
+ audio_paths = _extract_and_delete(dl_dir) if not dl_manager.is_streaming else None
485
+
486
+ # construct split generators
487
+ # assumes every non-metadata key NAME in dl_dir has a matching "meta_NAME" entry
488
+ names = [name for name in dl_dir.keys() if not name.startswith("meta_")]
489
+ is_streaming = dl_manager.is_streaming
490
+
491
+ return [datasets.SplitGenerator(
492
+ name=name,
493
+ gen_kwargs={
494
+ "audio_archive_iterators": (dl_manager.iter_archive(archive_path) for archive_path in dl_dir[name]) if is_streaming else () ,
495
+ "audio_extracted_paths": audio_paths[name] if not is_streaming else (),
496
+ "meta_path": dl_dir[f"meta_{name}"],
497
+ "split": name
498
+ }
499
+ ) for name in names]
500
+
501
+
502
+ def _generate_examples(self, audio_archive_iterators, audio_extracted_paths, meta_path, split):
503
+ metadata = pd.read_parquet(meta_path)
504
+ if metadata.index.name != "filepath": # TODO could be removed by changing train metadata files to have filepath as index
505
+ metadata.index = metadata["filepath"].str.split("/").apply(lambda x: x[-1]) # TODO work around for update_1 inconsistent filepath
 
 
506
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
507
  idx = 0
508
+ # in case of streaming
509
+ for audio_archive_iterator in audio_archive_iterators:
510
  for audio_path_in_archive, audio_file in audio_archive_iterator:
511
+ file_name = os.path.split(audio_path_in_archive)[-1]
512
+ rows = metadata.loc[[file_name]]
513
+ audio = audio_file.read()
 
514
  for _, row in rows.iterrows():
515
+ yield idx, self._metadata_from_row(row, split, audio_path=file_name, audio=audio)
516
+ idx += 1
517
+
518
+ # in case of not streaming
519
+ for audio_extracted_path in audio_extracted_paths:
520
+ audio_files = os.listdir(audio_extracted_path)
521
+ current_metadata = metadata.loc[audio_files]
522
+ for audio_file, row in current_metadata.iterrows():
523
+ audio_path = os.path.join(audio_extracted_path, audio_file)
524
+ yield idx, self._metadata_from_row(row, split, audio_path=audio_path)
525
+ idx += 1
526
+
527
+
528
+ @staticmethod
529
+ def _metadata_from_row(row, split: str, audio_path=None, audio=None) -> dict:
530
+ return {"audio": audio_path if not audio else {"path": None, "bytes": audio},
531
+ "filepath": audio_path,
532
+ "start_time": row["start_time"],
533
+ "end_time": row["end_time"],
534
+ "low_freq": row["low_freq"],
535
+ "high_freq": row["high_freq"],
536
+ "ebird_code": row["ebird_code"] if split != "test_5s" else None,
537
+ "ebird_code_multilabel": row.get("ebird_code_multilabel", None),
538
+ "ebird_code_secondary": row.get("ebird_code_secondary", None),
539
+ "call_type": row["call_type"],
540
+ "sex": row["sex"],
541
+ "lat": row["lat"],
542
+ "long": row["long"],
543
+ "length": row.get("length", None),
544
+ "microphone": row["microphone"],
545
+ "license": row.get("license", None),
546
+ "source": row["source"],
547
+ "local_time": row["local_time"],
548
+ "detected_events": row.get("detected_events", None),
549
+ "event_cluster": row.get("event_cluster", None),
550
+ "peaks": row.get("peaks", None),
551
+ "quality": row.get("quality", None),
552
+ "recordist": row.get("recordist", None),
553
+ "genus": row.get("genus", None) if split != "test_5s" else None,
554
+ "species_group": row.get("species_group", None) if split != "test_5s" else None,
555
+ "order": row.get("order", None) if split != "test_5s" else None,
556
+ "genus_multilabel": row.get("genus_multilabel", [row.get("genus")]),
557
+ "species_group_multilabel": row.get("species_group_multilabel", [row.get("species_group")]),
558
+ "order_multilabel": row.get("order_multilabel", [row.get("order")]),
559
+ }