---
dataset_info:
  features:
  - name: image
    dtype: image
  - name: objects
    struct:
    - name: bbox
      sequence:
        sequence: float64
    - name: segmentation
      sequence:
        sequence:
          sequence: float64
    - name: categories
      sequence: int64
  splits:
  - name: train
    num_bytes: 17598458856.47
    num_examples: 117266
  - name: validation
    num_bytes: 795110726.04
    num_examples: 4952
  download_size: 20170024873
  dataset_size: 18393569582.510002
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: validation
    path: data/validation-*
task_categories:
- object-detection
---
# MS-COCO2017

The MS-COCO 2017 dataset (images with bounding boxes, segmentation polygons, and category ids), repackaged as a Hugging Face `datasets` repository.

## Use the dataset
```python
from datasets import load_dataset

ds = load_dataset("ariG23498/coco2017", streaming=True, split="validation")
sample = next(iter(ds))
```
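Each sample is a plain dict that mirrors the features above: a PIL image under `image`, and parallel `bbox`, `segmentation`, and `categories` lists under `objects`. A quick way to inspect it (the comments describe what the schema implies, not verbatim output):

```python
print(type(sample["image"]))     # a PIL image class
print(sample["objects"].keys())  # bbox, segmentation, categories
```

To visualize a sample, draw the COCO-format boxes with PIL: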
```python
from PIL import Image, ImageDraw

def draw_bboxes_on_image(
    image: Image.Image,
    objects: dict,
    category_names: dict = None,
    box_color: str = "red",
    text_color: str = "white",
):
    draw = ImageDraw.Draw(image)
    bboxes = objects.get("bbox", [])
    categories = objects.get("categories", [])
    for i, bbox in enumerate(bboxes):
        # COCO boxes are stored as (x, y, width, height)
        x, y, width, height = bbox
        # PIL expects (x_min, y_min, x_max, y_max) for rectangle
        x_min, y_min, x_max, y_max = x, y, x + width, y + height
        # Draw the rectangle
        draw.rectangle([x_min, y_min, x_max, y_max], outline=box_color, width=2)
        # Get the category label, falling back to the raw id
        category_id = categories[i]
        label = str(category_id)
        if category_names and category_id in category_names:
            label = category_names[category_id]
        # Use textbbox to get the rendered text size
        text_bbox = draw.textbbox((x_min, y_min), label)
        text_width = text_bbox[2] - text_bbox[0]
        text_height = text_bbox[3] - text_bbox[1]
        # Draw a filled rectangle behind the text for better readability
        draw.rectangle([x_min, y_min - text_height - 5, x_min + text_width + 5, y_min], fill=box_color)
        draw.text((x_min + 2, y_min - text_height - 2), label, fill=text_color)
    return image

draw_bboxes_on_image(
    image=sample["image"],
    objects=sample["objects"],
)
```
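The `objects` dict also carries segmentation polygons in COCO's flat `[x1, y1, x2, y2, ...]` layout, with one list of polygons per object, so they can be overlaid the same way. A minimal sketch along the lines of the helper above; the function name and color are my own:

```python
def draw_segmentations_on_image(
    image: Image.Image,
    objects: dict,
    outline_color: str = "yellow",
):
    draw = ImageDraw.Draw(image)
    for polygons in objects.get("segmentation", []):
        for polygon in polygons:
            # Each polygon is a flat [x1, y1, x2, y2, ...] coordinate list
            draw.polygon(polygon, outline=outline_color)
    return image

draw_segmentations_on_image(
    image=sample["image"],
    objects=sample["objects"],
)
```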
## Get the categories

The annotation files downloaded in the build section below map each numeric category id to a human-readable name:
```python
import json

with open("/content/annotations/instances_train2017.json") as f:
    instances = json.load(f)

instances["categories"]
```
## Build the dataset and upload to Hub
```
!pip install -U -q datasets

# Download and unzip COCO 2017 (the image zips alone are roughly 19 GB)
!wget -q http://images.cocodataset.org/zips/train2017.zip
!wget -q http://images.cocodataset.org/zips/val2017.zip
!wget -q http://images.cocodataset.org/annotations/annotations_trainval2017.zip
!unzip -q train2017.zip
!unzip -q val2017.zip
!unzip -q annotations_trainval2017.zip
```
```python
import json
import shutil
from pathlib import Path

from tqdm import tqdm
from datasets import load_dataset

base_dir = Path("/content")

splits = {
    "train": {
        "image_dir": base_dir / "train2017",
        "annotation_file": base_dir / "annotations" / "instances_train2017.json",
    },
    "val": {
        "image_dir": base_dir / "val2017",
        "annotation_file": base_dir / "annotations" / "instances_val2017.json",
    },
}

output_dir = base_dir / "coco_imagefolder"
output_dir.mkdir(parents=True, exist_ok=True)

def normalize_segmentation(segmentation):
    if isinstance(segmentation, list):
        if all(isinstance(poly, list) for poly in segmentation):
            return segmentation  # already a list of polygons
        elif all(isinstance(pt, (int, float)) for pt in segmentation):
            return [segmentation]  # wrap single polygon
    return []  # skip RLE or malformed segmentations
```
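COCO stores `segmentation` either as a list of polygons, as a single flat polygon, or as an RLE dict for crowd regions; `normalize_segmentation` maps the first two onto the list-of-polygons layout declared in the features above and drops the rest. A quick illustration with made-up coordinates:

```python
normalize_segmentation([[10.0, 20.0, 30.0, 20.0, 30.0, 40.0]])  # unchanged: already a list of polygons
normalize_segmentation([10.0, 20.0, 30.0, 20.0, 30.0, 40.0])    # wrapped into a single-polygon list
normalize_segmentation({"counts": "...", "size": [480, 640]})   # [] -- RLE crowd masks are skipped
```

The converter below then groups every kept annotation by image and writes one JSON line per image: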
```python
def convert_coco_to_jsonl(image_dir, annotation_path, output_metadata_path):
    with open(annotation_path) as f:
        data = json.load(f)

    id_to_filename = {img["id"]: img["file_name"] for img in data["images"]}

    annotations_by_image = {}
    for ann in data["annotations"]:
        img_id = ann["image_id"]
        bbox = ann["bbox"]
        category = ann["category_id"]
        segmentation = normalize_segmentation(ann["segmentation"])
        if not segmentation:
            continue  # skip if malformed or RLE
        if img_id not in annotations_by_image:
            annotations_by_image[img_id] = {
                "file_name": id_to_filename[img_id],
                "objects": {
                    "bbox": [],
                    "segmentation": [],
                    "categories": [],
                },
            }
        annotations_by_image[img_id]["objects"]["bbox"].append(bbox)
        annotations_by_image[img_id]["objects"]["segmentation"].append(segmentation)
        annotations_by_image[img_id]["objects"]["categories"].append(category)

    with open(output_metadata_path, "w") as f:
        for metadata in annotations_by_image.values():
            json.dump(metadata, f)
            f.write("\n")
```
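Each resulting `metadata.jsonl` line pairs one file name with all of its objects. Schematically, with made-up values (a real record is a single line):

```json
{"file_name": "000000000009.jpg",
 "objects": {"bbox": [[1.1, 187.7, 611.6, 285.8]],
             "segmentation": [[[10.0, 20.0, 30.0, 20.0, 30.0, 40.0]]],
             "categories": [51]}}
```

With the converter in place, the loop below assembles the imagefolder layout and pushes it to the Hub: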
```python
# Build the imagefolder structure
for split, info in splits.items():
    split_dir = output_dir / split
    split_dir.mkdir(parents=True, exist_ok=True)

    # Copy images
    for img_path in tqdm(info["image_dir"].glob("*.jpg"), desc=f"Copying {split} images"):
        shutil.copy(img_path, split_dir / img_path.name)

    # Write JSONL metadata
    metadata_path = split_dir / "metadata.jsonl"
    convert_coco_to_jsonl(split_dir, info["annotation_file"], metadata_path)

# Load and push
dataset = load_dataset("imagefolder", data_dir=str(output_dir))
dataset.push_to_hub("ariG23498/coco2017")
```
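The `imagefolder` loader infers split names from the directory names, which is why the `val` folder surfaces as the `validation` split in the YAML header above. A quick round-trip check after pushing (streaming avoids re-downloading the whole dataset):

```python
from datasets import load_dataset

ds = load_dataset("ariG23498/coco2017", streaming=True, split="validation")
print(next(iter(ds))["objects"]["categories"])
```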