Initialize (#1)
Browse files
* add files
* update files
* add pytest-xdist for parallel testing
* add settings for CI
* update
* update README
* update README.md
* update
- .github/workflows/ci.yaml +49 -0
- .github/workflows/push_to_hub.yaml +27 -0
- .gitignore +176 -0
- MSCOCO.py +969 -0
- README.md +3 -0
- poetry.lock +0 -0
- pyproject.toml +22 -0
- tests/MSCOCO_test.py +48 -0
- tests/__init__.py +0 -0
.github/workflows/ci.yaml
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
name: CI

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
    paths-ignore:
      - 'README.md'

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: [ '3.9', '3.10' ]

    steps:
      - uses: actions/checkout@v3

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          pip install -U pip setuptools wheel poetry
          poetry install

      - name: Format
        run: |
          poetry run black --check .

      - name: Lint
        run: |
          # Bare `ruff .` is the deprecated legacy invocation; `ruff check`
          # is the supported lint subcommand.
          poetry run ruff check .

      - name: Type check
        run: |
          poetry run mypy . \
            --ignore-missing-imports \
            --no-strict-optional \
            --no-site-packages \
            --cache-dir=/dev/null

      # - name: Run tests
      #   run: |
      #     poetry run pytest --color=yes -rf
|
.github/workflows/push_to_hub.yaml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
name: Sync to Hugging Face Hub

on:
  workflow_run:
    workflows:
      - CI
    branches:
      - main
    types:
      - completed

jobs:
  push_to_hub:
    runs-on: ubuntu-latest
    # `completed` fires for failed and cancelled CI runs too; only sync
    # to the Hub when CI actually succeeded.
    if: ${{ github.event.workflow_run.conclusion == 'success' }}

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Push to Huggingface hub
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          HF_USERNAME: ${{ secrets.HF_USERNAME }}
        run: |
          git fetch --unshallow
          # git lfs fetch --all origin main
          git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/MSCOCO main
|
.gitignore
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Created by https://www.toptal.com/developers/gitignore/api/python
|
| 2 |
+
# Edit at https://www.toptal.com/developers/gitignore?templates=python
|
| 3 |
+
|
| 4 |
+
### Python ###
|
| 5 |
+
# Byte-compiled / optimized / DLL files
|
| 6 |
+
__pycache__/
|
| 7 |
+
*.py[cod]
|
| 8 |
+
*$py.class
|
| 9 |
+
|
| 10 |
+
# C extensions
|
| 11 |
+
*.so
|
| 12 |
+
|
| 13 |
+
# Distribution / packaging
|
| 14 |
+
.Python
|
| 15 |
+
build/
|
| 16 |
+
develop-eggs/
|
| 17 |
+
dist/
|
| 18 |
+
downloads/
|
| 19 |
+
eggs/
|
| 20 |
+
.eggs/
|
| 21 |
+
lib/
|
| 22 |
+
lib64/
|
| 23 |
+
parts/
|
| 24 |
+
sdist/
|
| 25 |
+
var/
|
| 26 |
+
wheels/
|
| 27 |
+
share/python-wheels/
|
| 28 |
+
*.egg-info/
|
| 29 |
+
.installed.cfg
|
| 30 |
+
*.egg
|
| 31 |
+
MANIFEST
|
| 32 |
+
|
| 33 |
+
# PyInstaller
|
| 34 |
+
# Usually these files are written by a python script from a template
|
| 35 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 36 |
+
*.manifest
|
| 37 |
+
*.spec
|
| 38 |
+
|
| 39 |
+
# Installer logs
|
| 40 |
+
pip-log.txt
|
| 41 |
+
pip-delete-this-directory.txt
|
| 42 |
+
|
| 43 |
+
# Unit test / coverage reports
|
| 44 |
+
htmlcov/
|
| 45 |
+
.tox/
|
| 46 |
+
.nox/
|
| 47 |
+
.coverage
|
| 48 |
+
.coverage.*
|
| 49 |
+
.cache
|
| 50 |
+
nosetests.xml
|
| 51 |
+
coverage.xml
|
| 52 |
+
*.cover
|
| 53 |
+
*.py,cover
|
| 54 |
+
.hypothesis/
|
| 55 |
+
.pytest_cache/
|
| 56 |
+
cover/
|
| 57 |
+
|
| 58 |
+
# Translations
|
| 59 |
+
*.mo
|
| 60 |
+
*.pot
|
| 61 |
+
|
| 62 |
+
# Django stuff:
|
| 63 |
+
*.log
|
| 64 |
+
local_settings.py
|
| 65 |
+
db.sqlite3
|
| 66 |
+
db.sqlite3-journal
|
| 67 |
+
|
| 68 |
+
# Flask stuff:
|
| 69 |
+
instance/
|
| 70 |
+
.webassets-cache
|
| 71 |
+
|
| 72 |
+
# Scrapy stuff:
|
| 73 |
+
.scrapy
|
| 74 |
+
|
| 75 |
+
# Sphinx documentation
|
| 76 |
+
docs/_build/
|
| 77 |
+
|
| 78 |
+
# PyBuilder
|
| 79 |
+
.pybuilder/
|
| 80 |
+
target/
|
| 81 |
+
|
| 82 |
+
# Jupyter Notebook
|
| 83 |
+
.ipynb_checkpoints
|
| 84 |
+
|
| 85 |
+
# IPython
|
| 86 |
+
profile_default/
|
| 87 |
+
ipython_config.py
|
| 88 |
+
|
| 89 |
+
# pyenv
|
| 90 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 91 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 92 |
+
.python-version
|
| 93 |
+
|
| 94 |
+
# pipenv
|
| 95 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 96 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 97 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 98 |
+
# install all needed dependencies.
|
| 99 |
+
#Pipfile.lock
|
| 100 |
+
|
| 101 |
+
# poetry
|
| 102 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 103 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 104 |
+
# commonly ignored for libraries.
|
| 105 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 106 |
+
#poetry.lock
|
| 107 |
+
|
| 108 |
+
# pdm
|
| 109 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 110 |
+
#pdm.lock
|
| 111 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
| 112 |
+
# in version control.
|
| 113 |
+
# https://pdm.fming.dev/#use-with-ide
|
| 114 |
+
.pdm.toml
|
| 115 |
+
|
| 116 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 117 |
+
__pypackages__/
|
| 118 |
+
|
| 119 |
+
# Celery stuff
|
| 120 |
+
celerybeat-schedule
|
| 121 |
+
celerybeat.pid
|
| 122 |
+
|
| 123 |
+
# SageMath parsed files
|
| 124 |
+
*.sage.py
|
| 125 |
+
|
| 126 |
+
# Environments
|
| 127 |
+
.env
|
| 128 |
+
.venv
|
| 129 |
+
env/
|
| 130 |
+
venv/
|
| 131 |
+
ENV/
|
| 132 |
+
env.bak/
|
| 133 |
+
venv.bak/
|
| 134 |
+
|
| 135 |
+
# Spyder project settings
|
| 136 |
+
.spyderproject
|
| 137 |
+
.spyproject
|
| 138 |
+
|
| 139 |
+
# Rope project settings
|
| 140 |
+
.ropeproject
|
| 141 |
+
|
| 142 |
+
# mkdocs documentation
|
| 143 |
+
/site
|
| 144 |
+
|
| 145 |
+
# mypy
|
| 146 |
+
.mypy_cache/
|
| 147 |
+
.dmypy.json
|
| 148 |
+
dmypy.json
|
| 149 |
+
|
| 150 |
+
# Pyre type checker
|
| 151 |
+
.pyre/
|
| 152 |
+
|
| 153 |
+
# pytype static type analyzer
|
| 154 |
+
.pytype/
|
| 155 |
+
|
| 156 |
+
# Cython debug symbols
|
| 157 |
+
cython_debug/
|
| 158 |
+
|
| 159 |
+
# PyCharm
|
| 160 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 161 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 162 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 163 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 164 |
+
#.idea/
|
| 165 |
+
|
| 166 |
+
### Python Patch ###
|
| 167 |
+
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
|
| 168 |
+
poetry.toml
|
| 169 |
+
|
| 170 |
+
# ruff
|
| 171 |
+
.ruff_cache/
|
| 172 |
+
|
| 173 |
+
# LSP config files
|
| 174 |
+
pyrightconfig.json
|
| 175 |
+
|
| 176 |
+
# End of https://www.toptal.com/developers/gitignore/api/python
|
MSCOCO.py
ADDED
|
@@ -0,0 +1,969 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
from collections import defaultdict
|
| 5 |
+
from dataclasses import asdict, dataclass
|
| 6 |
+
from typing import (
|
| 7 |
+
Any,
|
| 8 |
+
Dict,
|
| 9 |
+
Final,
|
| 10 |
+
Iterator,
|
| 11 |
+
List,
|
| 12 |
+
Literal,
|
| 13 |
+
Optional,
|
| 14 |
+
Sequence,
|
| 15 |
+
Tuple,
|
| 16 |
+
TypedDict,
|
| 17 |
+
Union,
|
| 18 |
+
get_args,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
import datasets as ds
|
| 22 |
+
import numpy as np
|
| 23 |
+
from datasets.data_files import DataFilesDict
|
| 24 |
+
from PIL import Image
|
| 25 |
+
from PIL.Image import Image as PilImage
|
| 26 |
+
from pycocotools import mask as cocomask
|
| 27 |
+
from tqdm.auto import tqdm
|
| 28 |
+
|
| 29 |
+
logger = logging.getLogger(__name__)
|
| 30 |
+
|
| 31 |
+
JsonDict = Dict[str, Any]
|
| 32 |
+
ImageId = int
|
| 33 |
+
AnnotationId = int
|
| 34 |
+
LicenseId = int
|
| 35 |
+
CategoryId = int
|
| 36 |
+
Bbox = Tuple[float, float, float, float]
|
| 37 |
+
|
| 38 |
+
MscocoSplits = Literal["train", "val", "test"]
|
| 39 |
+
|
| 40 |
+
KEYPOINT_STATE: Final[List[str]] = ["unknown", "invisible", "visible"]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
_CITATION = """
|
| 44 |
+
"""
|
| 45 |
+
|
| 46 |
+
_DESCRIPTION = """
|
| 47 |
+
"""
|
| 48 |
+
|
| 49 |
+
_HOMEPAGE = """
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
_LICENSE = """
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
_URLS = {
|
| 56 |
+
"2014": {
|
| 57 |
+
"images": {
|
| 58 |
+
"train": "http://images.cocodataset.org/zips/train2014.zip",
|
| 59 |
+
"validation": "http://images.cocodataset.org/zips/val2014.zip",
|
| 60 |
+
"test": "http://images.cocodataset.org/zips/test2014.zip",
|
| 61 |
+
},
|
| 62 |
+
"annotations": {
|
| 63 |
+
"train_validation": "http://images.cocodataset.org/annotations/annotations_trainval2014.zip",
|
| 64 |
+
"test_image_info": "http://images.cocodataset.org/annotations/image_info_test2014.zip",
|
| 65 |
+
},
|
| 66 |
+
},
|
| 67 |
+
"2015": {
|
| 68 |
+
"images": {
|
| 69 |
+
"test": "http://images.cocodataset.org/zips/test2015.zip",
|
| 70 |
+
},
|
| 71 |
+
"annotations": {
|
| 72 |
+
"test_image_info": "http://images.cocodataset.org/annotations/image_info_test2015.zip",
|
| 73 |
+
},
|
| 74 |
+
},
|
| 75 |
+
"2017": {
|
| 76 |
+
"images": {
|
| 77 |
+
"train": "http://images.cocodataset.org/zips/train2017.zip",
|
| 78 |
+
"validation": "http://images.cocodataset.org/zips/val2017.zip",
|
| 79 |
+
"test": "http://images.cocodataset.org/zips/test2017.zip",
|
| 80 |
+
"unlabeled": "http://images.cocodataset.org/zips/unlabeled2017.zip",
|
| 81 |
+
},
|
| 82 |
+
"annotations": {
|
| 83 |
+
"train_validation": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip",
|
| 84 |
+
"stuff_train_validation": "http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip",
|
| 85 |
+
"panoptic_train_validation": "http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip",
|
| 86 |
+
"test_image_info": "http://images.cocodataset.org/annotations/image_info_test2017.zip",
|
| 87 |
+
"unlabeled": "http://images.cocodataset.org/annotations/image_info_unlabeled2017.zip",
|
| 88 |
+
},
|
| 89 |
+
},
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
@dataclass
class AnnotationInfo(object):
    """The top-level ``info`` block of a COCO annotation file."""

    description: str
    url: str
    version: str
    year: str
    contributor: str
    date_created: str

    @classmethod
    def from_dict(cls, json_dict: "JsonDict") -> "AnnotationInfo":
        """Build an ``AnnotationInfo`` straight from the raw ``info`` dict.

        The JSON keys match the field names one-to-one, so the dict is
        unpacked directly into the constructor.
        """
        return cls(**json_dict)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
@dataclass
class LicenseData(object):
    """One entry of the ``licenses`` list in a COCO annotation file."""

    url: str
    license_id: "LicenseId"
    name: str

    @classmethod
    def from_dict(cls, json_dict: "JsonDict") -> "LicenseData":
        """Build a ``LicenseData``, mapping the raw JSON ``id`` key onto
        the ``license_id`` field."""
        return cls(
            url=json_dict["url"],
            name=json_dict["name"],
            license_id=json_dict["id"],
        )
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
@dataclass
class ImageData(object):
    """One entry of the ``images`` list in a COCO annotation file."""

    image_id: "ImageId"
    license_id: "LicenseId"
    file_name: str
    coco_url: str
    height: int
    width: int
    date_captured: str
    flickr_url: str

    @classmethod
    def from_dict(cls, json_dict: "JsonDict") -> "ImageData":
        """Build an ``ImageData``, renaming the raw JSON ``id`` and
        ``license`` keys to ``image_id`` / ``license_id``."""
        kwargs = {
            "image_id": json_dict["id"],
            "license_id": json_dict["license"],
        }
        for key in (
            "file_name",
            "coco_url",
            "height",
            "width",
            "date_captured",
            "flickr_url",
        ):
            kwargs[key] = json_dict[key]
        return cls(**kwargs)

    @property
    def shape(self) -> "Tuple[int, int]":
        """Image size as ``(height, width)``, matching ``np.ndarray.shape``."""
        return (self.height, self.width)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
@dataclass
class CategoryData(object):
    """One entry of the ``categories`` list in a COCO annotation file."""

    category_id: int
    name: str
    supercategory: str

    @classmethod
    def from_dict(cls, json_dict: "JsonDict") -> "CategoryData":
        """Build a ``CategoryData``, mapping the raw JSON ``id`` key onto
        the ``category_id`` field."""
        return cls(
            name=json_dict["name"],
            supercategory=json_dict["supercategory"],
            category_id=json_dict["id"],
        )
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
@dataclass
class AnnotationData(object):
    """Base record shared by all COCO annotation types (captions, instances, keypoints)."""

    annotation_id: AnnotationId  # unique annotation id ("id" in the raw JSON)
    image_id: ImageId  # id of the image this annotation belongs to
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
@dataclass
class CaptionsAnnotationData(AnnotationData):
    """A caption annotation: one free-form sentence describing an image."""

    caption: str

    @classmethod
    def from_dict(cls, json_dict: "JsonDict") -> "CaptionsAnnotationData":
        """Build from one raw element of a ``captions_*.json`` ``annotations`` list."""
        return cls(
            annotation_id=json_dict["id"],
            caption=json_dict["caption"],
            image_id=json_dict["image_id"],
        )
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
class UncompressedRLE(TypedDict):
    """Run-length encoding as stored in COCO JSON: ``counts`` as a list of ints."""

    counts: List[int]
    size: Tuple[int, int]
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class CompressedRLE(TypedDict):
    """Run-length encoding compressed by pycocotools: ``counts`` as bytes."""

    counts: bytes
    size: Tuple[int, int]
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
@dataclass
class InstancesAnnotationData(AnnotationData):
    """An object detection/segmentation annotation (``instances_*.json``).

    ``segmentation`` holds either a decoded mask (``np.ndarray``) or a
    compressed RLE dict, depending on the ``decode_rle`` flag given to
    :meth:`from_dict`.
    """

    segmentation: Union[np.ndarray, CompressedRLE]
    area: float  # object area in pixels
    iscrowd: bool  # crowd annotations carry RLE segmentation instead of polygons
    bbox: Tuple[float, float, float, float]  # COCO bbox; presumably (x, y, width, height) — confirm
    category_id: int

    @classmethod
    def compress_rle(
        cls,
        segmentation: Union[List[List[float]], UncompressedRLE],
        iscrowd: bool,
        height: int,
        width: int,
    ) -> CompressedRLE:
        """Convert a raw COCO segmentation into one compressed RLE dict.

        Crowd annotations store a single uncompressed RLE; non-crowd ones
        store polygon lists, whose per-polygon RLEs are merged into one.
        """
        if iscrowd:
            rle = cocomask.frPyObjects(segmentation, h=height, w=width)
        else:
            rles = cocomask.frPyObjects(segmentation, h=height, w=width)
            rle = cocomask.merge(rles)

        return rle  # type: ignore

    @classmethod
    def rle_segmentation_to_binary_mask(
        cls, segmentation, iscrowd: bool, height: int, width: int
    ) -> np.ndarray:
        """Decode a raw segmentation into a 0/1 mask of shape (height, width)."""
        rle = cls.compress_rle(
            segmentation=segmentation, iscrowd=iscrowd, height=height, width=width
        )
        return cocomask.decode(rle)  # type: ignore

    @classmethod
    def rle_segmentation_to_mask(
        cls,
        segmentation: Union[List[List[float]], UncompressedRLE],
        iscrowd: bool,
        height: int,
        width: int,
    ) -> np.ndarray:
        """Decode a raw segmentation into a 0/255 mask (8-bit image range)."""
        binary_mask = cls.rle_segmentation_to_binary_mask(
            segmentation=segmentation, iscrowd=iscrowd, height=height, width=width
        )
        # Scale the binary mask from {0, 1} to {0, 255}.
        return binary_mask * 255

    @classmethod
    def from_dict(
        cls,
        json_dict: JsonDict,
        images: Dict[ImageId, ImageData],
        decode_rle: bool,
    ) -> "InstancesAnnotationData":
        """Build an instance annotation from one raw ``annotations`` entry.

        Args:
            json_dict: one element of the ``annotations`` list.
            images: image-id -> ImageData lookup, used for mask height/width.
            decode_rle: when True, store the decoded mask; otherwise keep RLE.
        """
        segmentation = json_dict["segmentation"]
        image_id = json_dict["image_id"]
        image_data = images[image_id]
        iscrowd = bool(json_dict["iscrowd"])

        if decode_rle:
            segmentation_mask = cls.rle_segmentation_to_mask(
                segmentation=segmentation,
                iscrowd=iscrowd,
                height=image_data.height,
                width=image_data.width,
            )
            # Decoded mask must match the image dimensions.
            assert segmentation_mask.shape == image_data.shape
        else:
            segmentation_mask = cls.compress_rle(
                segmentation=segmentation,
                iscrowd=iscrowd,
                height=image_data.height,
                width=image_data.width,
            )
        return cls(
            #
            # for AnnotationData
            #
            annotation_id=json_dict["id"],
            image_id=image_id,
            #
            # for InstancesAnnotationData
            #
            segmentation=segmentation_mask,
            area=json_dict["area"],
            iscrowd=iscrowd,
            bbox=json_dict["bbox"],
            category_id=json_dict["category_id"],
        )
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
@dataclass
class PersonKeypoint(object):
    """One COCO person keypoint: pixel position, the raw visibility flag
    ``v``, and its human-readable ``state`` (see ``KEYPOINT_STATE``)."""

    x: int
    y: int
    v: int
    state: str
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
@dataclass
class PersonKeypointsAnnotationData(InstancesAnnotationData):
    """An instance annotation extended with person keypoints."""

    num_keypoints: int  # number of labeled (non-"unknown") keypoints
    keypoints: List[PersonKeypoint]

    @classmethod
    def v_keypoint_to_state(cls, keypoint_v: int) -> str:
        """Map a COCO visibility flag v in {0, 1, 2} to its state name."""
        return KEYPOINT_STATE[keypoint_v]

    @classmethod
    def get_person_keypoints(
        cls, flatten_keypoints: List[int], num_keypoints: int
    ) -> List[PersonKeypoint]:
        """Convert the flat ``[x1, y1, v1, x2, y2, v2, ...]`` list into
        ``PersonKeypoint`` objects.

        Raises ``AssertionError`` when the count of labeled keypoints does
        not match ``num_keypoints`` from the annotation.
        """
        # The flat list packs triples: x at stride offset 0, y at 1, v at 2.
        keypoints_x = flatten_keypoints[0::3]
        keypoints_y = flatten_keypoints[1::3]
        keypoints_v = flatten_keypoints[2::3]
        assert len(keypoints_x) == len(keypoints_y) == len(keypoints_v)

        keypoints = [
            PersonKeypoint(x=x, y=y, v=v, state=cls.v_keypoint_to_state(v))
            for x, y, v in zip(keypoints_x, keypoints_y, keypoints_v)
        ]
        assert len([kp for kp in keypoints if kp.state != "unknown"]) == num_keypoints
        return keypoints

    @classmethod
    def from_dict(
        cls,
        json_dict: JsonDict,
        images: Dict[ImageId, ImageData],
        decode_rle: bool,
    ) -> "PersonKeypointsAnnotationData":
        """Build a person-keypoints annotation from one raw ``annotations`` entry.

        Mirrors ``InstancesAnnotationData.from_dict`` for the segmentation
        handling, then parses the keypoint triples on top.
        """
        segmentation = json_dict["segmentation"]
        image_id = json_dict["image_id"]
        image_data = images[image_id]
        iscrowd = bool(json_dict["iscrowd"])

        if decode_rle:
            segmentation_mask = cls.rle_segmentation_to_mask(
                segmentation=segmentation,
                iscrowd=iscrowd,
                height=image_data.height,
                width=image_data.width,
            )
            # Decoded mask must match the image dimensions.
            assert segmentation_mask.shape == image_data.shape
        else:
            segmentation_mask = cls.compress_rle(
                segmentation=segmentation,
                iscrowd=iscrowd,
                height=image_data.height,
                width=image_data.width,
            )

        flatten_keypoints = json_dict["keypoints"]
        num_keypoints = json_dict["num_keypoints"]
        keypoints = cls.get_person_keypoints(flatten_keypoints, num_keypoints)

        return cls(
            #
            # for AnnotationData
            #
            annotation_id=json_dict["id"],
            image_id=image_id,
            #
            # for InstancesAnnotationData
            #
            segmentation=segmentation_mask,
            area=json_dict["area"],
            iscrowd=iscrowd,
            bbox=json_dict["bbox"],
            category_id=json_dict["category_id"],
            #
            # PersonKeypointsAnnotationData
            #
            num_keypoints=num_keypoints,
            keypoints=keypoints,
        )
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
class LicenseDict(TypedDict):
    """Serialized form of :class:`LicenseData` embedded in examples."""

    license_id: LicenseId
    name: str
    url: str
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
class BaseExample(TypedDict):
    """Fields shared by every generated example: the image metadata from
    ``ImageData``, the loaded PIL image, and the resolved license entry."""

    image_id: ImageId
    image: PilImage
    file_name: str
    coco_url: str
    height: int
    width: int
    date_captured: str
    flickr_url: str
    license_id: LicenseId
    license: LicenseDict
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
class CaptionAnnotationDict(TypedDict):
    """Serialized form of :class:`CaptionsAnnotationData` inside an example."""

    annotation_id: AnnotationId
    caption: str
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
class CaptionExample(BaseExample):
|
| 397 |
+
annotations: List[CaptionAnnotationDict]
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def generate_captions_examples(
    image_dir: str,
    images: Dict[ImageId, ImageData],
    annotations: Dict[ImageId, List[CaptionsAnnotationData]],
    licenses: Dict[LicenseId, LicenseData],
) -> Iterator[Tuple[int, CaptionExample]]:
    """Yield ``(index, example)`` pairs for the captions task.

    Each example combines the image metadata, the loaded PIL image, its
    license entry, and all caption annotations for that image.
    """
    for idx, image_id in enumerate(images):
        image_data = images[image_id]
        image_anns = annotations[image_id]

        # Every image in the captions split is expected to carry captions.
        assert len(image_anns) > 0

        example = asdict(image_data)
        example["image"] = _load_image(
            image_path=os.path.join(image_dir, image_data.file_name),
        )
        example["license"] = asdict(licenses[image_data.license_id])
        example["annotations"] = [asdict(ann) for ann in image_anns]

        yield idx, example  # type: ignore
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
class CategoryDict(TypedDict):
    """Serialized form of :class:`CategoryData` inside an annotation dict."""

    category_id: CategoryId
    name: str
    supercategory: str
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
class InstanceAnnotationDict(TypedDict):
    """Serialized instance annotation, with the full category record
    embedded alongside its id."""

    annotation_id: AnnotationId
    area: float
    bbox: Bbox
    image_id: ImageId
    category_id: CategoryId
    category: CategoryDict
    iscrowd: bool
    segmentation: np.ndarray
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
class InstanceExample(BaseExample):
    """Example schema for the instances task: base image fields plus
    instance annotations."""

    annotations: List[InstanceAnnotationDict]
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
def generate_instances_examples(
    image_dir: str,
    images: Dict[ImageId, ImageData],
    annotations: Dict[ImageId, List[InstancesAnnotationData]],
    licenses: Dict[LicenseId, LicenseData],
    categories: Dict[CategoryId, CategoryData],
) -> Iterator[Tuple[int, InstanceExample]]:
    """Yield ``(index, example)`` pairs for the instances (detection) task.

    Images without any instance annotation are skipped with a warning.
    """
    for idx, image_id in enumerate(images):
        image_data = images[image_id]
        image_anns = annotations[image_id]

        if not image_anns:
            logger.warning(f"No annotation found for image id: {image_id}.")
            continue

        example = asdict(image_data)
        example["image"] = _load_image(
            image_path=os.path.join(image_dir, image_data.file_name),
        )
        example["license"] = asdict(licenses[image_data.license_id])

        ann_dicts = []
        for ann in image_anns:
            ann_dict = asdict(ann)
            # Embed the full category record next to its id.
            ann_dict["category"] = asdict(categories[ann.category_id])
            ann_dicts.append(ann_dict)
        example["annotations"] = ann_dicts

        yield idx, example  # type: ignore
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
class KeypointDict(TypedDict):
    """Serialized form of :class:`PersonKeypoint` inside an annotation dict."""

    x: int
    y: int
    v: int
    state: str
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
class PersonKeypointAnnotationDict(InstanceAnnotationDict):
    """Instance annotation dict extended with serialized keypoint data."""

    num_keypoints: int
    keypoints: List[KeypointDict]
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
class PersonKeypointExample(BaseExample):
    """Example schema for the person-keypoints task: base image fields plus
    keypoint annotations."""

    annotations: List[PersonKeypointAnnotationDict]
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def generate_person_keypoints_examples(
    image_dir: str,
    images: Dict[ImageId, ImageData],
    annotations: Dict[ImageId, List[PersonKeypointsAnnotationData]],
    licenses: Dict[LicenseId, LicenseData],
    categories: Dict[CategoryId, CategoryData],
) -> Iterator[Tuple[int, PersonKeypointExample]]:
    """Yield ``(index, example)`` pairs for the person-keypoints task.

    Images containing no persons are skipped silently (they have no
    keypoint annotations); the enumeration index still advances for them.
    """
    for idx, (image_id, image_data) in enumerate(images.items()):
        image_anns = annotations[image_id]
        if not image_anns:
            # No persons in this image, hence no keypoint annotations.
            continue

        example = asdict(image_data)
        example["image"] = _load_image(
            image_path=os.path.join(image_dir, image_data.file_name),
        )
        example["license"] = asdict(licenses[image_data.license_id])
        # Attach the resolved category record to every annotation dict.
        example["annotations"] = [
            {**asdict(ann), "category": asdict(categories[ann.category_id])}
            for ann in image_anns
        ]

        yield idx, example  # type: ignore
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
class MsCocoConfig(ds.BuilderConfig):
    """BuilderConfig selecting an MS COCO release year and annotation task(s).

    ``coco_task`` may be a single task name or a sequence of task names;
    sequences produce a combined config whose name joins the tasks with "-".
    """

    YEARS: Tuple[int, ...] = (
        2014,
        2017,
    )
    TASKS: Tuple[str, ...] = (
        "captions",
        "instances",
        "person_keypoints",
    )

    def __init__(
        self,
        year: int,
        coco_task: Union[str, Sequence[str]],
        version: Optional[Union[ds.Version, str]],
        decode_rle: bool = False,
        data_dir: Optional[str] = None,
        data_files: Optional[DataFilesDict] = None,
        description: Optional[str] = None,
    ) -> None:
        super().__init__(
            name=self.config_name(year=year, task=coco_task),
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self._check_year(year)
        self._check_task(coco_task)

        self._year = year
        self._task = coco_task
        # Whether RLE-encoded segmentations should be decoded into masks
        # when building features.
        self.decode_rle = decode_rle

    def _check_year(self, year: int) -> None:
        assert year in self.YEARS, year

    def _check_task(self, task: Union[str, Sequence[str]]) -> None:
        if isinstance(task, str):
            assert task in self.TASKS, task
        elif isinstance(task, (list, tuple)):
            for t in task:
                # BUG FIX: this previously read `assert self.TASKS, task`,
                # which always passed because TASKS is a non-empty tuple.
                # Each requested task must itself be a known task name.
                assert t in self.TASKS, t
        else:
            raise ValueError(f"Invalid task: {task}")

    @property
    def year(self) -> int:
        """Release year of the configured dataset (2014 or 2017)."""
        return self._year

    @property
    def task(self) -> str:
        """Task name; multiple tasks are sorted and joined with '-'."""
        if isinstance(self._task, str):
            return self._task
        elif isinstance(self._task, (list, tuple)):
            return "-".join(sorted(self._task))
        else:
            raise ValueError(f"Invalid task: {self._task}")

    @classmethod
    def config_name(cls, year: int, task: Union[str, Sequence[str]]) -> str:
        """Build the config name, e.g. ``2017-captions``.

        NOTE: unlike the ``task`` property, sequence tasks are joined in the
        order given (not sorted) — callers pass them pre-ordered.
        """
        if isinstance(task, str):
            return f"{year}-{task}"
        elif isinstance(task, (list, tuple)):
            return f"{year}-{'-'.join(task)}"
        else:
            raise ValueError(f"Invalid task: {task}")
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
def _load_image(image_path: str) -> PilImage:
    """Open an image file with Pillow.

    ``Image.open`` is lazy: pixel data is only read when first accessed.
    """
    return Image.open(image_path)
|
| 601 |
+
|
| 602 |
+
|
| 603 |
+
def _load_annotation_json(ann_file_path: str) -> JsonDict:
    """Read and parse a COCO annotation JSON file."""
    logger.info(f"Load annotation json from {ann_file_path}")
    with open(ann_file_path, "r") as rf:
        return json.load(rf)
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
def _load_licenses_data(license_dicts: List[JsonDict]) -> Dict[LicenseId, LicenseData]:
    """Parse raw license dicts into ``LicenseData`` keyed by license id."""
    return {
        data.license_id: data
        for data in map(LicenseData.from_dict, license_dicts)
    }
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
def _load_images_data(
    image_dicts: List[JsonDict],
    tqdm_desc: str = "Load images",
) -> Dict[ImageId, ImageData]:
    """Parse raw image dicts into ``ImageData`` keyed by image id."""
    return {
        data.image_id: data
        for data in map(ImageData.from_dict, tqdm(image_dicts, desc=tqdm_desc))
    }
|
| 627 |
+
|
| 628 |
+
|
| 629 |
+
def _load_categories_data(
    category_dicts: List[JsonDict],
    tqdm_desc: str = "Load categories",
) -> Dict[CategoryId, CategoryData]:
    """Parse raw category dicts into ``CategoryData`` keyed by category id."""
    return {
        data.category_id: data
        for data in map(CategoryData.from_dict, tqdm(category_dicts, desc=tqdm_desc))
    }
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
def _load_captions_data(
    ann_dicts: List[JsonDict],
    tqdm_desc: str = "Load captions data",
) -> Dict[ImageId, List[CaptionsAnnotationData]]:
    """Group parsed caption annotations by their image id."""
    grouped: Dict[ImageId, List[CaptionsAnnotationData]] = defaultdict(list)
    for raw_ann in tqdm(ann_dicts, desc=tqdm_desc):
        parsed = CaptionsAnnotationData.from_dict(raw_ann)
        grouped[parsed.image_id].append(parsed)
    return grouped
|
| 649 |
+
|
| 650 |
+
|
| 651 |
+
def _load_instances_data(
    ann_dicts: List[JsonDict],
    images: Dict[ImageId, ImageData],
    decode_rle: bool,
    tqdm_desc: str = "Load instances data",
) -> Dict[ImageId, List[InstancesAnnotationData]]:
    """Group parsed instance annotations by image id.

    Annotations are pre-sorted by ``image_id`` so each image's annotations
    are processed contiguously and grouping is deterministic.
    """
    grouped: Dict[ImageId, List[InstancesAnnotationData]] = defaultdict(list)
    ordered = sorted(ann_dicts, key=lambda d: d["image_id"])
    for raw_ann in tqdm(ordered, desc=tqdm_desc):
        parsed = InstancesAnnotationData.from_dict(
            raw_ann, images=images, decode_rle=decode_rle
        )
        grouped[parsed.image_id].append(parsed)
    return grouped
|
| 667 |
+
|
| 668 |
+
|
| 669 |
+
def _load_person_keypoints_data(
    ann_dicts: List[JsonDict],
    images: Dict[ImageId, ImageData],
    decode_rle: bool,
    tqdm_desc: str = "Load person keypoints data",
) -> Dict[ImageId, List[PersonKeypointsAnnotationData]]:
    """Group parsed person-keypoint annotations by image id.

    Annotations are pre-sorted by ``image_id`` for deterministic grouping.
    """
    grouped: Dict[ImageId, List[PersonKeypointsAnnotationData]] = defaultdict(list)
    ordered = sorted(ann_dicts, key=lambda d: d["image_id"])
    for raw_ann in tqdm(ordered, desc=tqdm_desc):
        parsed = PersonKeypointsAnnotationData.from_dict(
            raw_ann, images=images, decode_rle=decode_rle
        )
        grouped[parsed.image_id].append(parsed)
    return grouped
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
def get_features_base_dict():
    """Feature spec shared by every task: image metadata plus its license."""
    return {
        "image_id": ds.Value("int64"),
        "image": ds.Image(),
        "file_name": ds.Value("string"),
        "coco_url": ds.Value("string"),
        "height": ds.Value("int32"),
        "width": ds.Value("int32"),
        "date_captured": ds.Value("string"),
        "flickr_url": ds.Value("string"),
        "license_id": ds.Value("int32"),
        # The resolved license record, nested alongside the raw license_id.
        "license": {
            "url": ds.Value("string"),
            "license_id": ds.Value("int8"),
            "name": ds.Value("string"),
        },
    }
|
| 703 |
+
|
| 704 |
+
|
| 705 |
+
def get_features_instance_dict(decode_rle: bool):
    """Per-annotation feature spec for the instances task.

    When ``decode_rle`` is True the segmentation is stored as an image
    (decoded mask); otherwise as raw RLE counts/size arrays.
    """
    segmentation_feature = (
        ds.Image()
        if decode_rle
        else {
            "counts": ds.Sequence(ds.Value("int64")),
            "size": ds.Sequence(ds.Value("int32")),
        }
    )
    return {
        "annotation_id": ds.Value("int64"),
        "image_id": ds.Value("int64"),
        "segmentation": segmentation_feature,
        "area": ds.Value("float32"),
        "iscrowd": ds.Value("bool"),
        "bbox": ds.Sequence(ds.Value("float32"), length=4),
        "category_id": ds.Value("int32"),
        "category": {
            "category_id": ds.Value("int32"),
            "name": ds.Value("string"),
            "supercategory": ds.Value("string"),
        },
    }
|
| 727 |
+
|
| 728 |
+
|
| 729 |
+
def get_features_captions() -> ds.Features:
    """Full feature spec for the captions task."""
    caption_annotations = ds.Sequence(
        {
            "annotation_id": ds.Value("int64"),
            "image_id": ds.Value("int64"),
            "caption": ds.Value("string"),
        }
    )
    return ds.Features(
        {**get_features_base_dict(), "annotations": caption_annotations}
    )
|
| 741 |
+
|
| 742 |
+
|
| 743 |
+
def get_features_instances(decode_rle: bool) -> ds.Features:
    """Full feature spec for the instances task."""
    instance_annotations = ds.Sequence(
        get_features_instance_dict(decode_rle=decode_rle)
    )
    return ds.Features(
        {**get_features_base_dict(), "annotations": instance_annotations}
    )
|
| 748 |
+
|
| 749 |
+
|
| 750 |
+
def get_features_person_keypoints(decode_rle: bool) -> ds.Features:
    """Full feature spec for the person-keypoints task.

    Extends the instances per-annotation spec with keypoint fields.
    """
    per_annotation = get_features_instance_dict(decode_rle=decode_rle)
    per_annotation["keypoints"] = ds.Sequence(
        {
            "state": ds.Value("string"),
            "x": ds.Value("int32"),
            "y": ds.Value("int32"),
            "v": ds.Value("int32"),
        }
    )
    per_annotation["num_keypoints"] = ds.Value("int32")
    return ds.Features(
        {**get_features_base_dict(), "annotations": ds.Sequence(per_annotation)}
    )
|
| 769 |
+
|
| 770 |
+
|
| 771 |
+
def dataset_configs(year: int, version: ds.Version) -> List[MsCocoConfig]:
    """Builder configs for one release year: each single task plus the
    supported caption/task combinations."""
    coco_tasks: Tuple[Union[str, Tuple[str, ...]], ...] = (
        "captions",
        "instances",
        "person_keypoints",
        ("captions", "instances"),
        ("captions", "person_keypoints"),
    )
    return [
        MsCocoConfig(year=year, coco_task=coco_task, version=version)
        for coco_task in coco_tasks
    ]
|
| 799 |
+
|
| 800 |
+
|
| 801 |
+
def configs_2014(version: ds.Version) -> List[MsCocoConfig]:
    """All builder configs for the 2014 release."""
    return dataset_configs(year=2014, version=version)
|
| 803 |
+
|
| 804 |
+
|
| 805 |
+
def configs_2017(version: ds.Version) -> List[MsCocoConfig]:
    """All builder configs for the 2017 release."""
    return dataset_configs(year=2017, version=version)
|
| 807 |
+
|
| 808 |
+
|
| 809 |
+
class MsCocoDataset(ds.GeneratorBasedBuilder):
    """Hugging Face dataset builder for MS COCO (2014 and 2017 releases)."""

    VERSION = ds.Version("1.0.0")
    BUILDER_CONFIG_CLASS = MsCocoConfig
    BUILDER_CONFIGS = configs_2014(version=VERSION) + configs_2017(version=VERSION)

    @property
    def year(self) -> int:
        # Release year taken from the active config (2014 or 2017).
        config: MsCocoConfig = self.config  # type: ignore
        return config.year

    @property
    def task(self) -> str:
        # Task name from the active config; combined tasks join with "-".
        config: MsCocoConfig = self.config  # type: ignore
        return config.task

    def _info(self) -> ds.DatasetInfo:
        # NOTE(review): only the three single-task names are handled, so
        # combined-task configs (e.g. "captions-instances") currently raise
        # ValueError here — confirm whether combined configs are usable.
        if self.task == "captions":
            features = get_features_captions()
        elif self.task == "instances":
            features = get_features_instances(
                decode_rle=self.config.decode_rle,  # type: ignore
            )
        elif self.task == "person_keypoints":
            features = get_features_person_keypoints(
                decode_rle=self.config.decode_rle,  # type: ignore
            )
        else:
            raise ValueError(f"Invalid task: {self.task}")

        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )

    def _split_generators(self, dl_manager: ds.DownloadManager):
        """Download/extract archives and define the train/validation splits."""
        file_paths = dl_manager.download_and_extract(_URLS[f"{self.year}"])

        imgs = file_paths["images"]  # type: ignore
        anns = file_paths["annotations"]  # type: ignore

        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,  # type: ignore
                gen_kwargs={
                    "base_image_dir": imgs["train"],
                    "base_annotation_dir": anns["train_validation"],
                    "split": "train",
                },
            ),
            ds.SplitGenerator(
                name=ds.Split.VALIDATION,  # type: ignore
                gen_kwargs={
                    "base_image_dir": imgs["validation"],
                    "base_annotation_dir": anns["train_validation"],
                    "split": "val",
                },
            ),
            # The test split is disabled: `_generate_test_examples` below is
            # not implemented.
            # ds.SplitGenerator(
            #     name=ds.Split.TEST,  # type: ignore
            #     gen_kwargs={
            #         "base_image_dir": imgs["test"],
            #         "test_image_info_path": anns["test_image_info"],
            #         "split": "test",
            #     },
            # ),
        ]

    def _generate_train_val_examples(
        self, split: str, base_image_dir: str, base_annotation_dir: str
    ):
        """Yield examples for the train or validation split of the active task."""
        image_dir = os.path.join(base_image_dir, f"{split}{self.year}")

        # Annotation files follow the COCO layout:
        # annotations/<task>_<split><year>.json
        ann_dir = os.path.join(base_annotation_dir, "annotations")
        ann_file_path = os.path.join(ann_dir, f"{self.task}_{split}{self.year}.json")

        ann_json = _load_annotation_json(ann_file_path=ann_file_path)

        # info = AnnotationInfo.from_dict(ann_json["info"])
        licenses = _load_licenses_data(license_dicts=ann_json["licenses"])
        images = _load_images_data(image_dicts=ann_json["images"])

        # The captions JSON has no "categories" section, hence the guard.
        category_dicts = ann_json.get("categories")
        categories = (
            _load_categories_data(category_dicts=category_dicts)
            if category_dicts is not None
            else None
        )

        config: MsCocoConfig = self.config  # type: ignore
        if config.task == "captions":
            yield from generate_captions_examples(
                annotations=_load_captions_data(
                    ann_dicts=ann_json["annotations"],
                ),
                image_dir=image_dir,
                images=images,
                licenses=licenses,
            )
        elif config.task == "instances":
            assert categories is not None
            yield from generate_instances_examples(
                annotations=_load_instances_data(
                    images=images,
                    ann_dicts=ann_json["annotations"],
                    decode_rle=self.config.decode_rle,  # type: ignore
                ),
                categories=categories,
                image_dir=image_dir,
                images=images,
                licenses=licenses,
            )
        elif config.task == "person_keypoints":
            assert categories is not None
            yield from generate_person_keypoints_examples(
                annotations=_load_person_keypoints_data(
                    images=images,
                    ann_dicts=ann_json["annotations"],
                    decode_rle=self.config.decode_rle,  # type: ignore
                ),
                categories=categories,
                image_dir=image_dir,
                images=images,
                licenses=licenses,
            )
        else:
            raise ValueError(f"Invalid task: {config.task}")

    def _generate_test_examples(self, test_image_info_path: str):
        # Test-set generation is not implemented (test annotations are not
        # part of the downloaded archives used above).
        raise NotImplementedError

    def _generate_examples(
        self,
        split: MscocoSplits,
        base_image_dir: Optional[str] = None,
        base_annotation_dir: Optional[str] = None,
        test_image_info_path: Optional[str] = None,
    ):
        """Dispatch to the split-specific generator based on provided kwargs."""
        if split == "test" and test_image_info_path is not None:
            yield from self._generate_test_examples(
                test_image_info_path=test_image_info_path
            )
        elif (
            split in get_args(MscocoSplits)
            and base_image_dir is not None
            and base_annotation_dir is not None
        ):
            yield from self._generate_train_val_examples(
                split=split,
                base_image_dir=base_image_dir,
                base_annotation_dir=base_annotation_dir,
            )
        else:
            raise ValueError(
                f"Invalid arguments: split = {split}, "
                f"base_image_dir = {base_image_dir}, "
                f"base_annotation_dir = {base_annotation_dir}, "
                f"test_image_info_path = {test_image_info_path}",
            )
|
README.md
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Dataset Card for MSCOCO
|
| 2 |
+
|
| 3 |
+
[](https://github.com/shunk031/huggingface-datasets_MSCOCO/actions/workflows/ci.yaml)
|
poetry.lock
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pyproject.toml
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[tool.poetry]
|
| 2 |
+
name = "huggingface-datasets-mscoco"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = ""
|
| 5 |
+
authors = ["Shunsuke KITADA <[email protected]>"]
|
| 6 |
+
readme = "README.md"
|
| 7 |
+
|
| 8 |
+
[tool.poetry.dependencies]
|
| 9 |
+
python = "^3.9"
|
| 10 |
+
datasets = {extras = ["vision"], version = "^2.14.4"}
|
| 11 |
+
pycocotools = "^2.0.7"
|
| 12 |
+
|
| 13 |
+
[tool.poetry.group.dev.dependencies]
|
| 14 |
+
ruff = "^0.0.286"
|
| 15 |
+
black = "^23.7.0"
|
| 16 |
+
mypy = "^1.5.1"
|
| 17 |
+
pytest = "^7.4.0"
|
| 18 |
+
pytest-xdist = "^3.3.1"
|
| 19 |
+
|
| 20 |
+
[build-system]
|
| 21 |
+
requires = ["poetry-core"]
|
| 22 |
+
build-backend = "poetry.core.masonry.api"
|
tests/MSCOCO_test.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datasets as ds
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@pytest.fixture
def dataset_path() -> str:
    # Path to the dataset loading script, relative to the repository root.
    return "MSCOCO.py"
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@pytest.mark.parametrize(
    argnames="decode_rle,",
    argvalues=(
        True,
        False,
    ),
)
@pytest.mark.parametrize(
    argnames=(
        "dataset_year",
        "coco_task",
        "expected_num_train",
        "expected_num_validation",
    ),
    argvalues=(
        # Expected row counts per year/task; presumably the official split
        # sizes minus images lacking annotations for the task — the loader's
        # generators skip such images. Verify against the loading script.
        (2014, "captions", 82783, 40504),
        (2017, "captions", 118287, 5000),
        (2014, "instances", 82081, 40137),
        (2017, "instances", 117266, 4952),
        (2014, "person_keypoints", 45174, 21634),
        (2017, "person_keypoints", 64115, 2693),
    ),
)
def test_load_dataset(
    dataset_path: str,
    dataset_year: int,
    coco_task: str,
    decode_rle: bool,
    expected_num_train: int,
    expected_num_validation: int,
):
    """Load each year/task config and check train/validation row counts."""
    dataset = ds.load_dataset(
        path=dataset_path,
        year=dataset_year,
        coco_task=coco_task,
        decode_rle=decode_rle,
    )
    assert dataset["train"].num_rows == expected_num_train
    assert dataset["validation"].num_rows == expected_num_validation
|
tests/__init__.py
ADDED
|
File without changes
|