Commit b32aa52
Parent(s): initial commit

Files changed:
- .gitattributes +60 -0
- FE-Wireframe.py +115 -0
- README.md +132 -0
- events_raw.zip +3 -0
- figures/sim_data.png +3 -0
- images-blur.zip +3 -0
- images-end.zip +3 -0
- images-start.zip +3 -0
- test.jsonl +3 -0
- test.py +49 -0
- train.jsonl +3 -0
.gitattributes ADDED
@@ -0,0 +1,60 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.lz4 filter=lfs diff=lfs merge=lfs -text
+*.mds filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+# Audio files - uncompressed
+*.pcm filter=lfs diff=lfs merge=lfs -text
+*.sam filter=lfs diff=lfs merge=lfs -text
+*.raw filter=lfs diff=lfs merge=lfs -text
+# Audio files - compressed
+*.aac filter=lfs diff=lfs merge=lfs -text
+*.flac filter=lfs diff=lfs merge=lfs -text
+*.mp3 filter=lfs diff=lfs merge=lfs -text
+*.ogg filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
+# Image files - uncompressed
+*.bmp filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.tiff filter=lfs diff=lfs merge=lfs -text
+# Image files - compressed
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
+# Video files - compressed
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.webm filter=lfs diff=lfs merge=lfs -text
+*.jsonl filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
FE-Wireframe.py ADDED
@@ -0,0 +1,115 @@
+# -*- encoding: utf-8 -*-
+
+"""
+@File    : FE-Wireframe.py
+@Time    : 2025/08/31 23:00:00
+@Author  : lh9171338
+@Version : 1.0
+@Contact : [email protected]
+"""
+
+import os
+import numpy as np
+import json
+import datasets
+from datasets import Features, Image, Sequence, Value
+
+
+_CITATION = """\
+@ARTICLE{10323537,
+  author={Yu, Huai and Li, Hao and Yang, Wen and Yu, Lei and Xia, Gui-Song},
+  journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
+  title={Detecting Line Segments in Motion-Blurred Images With Events},
+  year={2023},
+  pages={1-16},
+  doi={10.1109/TPAMI.2023.3334877}
+}
+"""
+_DESCRIPTION = """\
+This new dataset is designed for motion-blurred image line segment detection with events.
+"""
+_HOMEPAGE = ""
+_LICENSE = "mit"
+
+
+class FEBlurframe(datasets.GeneratorBasedBuilder):
+    """FE-Blurframe Dataset"""
+
+    VERSION = datasets.Version("1.1.0")
+
+    def _info(self):
+        """infos"""
+        features = Features(
+            {
+                "blur_image": Image(),
+                "start_image": Image(),
+                "end_image": Image(),
+                "events": {
+                    "image_size": Sequence(Value("int32")),
+                    "x": Sequence(Value("int16")),
+                    "y": Sequence(Value("int16")),
+                    "t": Sequence(Value("int32")),
+                    "p": Sequence(Value("bool")),
+                },
+                "H": Sequence(Sequence(Value("float32"))),  # shape [3, 3]
+                "image_size": Sequence(Value("int32")),  # shape [2]
+                "junc": Sequence(Sequence(Value("float32"))),  # shape [M, 2]
+                "flow": Sequence(Sequence(Value("float32"))),  # shape [M, 2]
+                "lines": Sequence(Sequence(Value("float32"))),  # shape [N, 4]
+                "edges_positive": Sequence(Sequence(Value("float32"))),  # shape [Np, 2]
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """split generators"""
+        data_files = {
+            "train": "train.jsonl",
+            "test": "test.jsonl",
+            "events_raw": "events_raw.zip",
+            "images-blur": "images-blur.zip",
+            "images-start": "images-start.zip",
+            "images-end": "images-end.zip",
+        }
+        data_files = dl_manager.download_and_extract(data_files)
+        print(f"data_files: {data_files}")
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": data_files["train"],
+                    "data_files": data_files,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": data_files["test"],
+                    "data_files": data_files,
+                },
+            ),
+        ]
+
+    def _generate_examples(self, filepath, data_files):
+        """generate examples"""
+        with open(filepath, encoding="utf-8") as f:
+            lines = f.readlines()
+        for idx, line in enumerate(lines):
+            info = json.loads(line)
+            new_info = dict()
+            new_info["blur_image"] = os.path.join(data_files["images-blur"], "images-blur", info["filename"])
+            new_info["start_image"] = os.path.join(data_files["images-start"], "images-start", info["filename"])
+            new_info["end_image"] = os.path.join(data_files["images-end"], "images-end", info["filename"])
+            events = np.load(
+                os.path.join(data_files["events_raw"], "events_raw", info["filename"].replace(".png", ".npz"))
+            )
+            new_info["events"] = dict(**events)
+            for key in ["image_size", "H", "junc", "flow", "lines", "edges_positive"]:
+                new_info[key] = info[key]
+            yield idx, new_info
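For orientation, the `events` dict yielded above bundles per-event arrays (`x`, `y`, `t`, `p`) plus an `image_size` field. Below is a minimal sketch of turning one sample's events into a signed per-pixel count image; it assumes `image_size` is ordered as (width, height), and the count-image representation itself is only an illustrative choice, not part of the loading script.

```python
import numpy as np


def events_to_count_image(events):
    """Accumulate one sample's events into a signed per-pixel count image.

    Assumes the field layout declared in FE-Wireframe.py: per-event `x`, `y`
    coordinates, boolean polarity `p`, and `image_size` as (width, height).
    """
    w, h = events["image_size"]
    x = np.asarray(events["x"], dtype=np.int64)
    y = np.asarray(events["y"], dtype=np.int64)
    p = np.asarray(events["p"], dtype=np.int64)  # bool polarity -> 0/1
    valid = (x >= 0) & (x < w) & (y >= 0) & (y < h)  # drop out-of-frame events
    count = np.zeros((h, w), dtype=np.int32)
    # +1 for positive events, -1 for negative events, accumulated per pixel
    np.add.at(count, (y[valid], x[valid]), 2 * p[valid] - 1)
    return count
```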
README.md ADDED
@@ -0,0 +1,132 @@
+---
+datasets: lh9171338/FE-Wireframe
+pretty_name: FE-Wireframe Dataset
+license: mit
+tags:
+- computer-vision
+- line-segment-detection
+- wireframe-parsing
+- blurred image
+- event data
+size_categories: 1K<n<10K
+---
+
+# FE-Wireframe Dataset
+
+<p align="center">
+✏️ <a href="https://github.com/lh9171338/FE-LSD"><b>Github</b></a>   |   📑 <a href="https://arxiv.org/abs/2211.07365">Paper</a>   |   🖼️ <a href="https://huggingface.co/spaces/lh9171338/FE-LineViewer">Viewer</a>
+</p>
+
+
+This is the FE-Wireframe dataset, designed for motion-blurred image line segment detection with events.
+
+## Summary
+
+The FE-Wireframe dataset is constructed to provide synthetic data for motion-blur scenarios without requiring manual line annotations. Starting from the labeled [Wireframe dataset](https://huggingface.co/datasets/lh9171338/Wireframe), we employ the ESIM simulator to generate event-based synthetic sequences. The overall data generation pipeline is shown below.
+
+
+
+- File structure:
+```
+|-- events_raw
+    |-- 0000001.npz
+    |-- 0000002.npz
+    |-- ...
+|-- images-blur
+    |-- 0000001.png
+    |-- 0000002.png
+    |-- ...
+|-- images-start
+    |-- 0000001.png
+    |-- 0000002.png
+    |-- ...
+|-- images-end
+    |-- 0000001.png
+    |-- 0000002.png
+    |-- ...
+|-- train.jsonl
+|-- test.jsonl
+```
+
+- Number of samples:
+    - Train: 5,000
+    - Test: 462
+
+## Download
+
+- Download with huggingface-hub
+
+```shell
+python3 -m pip install huggingface-hub
+huggingface-cli download --repo-type dataset lh9171338/FE-Wireframe --local-dir ./
+```
+
+- Download with Git
+
+```shell
+git lfs install
+git clone https://huggingface.co/datasets/lh9171338/FE-Wireframe
+```
+
+## Usage
+
+- Load the dataset from the Hugging Face Hub
+
+```python
+from datasets import load_dataset
+
+ds = load_dataset("lh9171338/FE-Wireframe", trust_remote_code=True)
+print(ds)
+# DatasetDict({
+#     train: Dataset({
+#         features: ['blur_image', 'start_image', 'end_image', 'events', 'H', 'image_size', 'junc', 'flow', 'lines', 'edges_positive'],
+#         num_rows: 5000
+#     })
+#     test: Dataset({
+#         features: ['blur_image', 'start_image', 'end_image', 'events', 'H', 'image_size', 'junc', 'flow', 'lines', 'edges_positive'],
+#         num_rows: 462
+#     })
+# })
+```
+
+- Load the dataset from a local copy
+
+```python
+from datasets import load_dataset
+
+ds = load_dataset("FE-Wireframe", trust_remote_code=True)
+print(ds)
+# DatasetDict({
+#     train: Dataset({
+#         features: ['blur_image', 'start_image', 'end_image', 'events', 'H', 'image_size', 'junc', 'flow', 'lines', 'edges_positive'],
+#         num_rows: 5000
+#     })
+#     test: Dataset({
+#         features: ['blur_image', 'start_image', 'end_image', 'events', 'H', 'image_size', 'junc', 'flow', 'lines', 'edges_positive'],
+#         num_rows: 462
+#     })
+# })
+```
+
+- Load the annotations directly from the jsonl files
+```python
+import jsonlines
+
+with jsonlines.open("test.jsonl") as reader:
+    infos = list(reader)
+print(infos[0].keys())
+# dict_keys(['filename', 'image_size', 'H', 'junc', 'flow', 'lines', 'edges_positive'])
+```
+
+## Citation
+
+```
+@ARTICLE{10323537,
+  author={Yu, Huai and Li, Hao and Yang, Wen and Yu, Lei and Xia, Gui-Song},
+  journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
+  title={Detecting Line Segments in Motion-Blurred Images With Events},
+  year={2023},
+  pages={1-16},
+  doi={10.1109/TPAMI.2023.3334877}
+}
+```
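Building on the jsonl usage shown in the README above, the sketch below pairs each annotation record with its blurred image via the `filename` key. It assumes the archives have been downloaded and unzipped so that `images-blur/` sits next to `test.jsonl`; the comment on the `lines` field follows the shape noted in FE-Wireframe.py.

```python
import os

import jsonlines
from PIL import Image

# Pair each annotation record with its blurred image; the directory layout
# follows the "File structure" section of the README.
with jsonlines.open("test.jsonl") as reader:
    for info in reader:
        image = Image.open(os.path.join("images-blur", info["filename"]))
        print(info["filename"], image.size, len(info["lines"]))  # lines: [N, 4]
        break
```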
events_raw.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b2fd9c27565e62dbb3c279fe55c7a9a17e17886cd0078e1724eb7891b35fde4
+size 1243791800
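As a quick sanity check after unzipping `events_raw.zip`, a raw event file can be inspected directly with NumPy. The filename below follows the file structure listed in the README; the printed keys are simply whatever arrays the `.npz` archive contains.

```python
import numpy as np

# List the array names, shapes, and dtypes stored in one raw event file.
with np.load("events_raw/0000001.npz") as events:
    for key in events.files:
        print(key, events[key].shape, events[key].dtype)
```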
figures/sim_data.png ADDED (Git LFS)
images-blur.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b5ea2045e3f26fb27b8ee0014c913876f912fb5fb953900af8a393e2f748792
+size 1147123145
images-end.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:808412532be48270ff6eed2cf8c10fc9d9d9fde874ba149cde40ce20721aa250
+size 1773391309
images-start.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:694fe15142987a5c6d13b2e9dd55ea9802cd5b16ac0859becfe4c379b32e203e
+size 1555126309
test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ee9dba07525ecb0e99dde77279065d91f83a6781e7d10bbe10da3cace8644ad
+size 6879074
test.py ADDED
@@ -0,0 +1,49 @@
+from datasets import load_dataset
+
+ds = load_dataset("lh9171338/FE-Wireframe", trust_remote_code=True)
+# ds = load_dataset("/Users/lihao57/workspace/FE-Blurframe", trust_remote_code=True)
+# ds = load_dataset("FE-Wireframe.py", trust_remote_code=True)
+print(ds)
+print(ds["test"][0].keys())
+print(ds["test"][0]["events"]["image_size"])
+
+# import os
+# import numpy as np
+# import glob
+# from PIL import Image
+# from lh_tool.iterator import AutoParallelProcess
+
+
+# def func(event_file, save_path):
+#     events = np.load(event_file)
+#     x = events["x"].astype("int16")
+#     y = events["y"].astype("int16")
+#     p = events["p"].astype("bool")
+#     t = events["t"].astype("int32")
+#     image_file = event_file.replace("events_raw", "images-blur").replace(".npz", ".png")
+#     image = Image.open(image_file)
+#     image_size = image.size
+#     w, h = image_size
+#     mask = (x >= 0) & (x < w) & (y >= 0) & (y < h)
+#     x = x[mask]
+#     y = y[mask]
+#     p = p[mask]
+#     t = t[mask]
+#     data = dict(
+#         image_size=image_size,
+#         x=x,
+#         y=y,
+#         p=p,
+#         t=t,
+#     )
+#     np.savez_compressed(os.path.join(save_path, os.path.basename(event_file)), **data)
+
+
+# if __name__ == "__main__":
+#     event_files = glob.glob("events_raw/*.npz")
+#     save_path = "events"
+#     os.makedirs(save_path, exist_ok=True)
+#     AutoParallelProcess(func).run(
+#         event_file=event_files,
+#         save_path=save_path,
+#     )
train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39e186404f6f33205e073d74fadb2dda711200314787364b8e93b63fd55c9663
+size 75449406