# TransPhy3D/load_demo.py
from os.path import join
import webdataset as wds
from PIL import Image
import io
import json
import numpy as np
import argparse
import matplotlib
import os
try:
import imageio
HAS_IMAGEIO = True
except ImportError:
HAS_IMAGEIO = False
try:
import cv2
HAS_CV2 = True
except ImportError:
HAS_CV2 = False
def dump_video(image_seq, output_path, fps=30, codec='libx264', quality=8):
"""
Dump a sequence of PIL Images to a video file.
Args:
image_seq: List of PIL Images
output_path: Output video file path (e.g., 'output.mp4')
fps: Frames per second (default: 30)
codec: Video codec (default: 'libx264')
quality: Video quality, 0-10, higher is better (default: 8)
"""
if not image_seq:
print("Warning: Empty image sequence, skipping video dump")
return
if HAS_IMAGEIO:
# Use imageio (simpler API)
frames = []
for img in image_seq:
# Convert PIL Image to numpy array
frames.append(np.array(img))
        # Write the video; mimwrite with fps/codec/quality kwargs needs the
        # imageio ffmpeg plugin and may be rejected by some imageio versions,
        # hence the fallbacks below
try:
# Try imageio v2 API
imageio.mimwrite(output_path, frames, fps=fps, codec=codec, quality=quality)
except (AttributeError, TypeError):
# Fallback for imageio v3
try:
writer = imageio.get_writer(output_path, fps=fps, codec=codec)
for frame in frames:
writer.append_data(frame)
writer.close()
except Exception:
# Final fallback without codec
writer = imageio.get_writer(output_path, fps=fps)
for frame in frames:
writer.append_data(frame)
writer.close()
print(f"Video saved to {output_path} using imageio")
    elif HAS_CV2:
        # Use OpenCV as a fallback; PIL's Image.size is (width, height)
        width, height = image_seq[0].size
        # fourcc codes are exactly 4 characters, so fall back to 'mp4v' when the
        # requested codec name (e.g. 'libx264') doesn't fit
        fourcc_str = codec if len(codec) == 4 else 'mp4v'
        fourcc = cv2.VideoWriter_fourcc(*fourcc_str)
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
for img in image_seq:
# Convert PIL Image to numpy array (RGB -> BGR for OpenCV)
img_array = np.array(img)
if len(img_array.shape) == 3:
if img_array.shape[2] == 3:
img_array = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
elif img_array.shape[2] == 4:
img_array = cv2.cvtColor(img_array, cv2.COLOR_RGBA2BGR)
out.write(img_array)
out.release()
print(f"Video saved to {output_path} using OpenCV")
else:
raise ImportError("Neither imageio nor cv2 is available. Please install one: pip install imageio[ffmpeg] or pip install opencv-python")
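
# A minimal usage sketch for dump_video (array shape and frame count are
# invented for illustration):
#   frames = [Image.fromarray(np.zeros((240, 320, 3), dtype=np.uint8))
#             for _ in range(30)]
#   dump_video(frames, 'black.mp4', fps=30)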
# Parse 16bit depth to actual depth values
def parse_depth_16bit(depth_image: Image.Image, max_depth: float) -> np.ndarray:
"""Parse 16-bit depth image back to actual depth values."""
# Convert PIL image to numpy array
depth_array = np.array(depth_image, dtype=np.uint16)
# Normalize to [0, 1] and multiply by max_depth to get actual depth
depth_normalized = depth_array.astype(np.float32) / 65535.0
actual_depth = depth_normalized * max_depth
return actual_depth
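
# Sketch of the presumed inverse encoding, for reference only -- an assumption
# about how the dataset's 16-bit depth PNGs were produced, not code shipped
# with TransPhy3D:
def encode_depth_16bit(depth: np.ndarray):
    """Quantize a float depth map to 16 bits; returns (PIL image, max_depth)."""
    max_depth = max(float(depth.max()), 1e-8)  # guard against all-zero maps
    # Map [0, max_depth] -> [0, 65535] and round to the nearest integer
    depth_u16 = np.round(depth / max_depth * 65535.0).astype(np.uint16)
    return Image.fromarray(depth_u16), max_depth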
def colorize_depth_map(depth, mask=None, reverse_color=False):
    """Colorize a depth map in [0, 1] with the Spectral colormap; returns a PIL Image."""
    cm = matplotlib.colormaps["Spectral"]
    # Invert the depth values before applying the colormap if requested
    if reverse_color:
        img_colored_np = cm(1 - depth, bytes=False)[:, :, 0:3]
    else:
        img_colored_np = cm(depth, bytes=False)[:, :, 0:3]  # (h, w, 3)
    depth_colored = (img_colored_np * 255).astype(np.uint8)
    if mask is not None:
        # Keep only masked pixels (mask assumed to be a boolean numpy array);
        # everything outside the mask is rendered black
        masked_image = np.zeros_like(depth_colored)
        masked_image[mask] = depth_colored[mask]
        return Image.fromarray(masked_image)
    return Image.fromarray(depth_colored)
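
# Example use (variable names hypothetical): normalize metric depth to [0, 1]
# before colorizing:
#   depth01 = actual_depth / max_depth
#   vis = colorize_depth_map(depth01)  # PIL Image with the Spectral colormap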
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default='data/TransPhy3D/parametric_train/training/0_materials.000000.tar')
    parser.add_argument('--output_path', type=str, default='output')
    args = parser.parse_args()
os.makedirs(args.output_path, exist_ok=True)
    # Stream samples straight from the tar shard
    dataset = wds.WebDataset(args.data_path)
data = {}
depth_seq_raw = [] # Store raw 16bit depth images
depth_max_values = [] # Store max_depth for each frame
normal_seq = []
rgbs_seq = []
    meta_info = []
# First pass: load all data including 16bit depth
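    # WebDataset splits each tar member name at the first dot, so a member such
    # as 000000.depth.png (naming assumed) appears in the sample dict under the
    # key 'depth.png'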
for sample in dataset:
depth_img = None
max_depth = None
for key, value in sample.items():
# Ensure value is bytes-like
if not isinstance(value, (bytes, bytearray)):
continue
# Match exact key names or check file extension
if key == 'depth.png':
# Load 16bit depth image
depth_img = Image.open(io.BytesIO(value))
elif key == 'depth.json':
# Load max_depth value
depth_info = json.loads(value)
max_depth = depth_info.get('max_depth', None)
elif key == 'normal.png':
img = Image.open(io.BytesIO(value))
normal_seq.append(img)
elif key == 'image.png':
img = Image.open(io.BytesIO(value))
rgbs_seq.append(img)
elif key.endswith('.json'):
meta_info.append(json.loads(value))
        # Store the depth frame; max_depth may be None if depth.json was missing
if depth_img is not None:
depth_seq_raw.append(depth_img)
depth_max_values.append(max_depth)
#* depth processing
depth_seq_vis = []
for depth_img, max_depth in zip(depth_seq_raw, depth_max_values):
if max_depth is not None:
# Parse 16bit depth to actual depth values
            #* shows how to convert back to the original depth unit
actual_depth = parse_depth_16bit(depth_img, max_depth)
# Normalize for visualization (0-255)
depth_normalized = actual_depth / max_depth # [0, 1]
depth_colored_img = colorize_depth_map(depth_normalized)
depth_seq_vis.append(depth_colored_img)
        else:
            # Fallback: show the raw depth image when max_depth is unavailable
            # (the 8-bit 'L' conversion discards the 16-bit precision)
            depth_seq_vis.append(depth_img.convert('L'))
dump_video(rgbs_seq, join(args.output_path, 'output_rgb.mp4'), fps=30)
dump_video(normal_seq, join(args.output_path, 'output_normal.mp4'), fps=30)
dump_video(depth_seq_vis, join(args.output_path, 'output_depth.mp4'), fps=30)
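
# Example invocation (paths shown are the argparse defaults above):
#   python load_demo.py \
#       --data_path data/TransPhy3D/parametric_train/training/0_materials.000000.tar \
#       --output_path output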