mirror of
https://github.com/huggingface/lerobot.git
synced 2026-05-17 01:30:14 +00:00
bd9619dfc3
* chore(video backend): renaming codec into video_backend in get_safe_default_video_backend() * feat(pyav utils): adding support for PyAV encoding parameters validation * feat(VideoEncoderConfig): creating a VideoEncoderConfig to encapsulate encoding parameters * feat(VideoEncoderConfig): propagating the VideoEncoderConfig in the codebase * chore(docs): updating the docs * feat(metadata): adding encoding parameters in dataset metadata * fix(concatenation compatibility): adding compatibility check when concatenating video files * feat(VideoEncoderConfig init): making VideoEncoderConfig more robust and adaptable to multiple backends * feat(pyav checks): making pyav parameters checks more robust * chore(duplicate): removing duplicate get_codec_options definition * test(existing): adapting existing tests * test(new): adding new tests for encoding related features * chore(format): fixing formatting issues * chore(PyAV): cleaning up PyAV utils and encoding parameters checks to stick to the minimum required tooling. * chore(format): formatting code * chore(docstrings): updating docstrings * fix(camera_encoder_config): Removing camera_encoder_config from LeRobotDataset, as it's only required in LeRobotDatasetWriter. * feat(default values): applying a consistent naming convention for default RGB cameras video encoder parameters * fix(rollout): propagating VideoEncoderConfig to the latest recording modes * chore(format): formatting code, fixing error messages and variable names * fix(arguments order): reverting changes in arguments order in StreamingVideoEncoder * chore(relative imports): switching to relative local imports within lerobot.datasets * test(artifacts): cleaning up artifacts for the video encoding tests * chore(docs): updating docs * chore(format): formatting code * fix(imports): refactoring the file architecture to avoid circular imports. VideoEncoderConfig is now defined in lerobot.configs and lazily imports av at runtime.
* fix(typos): fixing typos and small mistakes * test(factories): updating factories * feat(aggregate): updating dataset aggregation procedure. Encoding tuning parameters (crf, g,...) are ignored for validation and changed to None in the aggregated dataset if incompatible. * docs(typos): fixing typos * fix(deletion): reverting unwanted deletion * fix(typos): fixing multiple typos * feat(codec options): passing codec options to lerobot_edit_dataset episode deletion tool * typo(typo): typo * fix(typos): fixing remaining typos * chore(rename): renaming camera_encoder_config to camera_encoder * docs(clean): cleaning and formatting docs * docs(dataset): adding details about datasets * chore(format): formatting code * docs(warning): adding warning regarding encoding parameters modification * fix(re-encoding): removing inconsistent re-encoding option in lerobot_edit_dataset * typos(typos): typos * chore(format): resolving prettier issues * fix(h264_nvenc): fixing crf handling for h264_nvenc * docs(clean): removing too technical parts of the docs * fix(imports): fixing imports at the __init__ level * fix(imports): fixing not very pretty imports in video config file
239 lines
8.9 KiB
Python
239 lines
8.9 KiB
Python
#!/usr/bin/env python
|
|
|
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
"""Contract tests for DatasetWriter."""
|
|
|
|
from pathlib import Path
|
|
from unittest.mock import patch
|
|
|
|
import numpy as np
|
|
import pytest
|
|
import torch
|
|
from PIL import Image
|
|
|
|
pytest.importorskip("datasets", reason="datasets is required (install lerobot[dataset])")
|
|
|
|
from lerobot.configs import VideoEncoderConfig
|
|
from lerobot.datasets.dataset_writer import _encode_video_worker
|
|
from lerobot.datasets.lerobot_dataset import LeRobotDataset
|
|
from lerobot.datasets.utils import DEFAULT_IMAGE_PATH
|
|
from tests.fixtures.constants import DEFAULT_FPS, DUMMY_REPO_ID
|
|
|
|
# Minimal state/action-only feature spec (no camera keys) shared by most tests below;
# keeps dataset creation fast since no video encoding is involved.
SIMPLE_FEATURES = {
    "state": {"dtype": "float32", "shape": (6,), "names": None},
    "action": {"dtype": "float32", "shape": (6,), "names": None},
}
|
|
|
|
|
|
def _make_frame(features: dict, task: str = "Dummy task") -> dict:
|
|
"""Build a valid frame dict for the given features."""
|
|
frame = {"task": task}
|
|
for key, ft in features.items():
|
|
if ft["dtype"] in ("image", "video"):
|
|
frame[key] = np.random.randint(0, 256, size=ft["shape"], dtype=np.uint8)
|
|
elif ft["dtype"] in ("float32", "float64"):
|
|
frame[key] = torch.randn(ft["shape"])
|
|
elif ft["dtype"] == "int64":
|
|
frame[key] = torch.zeros(ft["shape"], dtype=torch.int64)
|
|
return frame
|
|
|
|
|
|
# ── Existing encode_video_worker tests ───────────────────────────────
|
|
|
|
|
|
def test_encode_video_worker_forwards_camera_encoder(tmp_path):
    """_encode_video_worker forwards camera_encoder to encode_video_frames."""
    video_key = "observation.images.laptop"
    # Lay out one dummy PNG where the worker expects the episode's frames.
    fpath = DEFAULT_IMAGE_PATH.format(image_key=video_key, episode_index=0, frame_index=0)
    img_dir = tmp_path / Path(fpath).parent
    img_dir.mkdir(parents=True, exist_ok=True)
    Image.new("RGB", (64, 64), color="red").save(img_dir / "frame-000000.png")

    seen = {}

    def fake_encode(imgs_dir, video_path, fps, **kwargs):
        # Record forwarded kwargs and create the output file the worker expects.
        seen.update(kwargs)
        out = Path(video_path)
        out.parent.mkdir(parents=True, exist_ok=True)
        out.touch()

    with patch("lerobot.datasets.dataset_writer.encode_video_frames", side_effect=fake_encode):
        _encode_video_worker(
            video_key,
            0,
            tmp_path,
            fps=30,
            camera_encoder=VideoEncoderConfig(vcodec="h264", preset=None),
            encoder_threads=4,
        )

    assert seen["camera_encoder"].vcodec == "h264"
    assert seen["encoder_threads"] == 4
|
|
|
|
|
|
def test_encode_video_worker_default_camera_encoder(tmp_path):
    """_encode_video_worker passes None camera_encoder which encode_video_frames defaults."""
    video_key = "observation.images.laptop"
    # Lay out one dummy PNG where the worker expects the episode's frames.
    fpath = DEFAULT_IMAGE_PATH.format(image_key=video_key, episode_index=0, frame_index=0)
    img_dir = tmp_path / Path(fpath).parent
    img_dir.mkdir(parents=True, exist_ok=True)
    Image.new("RGB", (64, 64), color="red").save(img_dir / "frame-000000.png")

    seen = {}

    def fake_encode(imgs_dir, video_path, fps, **kwargs):
        # Record forwarded kwargs and create the output file the worker expects.
        seen.update(kwargs)
        out = Path(video_path)
        out.parent.mkdir(parents=True, exist_ok=True)
        out.touch()

    with patch("lerobot.datasets.dataset_writer.encode_video_frames", side_effect=fake_encode):
        # No encoder arguments given: both must be forwarded as None.
        _encode_video_worker(video_key, 0, tmp_path, fps=30)

    assert seen["camera_encoder"] is None
    assert seen["encoder_threads"] is None
|
|
|
|
|
|
# ── add_frame contracts ──────────────────────────────────────────────
|
|
|
|
|
|
def test_add_frame_increments_buffer_size(tmp_path):
    """Each add_frame() call increases episode_buffer['size'] by 1."""
    ds = LeRobotDataset.create(
        repo_id=DUMMY_REPO_ID, fps=DEFAULT_FPS, features=SIMPLE_FEATURES, root=tmp_path / "ds"
    )
    # Fresh dataset starts with an empty buffer.
    assert ds.writer.episode_buffer["size"] == 0

    for expected_size in (1, 2):
        ds.add_frame(_make_frame(SIMPLE_FEATURES))
        assert ds.writer.episode_buffer["size"] == expected_size
|
|
|
|
|
|
def test_add_frame_rejects_missing_feature(tmp_path):
    """add_frame() raises ValueError when a required feature is missing."""
    ds = LeRobotDataset.create(
        repo_id=DUMMY_REPO_ID, fps=DEFAULT_FPS, features=SIMPLE_FEATURES, root=tmp_path / "ds"
    )
    incomplete = {"task": "Dummy task", "state": torch.randn(6)}  # 'action' deliberately omitted
    with pytest.raises(ValueError, match="Missing features"):
        ds.add_frame(incomplete)
|
|
|
|
|
|
# ── save_episode contracts ───────────────────────────────────────────
|
|
|
|
|
|
def test_save_episode_writes_parquet(tmp_path):
    """After save_episode(), at least one .parquet file exists under data/."""
    root = tmp_path / "ds"
    ds = LeRobotDataset.create(
        repo_id=DUMMY_REPO_ID, fps=DEFAULT_FPS, features=SIMPLE_FEATURES, root=root
    )
    for _ in range(3):
        ds.add_frame(_make_frame(SIMPLE_FEATURES))
    ds.save_episode()

    # any() consumes the generator lazily; one match is enough.
    assert any((root / "data").rglob("*.parquet"))
|
|
|
|
|
|
def test_save_episode_updates_counters(tmp_path):
    """After save_episode(), metadata counters are updated."""
    n_frames = 5
    ds = LeRobotDataset.create(
        repo_id=DUMMY_REPO_ID, fps=DEFAULT_FPS, features=SIMPLE_FEATURES, root=tmp_path / "ds"
    )
    for _ in range(n_frames):
        ds.add_frame(_make_frame(SIMPLE_FEATURES))
    ds.save_episode()

    assert ds.meta.total_episodes == 1
    assert ds.meta.total_frames == n_frames
|
|
|
|
|
|
def test_save_episode_resets_buffer(tmp_path):
    """After save_episode(), the episode buffer is reset."""
    ds = LeRobotDataset.create(
        repo_id=DUMMY_REPO_ID, fps=DEFAULT_FPS, features=SIMPLE_FEATURES, root=tmp_path / "ds"
    )
    for _ in range(3):
        ds.add_frame(_make_frame(SIMPLE_FEATURES))
    ds.save_episode()

    # Buffer must be empty again so the next episode starts clean.
    assert ds.writer.episode_buffer["size"] == 0
|
|
|
|
|
|
def test_save_multiple_episodes(tmp_path):
    """Recording 3 episodes results in correct total counts."""
    ds = LeRobotDataset.create(
        repo_id=DUMMY_REPO_ID, fps=DEFAULT_FPS, features=SIMPLE_FEATURES, root=tmp_path / "ds"
    )
    # Episodes of different lengths to catch accidental per-episode resets.
    episode_lengths = [2, 3, 4]
    for n_frames in episode_lengths:
        for _ in range(n_frames):
            ds.add_frame(_make_frame(SIMPLE_FEATURES))
        ds.save_episode()

    assert ds.meta.total_episodes == len(episode_lengths)
    assert ds.meta.total_frames == sum(episode_lengths)
|
|
|
|
|
|
# ── clear / lifecycle ────────────────────────────────────────────────
|
|
|
|
|
|
def test_clear_resets_buffer(tmp_path):
    """clear_episode_buffer() resets the buffer size to 0."""
    ds = LeRobotDataset.create(
        repo_id=DUMMY_REPO_ID, fps=DEFAULT_FPS, features=SIMPLE_FEATURES, root=tmp_path / "ds"
    )
    ds.add_frame(_make_frame(SIMPLE_FEATURES))
    assert ds.writer.episode_buffer["size"] == 1

    ds.clear_episode_buffer()
    assert ds.writer.episode_buffer["size"] == 0
|
|
|
|
|
|
def test_finalize_is_idempotent(tmp_path):
    """Calling finalize() twice does not raise."""
    ds = LeRobotDataset.create(
        repo_id=DUMMY_REPO_ID, fps=DEFAULT_FPS, features=SIMPLE_FEATURES, root=tmp_path / "ds"
    )
    for _ in range(3):
        ds.add_frame(_make_frame(SIMPLE_FEATURES))
    ds.save_episode()

    # A repeated finalize() must be a no-op, not an error.
    for _ in range(2):
        ds.finalize()
|
|
|
|
|
|
def test_finalize_then_read_roundtrip(tmp_path):
    """Write data, finalize, re-open, and verify data matches."""
    root = tmp_path / "roundtrip"
    features = {"state": {"dtype": "float32", "shape": (2,), "names": None}}
    ds = LeRobotDataset.create(repo_id=DUMMY_REPO_ID, fps=DEFAULT_FPS, features=features, root=root)

    # Record known, easily distinguishable values.
    expected_states = [torch.tensor([float(i), float(i * 10)]) for i in range(5)]
    for state in expected_states:
        ds.add_frame({"task": "Test task", "state": state})
    ds.save_episode()
    ds.finalize()

    # Read back and compare against what was written.
    for idx, state in enumerate(expected_states):
        assert torch.allclose(ds[idx]["state"], state, atol=1e-5)
|