From 3615160d891f00a1cb8258ed8f81d327049b640e Mon Sep 17 00:00:00 2001 From: taken-yjyoon Date: Fri, 13 Feb 2026 02:13:51 +0900 Subject: [PATCH 01/21] fix(typo): Fixing wrong argparse examples in the comments (using 'True' not 'true') (#1040) Co-authored-by: juni <> --- src/lerobot/processor/pipeline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lerobot/processor/pipeline.py b/src/lerobot/processor/pipeline.py index 97ec716ff..8de376928 100644 --- a/src/lerobot/processor/pipeline.py +++ b/src/lerobot/processor/pipeline.py @@ -413,7 +413,7 @@ class DataProcessorPipeline(HubMixin, Generic[TInput, TOutput]): Args: save_directory: The directory where the pipeline will be saved. If None, saves to HF_LEROBOT_HOME/processors/{sanitized_pipeline_name}. - repo_id: ID of your repository on the Hub. Used only if `push_to_hub=True`. + repo_id: ID of your repository on the Hub. Used only if `push_to_hub=true`. push_to_hub: Whether or not to push your object to the Hugging Face Hub after saving it. card_kwargs: Additional arguments passed to the card template to customize the card. config_filename: The name of the JSON configuration file. 
If None, a name is From adebbcf090b47913d3f2e27bb27feccab174f2fc Mon Sep 17 00:00:00 2001 From: Caroline Pascal Date: Thu, 12 Feb 2026 18:56:04 +0100 Subject: [PATCH 02/21] fix(dataset tools draccus): fixing draccus parsing for dataset edit operation type specification (#2949) * fix(edit dataset operation): fixing dataset tools CLI operation type specification * test(edit dataset operation): adding tests for dataset tools operation type specification * chore(format): running pre-commit * chore(backward compatibility): adding a type property in OperationConfig for backward compatibility Signed-off-by: Caroline Pascal --- src/lerobot/scripts/lerobot_edit_dataset.py | 49 +++++++------- tests/scripts/test_edit_dataset_parsing.py | 71 +++++++++++++++++++++ 2 files changed, 96 insertions(+), 24 deletions(-) create mode 100644 tests/scripts/test_edit_dataset_parsing.py diff --git a/src/lerobot/scripts/lerobot_edit_dataset.py b/src/lerobot/scripts/lerobot_edit_dataset.py index 2ca9c520d..7c222ac6c 100644 --- a/src/lerobot/scripts/lerobot_edit_dataset.py +++ b/src/lerobot/scripts/lerobot_edit_dataset.py @@ -109,11 +109,14 @@ Using JSON config file: --config_path path/to/edit_config.json """ +import abc import logging import shutil from dataclasses import dataclass from pathlib import Path +import draccus + from lerobot.configs import parser from lerobot.datasets.dataset_tools import ( convert_image_to_video_dataset, @@ -129,39 +132,46 @@ from lerobot.utils.utils import init_logging @dataclass -class DeleteEpisodesConfig: - type: str = "delete_episodes" +class OperationConfig(draccus.ChoiceRegistry, abc.ABC): + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + +@OperationConfig.register_subclass("delete_episodes") +@dataclass +class DeleteEpisodesConfig(OperationConfig): episode_indices: list[int] | None = None +@OperationConfig.register_subclass("split") @dataclass -class SplitConfig: - type: str = "split" +class 
SplitConfig(OperationConfig): splits: dict[str, float | list[int]] | None = None +@OperationConfig.register_subclass("merge") @dataclass -class MergeConfig: - type: str = "merge" +class MergeConfig(OperationConfig): repo_ids: list[str] | None = None +@OperationConfig.register_subclass("remove_feature") @dataclass -class RemoveFeatureConfig: - type: str = "remove_feature" +class RemoveFeatureConfig(OperationConfig): feature_names: list[str] | None = None +@OperationConfig.register_subclass("modify_tasks") @dataclass -class ModifyTasksConfig: - type: str = "modify_tasks" +class ModifyTasksConfig(OperationConfig): new_task: str | None = None episode_tasks: dict[str, str] | None = None +@OperationConfig.register_subclass("convert_image_to_video") @dataclass -class ConvertImageToVideoConfig: - type: str = "convert_image_to_video" +class ConvertImageToVideoConfig(OperationConfig): output_dir: str | None = None vcodec: str = "libsvtav1" pix_fmt: str = "yuv420p" @@ -177,14 +187,7 @@ class ConvertImageToVideoConfig: @dataclass class EditDatasetConfig: repo_id: str - operation: ( - DeleteEpisodesConfig - | SplitConfig - | MergeConfig - | RemoveFeatureConfig - | ModifyTasksConfig - | ConvertImageToVideoConfig - ) + operation: OperationConfig root: str | None = None new_repo_id: str | None = None push_to_hub: bool = False @@ -450,10 +453,8 @@ def edit_dataset(cfg: EditDatasetConfig) -> None: elif operation_type == "convert_image_to_video": handle_convert_image_to_video(cfg) else: - raise ValueError( - f"Unknown operation type: {operation_type}\n" - f"Available operations: delete_episodes, split, merge, remove_feature, modify_tasks, convert_image_to_video" - ) + available = ", ".join(OperationConfig.get_known_choices()) + raise ValueError(f"Unknown operation: {operation_type}\nAvailable operations: {available}") def main() -> None: diff --git a/tests/scripts/test_edit_dataset_parsing.py b/tests/scripts/test_edit_dataset_parsing.py new file mode 100644 index 000000000..bf7386b52 
--- /dev/null +++ b/tests/scripts/test_edit_dataset_parsing.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python + +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import draccus +import pytest + +from lerobot.scripts.lerobot_edit_dataset import ( + ConvertImageToVideoConfig, + DeleteEpisodesConfig, + EditDatasetConfig, + MergeConfig, + ModifyTasksConfig, + OperationConfig, + RemoveFeatureConfig, + SplitConfig, +) + + +def parse_cfg(cli_args: list[str]) -> EditDatasetConfig: + """Helper to parse CLI args into an EditDatasetConfig via draccus.""" + return draccus.parse(EditDatasetConfig, args=cli_args) + + +class TestOperationTypeParsing: + """Test that --operation.type correctly selects the right config subclass.""" + + @pytest.mark.parametrize( + "type_name, expected_cls", + [ + ("delete_episodes", DeleteEpisodesConfig), + ("split", SplitConfig), + ("merge", MergeConfig), + ("remove_feature", RemoveFeatureConfig), + ("modify_tasks", ModifyTasksConfig), + ("convert_image_to_video", ConvertImageToVideoConfig), + ], + ) + def test_operation_type_resolves_correct_class(self, type_name, expected_cls): + cfg = parse_cfg(["--repo_id", "test/repo", "--operation.type", type_name]) + assert isinstance(cfg.operation, expected_cls), ( + f"Expected {expected_cls.__name__}, got {type(cfg.operation).__name__}" + ) + + @pytest.mark.parametrize( + "type_name, expected_cls", + [ + ("delete_episodes", DeleteEpisodesConfig), + 
("split", SplitConfig), + ("merge", MergeConfig), + ("remove_feature", RemoveFeatureConfig), + ("modify_tasks", ModifyTasksConfig), + ("convert_image_to_video", ConvertImageToVideoConfig), + ], + ) + def test_get_choice_name_roundtrips(self, type_name, expected_cls): + cfg = parse_cfg(["--repo_id", "test/repo", "--operation.type", type_name]) + resolved_name = OperationConfig.get_choice_name(type(cfg.operation)) + assert resolved_name == type_name From 6600b60e7f5cc7476ddc34beaaf0e0692f82e4b6 Mon Sep 17 00:00:00 2001 From: Pepijn <138571049+pkooij@users.noreply.github.com> Date: Fri, 13 Feb 2026 13:49:01 +0100 Subject: [PATCH 03/21] always use degrees (#2968) --- src/lerobot/robots/so_follower/config_so_follower.py | 2 +- src/lerobot/teleoperators/so_leader/config_so_leader.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lerobot/robots/so_follower/config_so_follower.py b/src/lerobot/robots/so_follower/config_so_follower.py index e9ce27123..1ee589bda 100644 --- a/src/lerobot/robots/so_follower/config_so_follower.py +++ b/src/lerobot/robots/so_follower/config_so_follower.py @@ -40,7 +40,7 @@ class SOFollowerConfig: cameras: dict[str, CameraConfig] = field(default_factory=dict) # Set to `True` for backward compatibility with previous policies/dataset - use_degrees: bool = False + use_degrees: bool = True @RobotConfig.register_subclass("so101_follower") diff --git a/src/lerobot/teleoperators/so_leader/config_so_leader.py b/src/lerobot/teleoperators/so_leader/config_so_leader.py index dd55196d7..2b4f782a7 100644 --- a/src/lerobot/teleoperators/so_leader/config_so_leader.py +++ b/src/lerobot/teleoperators/so_leader/config_so_leader.py @@ -28,7 +28,7 @@ class SOLeaderConfig: port: str # Whether to use degrees for angles - use_degrees: bool = False + use_degrees: bool = True @TeleoperatorConfig.register_subclass("so101_leader") From 51d3822d75491507561b5f11db0e62d56b342d93 Mon Sep 17 00:00:00 2001 From: masato-ka Date: Wed, 18 Feb 2026 04:09:42 
+0900 Subject: [PATCH 04/21] feat(datasets): Add info operation to lerobot-edit-dataset command (#2917) * Add New featrue to lerobot_edit_datset.py that show dataset information. * Fix to draccus error when happen give only --operation.type=info * Updating test and documents regarding lerobot-edit-dataset info function. * Updating documents regarding lerobot-edit-dataset extract function. option name in document is mistake. * feat(datasets): Update to align formatting with pre-commit.(#2917) Update to align formatting by pre-commit. --------- Co-authored-by: Caroline Pascal --- docs/source/using_dataset_tools.mdx | 25 ++++++++ src/lerobot/scripts/lerobot_edit_dataset.py | 65 +++++++++++++++++++++ tests/scripts/test_edit_dataset_parsing.py | 3 + 3 files changed, 93 insertions(+) diff --git a/docs/source/using_dataset_tools.mdx b/docs/source/using_dataset_tools.mdx index 9e662604e..f7fc9be20 100644 --- a/docs/source/using_dataset_tools.mdx +++ b/docs/source/using_dataset_tools.mdx @@ -12,6 +12,7 @@ LeRobot provides several utilities for manipulating datasets: 4. **Add Features** - Add new features to a dataset 5. **Remove Features** - Remove features from a dataset 6. **Convert to Video** - Convert image-based datasets to video format for efficient storage +7. **Show the Info of Datasets** - Show the summary of datasets information such as number of episode etc. The core implementation is in `lerobot.datasets.dataset_tools`. An example script detailing how to use the tools API is available in `examples/dataset/use_dataset_tools.py`. @@ -156,6 +157,30 @@ lerobot-edit-dataset \ **Note:** The resulting dataset will be a proper LeRobotDataset with all cameras encoded as videos in the `videos/` directory, with parquet files containing only metadata (no raw image data). All episodes, stats, and tasks are preserved. +### Show the information of datasets + +Show the information of datasets such as number of episode, number of frame, File size and so on. 
+No change will be made to the dataset + +```bash + +# Show dataset information without feature details +lerobot-edit-dataset \ + --repo_id lerobot/pusht_image \ + --operation.type info \ + +# Show dataset information with feature details +lerobot-edit-dataset \ + --repo_id lerobot/pusht_image \ + --operation.type info \ + --operation.show_features true + +``` + +**Parameters:** + +- `parameters`: The flag to control show or no show dataset information with feature details.(default=false) + ### Push to Hub Add the `--push_to_hub true` flag to any command to automatically upload the resulting dataset to the Hugging Face Hub: diff --git a/src/lerobot/scripts/lerobot_edit_dataset.py b/src/lerobot/scripts/lerobot_edit_dataset.py index 7c222ac6c..06e256fa2 100644 --- a/src/lerobot/scripts/lerobot_edit_dataset.py +++ b/src/lerobot/scripts/lerobot_edit_dataset.py @@ -104,6 +104,18 @@ Convert image dataset to video format and push to hub: --operation.type convert_image_to_video \ --push_to_hub true +Show dataset information: + python -m lerobot.scripts.lerobot_edit_dataset \ + --repo_id lerobot/pusht_image \ + --operation.type info \ + --operation.show_features true + +Show dataset information without feature details: + python -m lerobot.scripts.lerobot_edit_dataset \ + --repo_id lerobot/pusht_image \ + --operation.type info \ + --operation.show_features false + Using JSON config file: python -m lerobot.scripts.lerobot_edit_dataset \ --config_path path/to/edit_config.json @@ -112,6 +124,7 @@ Using JSON config file: import abc import logging import shutil +import sys from dataclasses import dataclass from pathlib import Path @@ -184,6 +197,13 @@ class ConvertImageToVideoConfig(OperationConfig): max_frames_per_batch: int | None = None +@OperationConfig.register_subclass("info") +@dataclass +class InfoConfig(OperationConfig): + type: str = "info" + show_features: bool = False + + @dataclass class EditDatasetConfig: repo_id: str @@ -436,6 +456,49 @@ def 
handle_convert_image_to_video(cfg: EditDatasetConfig) -> None: logging.info("Dataset saved locally (not pushed to hub)") +def _get_dataset_size(repo_path): + import os + + total = 0 + with os.scandir(repo_path) as it: + for entry in it: + if entry.is_file(): + total += entry.stat().st_size + elif entry.is_dir(): + total += _get_dataset_size(entry.path) + return total + + +def handle_info(cfg: EditDatasetConfig): + if not isinstance(cfg.operation, InfoConfig): + raise ValueError("Operation config must be InfoConfig") + + dataset = LeRobotDataset(cfg.repo_id, root=cfg.root) + sys.stdout.write(f"======Info {dataset.meta.repo_id}\n") + sys.stdout.write(f"Repository ID: {dataset.meta.repo_id} \n") + sys.stdout.write(f"Total episode: {dataset.meta.total_episodes} \n") + sys.stdout.write(f"Total task: {dataset.meta.total_tasks} \n") + sys.stdout.write(f"Total frame(Actual Count): {dataset.meta.total_frames}({len(dataset)}) \n") + sys.stdout.write( + f"Average frame per episode: {dataset.meta.total_frames / dataset.meta.total_episodes:.1f}\n" + ) + sys.stdout.write( + f"Average episode time(sec): {(dataset.meta.total_frames / dataset.meta.total_episodes) / dataset.meta.fps:.1f}\n" + ) + sys.stdout.write(f"FPS: {dataset.meta.fps}\n") + + total_file_size = _get_dataset_size(dataset.root) + sys.stdout.write(f"Size: {total_file_size / (1024 * 1024):.1f} MB\n") + if cfg.operation.show_features: + import json + + feature_dump_str = json.dumps( + dataset.meta.features, ensure_ascii=False, indent=4, sort_keys=True, separators=(",", ": ") + ) + sys.stdout.write("Features:\n") + sys.stdout.write(f"{feature_dump_str}\n") + + @parser.wrap() def edit_dataset(cfg: EditDatasetConfig) -> None: operation_type = cfg.operation.type @@ -452,6 +515,8 @@ def edit_dataset(cfg: EditDatasetConfig) -> None: handle_modify_tasks(cfg) elif operation_type == "convert_image_to_video": handle_convert_image_to_video(cfg) + elif operation_type == "info": + handle_info(cfg) else: available = ", 
".join(OperationConfig.get_known_choices()) raise ValueError(f"Unknown operation: {operation_type}\nAvailable operations: {available}") diff --git a/tests/scripts/test_edit_dataset_parsing.py b/tests/scripts/test_edit_dataset_parsing.py index bf7386b52..8800b92ee 100644 --- a/tests/scripts/test_edit_dataset_parsing.py +++ b/tests/scripts/test_edit_dataset_parsing.py @@ -21,6 +21,7 @@ from lerobot.scripts.lerobot_edit_dataset import ( ConvertImageToVideoConfig, DeleteEpisodesConfig, EditDatasetConfig, + InfoConfig, MergeConfig, ModifyTasksConfig, OperationConfig, @@ -46,6 +47,7 @@ class TestOperationTypeParsing: ("remove_feature", RemoveFeatureConfig), ("modify_tasks", ModifyTasksConfig), ("convert_image_to_video", ConvertImageToVideoConfig), + ("info", InfoConfig), ], ) def test_operation_type_resolves_correct_class(self, type_name, expected_cls): @@ -63,6 +65,7 @@ class TestOperationTypeParsing: ("remove_feature", RemoveFeatureConfig), ("modify_tasks", ModifyTasksConfig), ("convert_image_to_video", ConvertImageToVideoConfig), + ("info", InfoConfig), ], ) def test_get_choice_name_roundtrips(self, type_name, expected_cls): From 1c388c0002c609ca783bf42729a1e41532a1fba0 Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Tue, 17 Feb 2026 23:37:46 +0100 Subject: [PATCH 05/21] (Chore) Bump upper bound for torch version (#2897) * Bump upper torch version bound * Apply suggestion from @Copilot Signed-off-by: Vladislav Sovrasov * Update ref state dicts for schedulers * Support older than 2.8 torch versions * Fix precommit --------- Signed-off-by: Vladislav Sovrasov --- pyproject.toml | 6 +++--- tests/optim/test_schedulers.py | 14 ++++++++++++++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c4b1c547e..e5431ada3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -76,9 +76,9 @@ dependencies = [ "pyserial>=3.5,<4.0", "wandb>=0.24.0,<0.25.0", - "torch>=2.2.1,<2.8.0", # TODO: Bumb dependency - "torchcodec>=0.2.1,<0.6.0; 
sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')", # TODO: Bumb dependency - "torchvision>=0.21.0,<0.23.0", # TODO: Bumb dependency + "torch>=2.2.1,<2.11.0", # TODO: Bump dependency + "torchcodec>=0.2.1,<0.11.0; sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')", # TODO: Bump dependency + "torchvision>=0.21.0,<0.26.0", # TODO: Bump dependency "draccus==0.10.0", # TODO: Remove == "gymnasium>=1.1.1,<2.0.0", diff --git a/tests/optim/test_schedulers.py b/tests/optim/test_schedulers.py index 1e566a6ba..224613416 100644 --- a/tests/optim/test_schedulers.py +++ b/tests/optim/test_schedulers.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import torch +from packaging.version import Version from torch.optim.lr_scheduler import LambdaLR from lerobot.optim.schedulers import ( @@ -38,6 +40,10 @@ def test_diffuser_scheduler(optimizer): "last_epoch": 1, "lr_lambdas": [None], } + + if Version(torch.__version__) >= Version("2.8"): + expected_state_dict["_is_initial"] = False + assert scheduler.state_dict() == expected_state_dict @@ -56,6 +62,10 @@ def test_vqbet_scheduler(optimizer): "last_epoch": 1, "lr_lambdas": [None], } + + if Version(torch.__version__) >= Version("2.8"): + expected_state_dict["_is_initial"] = False + assert scheduler.state_dict() == expected_state_dict @@ -76,6 +86,10 @@ def test_cosine_decay_with_warmup_scheduler(optimizer): "last_epoch": 1, "lr_lambdas": [None], } + + if Version(torch.__version__) >= Version("2.8"): + expected_state_dict["_is_initial"] = False + assert scheduler.state_dict() == expected_state_dict From af036ce57e8ce2750f7fa57f4262c87a013bcdff Mon Sep 17 00:00:00 2001 From: Sota Nakamura <49087984+sotanakamura@users.noreply.github.com> Date: Wed, 18 Feb 2026 09:05:51 +0900 Subject: [PATCH 06/21] fix(scripts): serve grpc for a web viewer (#2881) * serve grpc for a web viewer * add help * remove ip detection * fix comment * pass grpc_port * fix(CLI): fixing CLI display-compressed-images argument 1/2 Co-authored-by: HUANG TZU-CHUN Signed-off-by: Caroline Pascal * fix(CLI): fixing CLI display-compressed-images argument 2/2 Co-authored-by: HUANG TZU-CHUN Signed-off-by: Caroline Pascal --------- Signed-off-by: Caroline Pascal Co-authored-by: Caroline Pascal Co-authored-by: HUANG TZU-CHUN Co-authored-by: Steven Palma --- src/lerobot/scripts/lerobot_dataset_viz.py | 37 +++++++++++++++------- 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/src/lerobot/scripts/lerobot_dataset_viz.py b/src/lerobot/scripts/lerobot_dataset_viz.py index 2cd48eab8..29d64554f 100644 --- a/src/lerobot/scripts/lerobot_dataset_viz.py +++ b/src/lerobot/scripts/lerobot_dataset_viz.py @@ 
-47,16 +47,14 @@ local$ rerun lerobot_pusht_episode_0.rrd ``` - Visualize data stored on a distant machine through streaming: -(You need to forward the websocket port to the distant machine, with -`ssh -L 9087:localhost:9087 username@remote-host`) ``` distant$ lerobot-dataset-viz \ --repo-id lerobot/pusht \ --episode-index 0 \ --mode distant \ - --ws-port 9087 + --grpc-port 9876 -local$ rerun ws://localhost:9087 +local$ rerun rerun+http://IP:GRPC_PORT/proxy ``` """ @@ -75,6 +73,7 @@ import tqdm from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.utils.constants import ACTION, DONE, OBS_STATE, REWARD +from lerobot.utils.utils import init_logging def to_hwc_uint8_numpy(chw_float32_torch: torch.Tensor) -> np.ndarray: @@ -93,10 +92,11 @@ def visualize_dataset( num_workers: int = 0, mode: str = "local", web_port: int = 9090, - ws_port: int = 9087, + grpc_port: int = 9876, save: bool = False, output_dir: Path | None = None, display_compressed_images: bool = False, + **kwargs, ) -> Path | None: if save: assert output_dir is not None, ( @@ -126,7 +126,9 @@ def visualize_dataset( gc.collect() if mode == "distant": - rr.serve_web_viewer(open_browser=False, web_port=web_port) + server_uri = rr.serve_grpc(grpc_port=grpc_port) + logging.info(f"Connect to a Rerun Server: rerun rerun+http://IP:{grpc_port}/proxy") + rr.serve_web_viewer(open_browser=False, web_port=web_port, connect_to=server_uri) logging.info("Logging to Rerun") @@ -226,7 +228,7 @@ def main(): "Mode of viewing between 'local' or 'distant'. " "'local' requires data to be on a local machine. It spawns a viewer to visualize the data locally. " "'distant' creates a server on the distant machine where the data is stored. " - "Visualize the data by connecting to the server with `rerun ws://localhost:PORT` on the local machine." + "Visualize the data by connecting to the server with `rerun rerun+http://IP:GRPC_PORT/proxy` on the local machine." 
), ) parser.add_argument( @@ -238,8 +240,13 @@ def main(): parser.add_argument( "--ws-port", type=int, - default=9087, - help="Web socket port for rerun.io when `--mode distant` is set.", + help="deprecated, please use --grpc-port instead.", + ) + parser.add_argument( + "--grpc-port", + type=int, + default=9876, + help="gRPC port for rerun.io when `--mode distant` is set.", ) parser.add_argument( "--save", @@ -265,9 +272,7 @@ def main(): parser.add_argument( "--display-compressed-images", - type=bool, - required=True, - default=False, + action="store_true", help="If set, display compressed images in Rerun instead of uncompressed ones.", ) @@ -277,6 +282,14 @@ def main(): root = kwargs.pop("root") tolerance_s = kwargs.pop("tolerance_s") + if kwargs["ws_port"] is not None: + logging.warning( + "--ws-port is deprecated and will be removed in future versions. Please use --grpc-port instead." + ) + logging.warning("Setting grpc_port to ws_port value.") + kwargs["grpc_port"] = kwargs.pop("ws_port") + + init_logging() logging.info("Loading dataset") dataset = LeRobotDataset(repo_id, episodes=[args.episode_index], root=root, tolerance_s=tolerance_s) From fcbf550952b3794e425e20d01ec76475be54be4e Mon Sep 17 00:00:00 2001 From: HUANG TZU-CHUN Date: Wed, 18 Feb 2026 18:27:40 +0800 Subject: [PATCH 07/21] fix(docs): update environment variable name to HF_LEROBOT_HOME in docstring (#2973) Co-authored-by: Steven Palma --- src/lerobot/datasets/lerobot_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lerobot/datasets/lerobot_dataset.py b/src/lerobot/datasets/lerobot_dataset.py index 36bffa190..360ed8d30 100644 --- a/src/lerobot/datasets/lerobot_dataset.py +++ b/src/lerobot/datasets/lerobot_dataset.py @@ -656,7 +656,7 @@ class LeRobotDataset(torch.utils.data.Dataset): repo_id (str): This is the repo id that will be used to fetch the dataset. Locally, the dataset will be stored under root/repo_id. 
root (Path | None, optional): Local directory to use for downloading/writing files. You can also - set the LEROBOT_HOME environment variable to point to a different location. Defaults to + set the HF_LEROBOT_HOME environment variable to point to a different location. Defaults to '~/.cache/huggingface/lerobot'. episodes (list[int] | None, optional): If specified, this will only load episodes specified by their episode_index in this list. Defaults to None. From b22e0315b05447efbc9a0eb1d612192aad0337c2 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 18 Feb 2026 17:32:25 +0100 Subject: [PATCH 08/21] fix(utils): more conservative sleep_margin default value in precise_sleep (#2985) --- src/lerobot/utils/robot_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lerobot/utils/robot_utils.py b/src/lerobot/utils/robot_utils.py index 28c8e7c49..656dc2649 100644 --- a/src/lerobot/utils/robot_utils.py +++ b/src/lerobot/utils/robot_utils.py @@ -16,14 +16,14 @@ import platform import time -def precise_sleep(seconds: float, spin_threshold: float = 0.010, sleep_margin: float = 0.003): +def precise_sleep(seconds: float, spin_threshold: float = 0.010, sleep_margin: float = 0.005): """ Wait for `seconds` with better precision than time.sleep alone at the expense of more CPU usage. Parameters: - seconds: duration to wait - spin_threshold: if remaining <= spin_threshold -> spin; otherwise sleep (seconds). Default 10ms - - sleep_margin: when sleeping leave this much time before deadline to avoid oversleep. Default 3ms + - sleep_margin: when sleeping leave this much time before deadline to avoid oversleep. Default 5ms Note: The default parameters are chosen to prioritize timing accuracy over CPU usage for the common 30 FPS use case. 
From 89bd58a9a26ec5820df13866b6ebc1670ed8cd83 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 18 Feb 2026 18:22:35 +0100 Subject: [PATCH 09/21] chore(scripts): warn if we don't respect the target FPS (#2986) --- src/lerobot/scripts/lerobot_record.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/lerobot/scripts/lerobot_record.py b/src/lerobot/scripts/lerobot_record.py index 0b39e6fff..216ab22a6 100644 --- a/src/lerobot/scripts/lerobot_record.py +++ b/src/lerobot/scripts/lerobot_record.py @@ -398,7 +398,14 @@ def record_loop( ) dt_s = time.perf_counter() - start_loop_t - precise_sleep(max(1 / fps - dt_s, 0.0)) + + sleep_time_s: float = 1 / fps - dt_s + if sleep_time_s < 0: + logging.warning( + f"Record loop is running slower ({1 / dt_s:.1f} Hz) than the target FPS ({fps} Hz). Dataset frames might be dropped and robot control might be unstable. Common causes are: 1) Camera FPS not keeping up 2) Policy inference taking too long 3) CPU starvation" + ) + + precise_sleep(max(sleep_time_s, 0.0)) timestamp = time.perf_counter() - start_episode_t From aaf37070587581b3ffa8a28b6c134e846afe3a2e Mon Sep 17 00:00:00 2001 From: Caroline Pascal Date: Wed, 18 Feb 2026 19:16:53 +0100 Subject: [PATCH 10/21] fix(filtering): fixing episodes filtering in load_nested_dataset to always use .from_parquet() (#2982) --- src/lerobot/datasets/utils.py | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/src/lerobot/datasets/utils.py b/src/lerobot/datasets/utils.py index 321ecedd5..da186bf30 100644 --- a/src/lerobot/datasets/utils.py +++ b/src/lerobot/datasets/utils.py @@ -122,19 +122,9 @@ def load_nested_dataset( raise FileNotFoundError(f"Provided directory does not contain any parquet file: {pq_dir}") with SuppressProgressBars(): - # When no filtering needed, Dataset uses memory-mapped loading for efficiency - # PyArrow loads the entire dataset into memory - if episodes is None: - return Dataset.from_parquet([str(path) for 
path in paths], features=features) - - arrow_dataset = pa_ds.dataset(paths, format="parquet") - filter_expr = pa_ds.field("episode_index").isin(episodes) - table = arrow_dataset.to_table(filter=filter_expr) - - if features is not None: - table = table.cast(features.arrow_schema) - - return Dataset(table) + # We use .from_parquet() memory-mapped loading for efficiency + filters = pa_ds.field("episode_index").isin(episodes) if episodes is not None else None + return Dataset.from_parquet([str(path) for path in paths], filters=filters, features=features) def get_parquet_num_frames(parquet_path: str | Path) -> int: From bc38261321f377621a05595914798023bc05d301 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 18 Feb 2026 20:05:15 +0100 Subject: [PATCH 11/21] feat(robots): use read_latest() camera (#2987) * feat(robots): use read_latest() camera * fix(test): add read_latest reachy cam mock --- src/lerobot/cameras/camera.py | 2 +- src/lerobot/cameras/opencv/camera_opencv.py | 2 +- src/lerobot/cameras/reachy2_camera/reachy2_camera.py | 2 +- src/lerobot/cameras/realsense/camera_realsense.py | 2 +- src/lerobot/robots/hope_jr/hope_jr_arm.py | 2 +- src/lerobot/robots/hope_jr/hope_jr_hand.py | 2 +- src/lerobot/robots/koch_follower/koch_follower.py | 2 +- src/lerobot/robots/lekiwi/lekiwi.py | 2 +- src/lerobot/robots/omx_follower/omx_follower.py | 2 +- src/lerobot/robots/openarm_follower/openarm_follower.py | 2 +- src/lerobot/robots/reachy2/robot_reachy2.py | 2 +- src/lerobot/robots/so_follower/so_follower.py | 2 +- src/lerobot/robots/unitree_g1/unitree_g1.py | 2 +- tests/robots/test_reachy2.py | 1 + 14 files changed, 14 insertions(+), 13 deletions(-) diff --git a/src/lerobot/cameras/camera.py b/src/lerobot/cameras/camera.py index 2894e0215..2a53d2544 100644 --- a/src/lerobot/cameras/camera.py +++ b/src/lerobot/cameras/camera.py @@ -150,7 +150,7 @@ class Camera(abc.ABC): """ pass - def read_latest(self, max_age_ms: int = 1000) -> NDArray[Any]: + def read_latest(self, 
max_age_ms: int = 500) -> NDArray[Any]: """Return the most recent frame captured immediately (Peeking). This method is non-blocking and returns whatever is currently in the diff --git a/src/lerobot/cameras/opencv/camera_opencv.py b/src/lerobot/cameras/opencv/camera_opencv.py index 10b3f21da..f3289ddc7 100644 --- a/src/lerobot/cameras/opencv/camera_opencv.py +++ b/src/lerobot/cameras/opencv/camera_opencv.py @@ -530,7 +530,7 @@ class OpenCVCamera(Camera): return frame @check_if_not_connected - def read_latest(self, max_age_ms: int = 1000) -> NDArray[Any]: + def read_latest(self, max_age_ms: int = 500) -> NDArray[Any]: """Return the most recent frame captured immediately (Peeking). This method is non-blocking and returns whatever is currently in the diff --git a/src/lerobot/cameras/reachy2_camera/reachy2_camera.py b/src/lerobot/cameras/reachy2_camera/reachy2_camera.py index 0c1dc43d8..9bef957bc 100644 --- a/src/lerobot/cameras/reachy2_camera/reachy2_camera.py +++ b/src/lerobot/cameras/reachy2_camera/reachy2_camera.py @@ -201,7 +201,7 @@ class Reachy2Camera(Camera): return self.read() @check_if_not_connected - def read_latest(self, max_age_ms: int = 1000) -> NDArray[Any]: + def read_latest(self, max_age_ms: int = 500) -> NDArray[Any]: """Return the most recent frame captured immediately (Peeking). This method is non-blocking and returns whatever is currently in the diff --git a/src/lerobot/cameras/realsense/camera_realsense.py b/src/lerobot/cameras/realsense/camera_realsense.py index d599cdce0..d80ec8093 100644 --- a/src/lerobot/cameras/realsense/camera_realsense.py +++ b/src/lerobot/cameras/realsense/camera_realsense.py @@ -573,7 +573,7 @@ class RealSenseCamera(Camera): # NOTE(Steven): Missing implementation for depth for now @check_if_not_connected - def read_latest(self, max_age_ms: int = 1000) -> NDArray[Any]: + def read_latest(self, max_age_ms: int = 500) -> NDArray[Any]: """Return the most recent (color) frame captured immediately (Peeking). 
This method is non-blocking and returns whatever is currently in the diff --git a/src/lerobot/robots/hope_jr/hope_jr_arm.py b/src/lerobot/robots/hope_jr/hope_jr_arm.py index 5fd9c4d1d..e8269ae46 100644 --- a/src/lerobot/robots/hope_jr/hope_jr_arm.py +++ b/src/lerobot/robots/hope_jr/hope_jr_arm.py @@ -140,7 +140,7 @@ class HopeJrArm(Robot): # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() - obs_dict[cam_key] = cam.async_read() + obs_dict[cam_key] = cam.read_latest() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") diff --git a/src/lerobot/robots/hope_jr/hope_jr_hand.py b/src/lerobot/robots/hope_jr/hope_jr_hand.py index 1e5c72b72..a05c4bbcb 100644 --- a/src/lerobot/robots/hope_jr/hope_jr_hand.py +++ b/src/lerobot/robots/hope_jr/hope_jr_hand.py @@ -171,7 +171,7 @@ class HopeJrHand(Robot): # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() - obs_dict[cam_key] = cam.async_read() + obs_dict[cam_key] = cam.read_latest() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") diff --git a/src/lerobot/robots/koch_follower/koch_follower.py b/src/lerobot/robots/koch_follower/koch_follower.py index fee0adba9..53a32beed 100644 --- a/src/lerobot/robots/koch_follower/koch_follower.py +++ b/src/lerobot/robots/koch_follower/koch_follower.py @@ -193,7 +193,7 @@ class KochFollower(Robot): # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() - obs_dict[cam_key] = cam.async_read() + obs_dict[cam_key] = cam.read_latest() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") diff --git a/src/lerobot/robots/lekiwi/lekiwi.py b/src/lerobot/robots/lekiwi/lekiwi.py index 54848f49d..9d11a000f 100644 --- a/src/lerobot/robots/lekiwi/lekiwi.py +++ b/src/lerobot/robots/lekiwi/lekiwi.py @@ -360,7 +360,7 @@ class 
LeKiwi(Robot): # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() - obs_dict[cam_key] = cam.async_read() + obs_dict[cam_key] = cam.read_latest() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") diff --git a/src/lerobot/robots/omx_follower/omx_follower.py b/src/lerobot/robots/omx_follower/omx_follower.py index a171affbd..e0b612c60 100644 --- a/src/lerobot/robots/omx_follower/omx_follower.py +++ b/src/lerobot/robots/omx_follower/omx_follower.py @@ -176,7 +176,7 @@ class OmxFollower(Robot): # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() - obs_dict[cam_key] = cam.async_read() + obs_dict[cam_key] = cam.read_latest() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") diff --git a/src/lerobot/robots/openarm_follower/openarm_follower.py b/src/lerobot/robots/openarm_follower/openarm_follower.py index d6794a226..c865f1ec1 100644 --- a/src/lerobot/robots/openarm_follower/openarm_follower.py +++ b/src/lerobot/robots/openarm_follower/openarm_follower.py @@ -241,7 +241,7 @@ class OpenArmFollower(Robot): # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() - obs_dict[cam_key] = cam.async_read() + obs_dict[cam_key] = cam.read_latest() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") diff --git a/src/lerobot/robots/reachy2/robot_reachy2.py b/src/lerobot/robots/reachy2/robot_reachy2.py index 6f4eef56c..fb466f85b 100644 --- a/src/lerobot/robots/reachy2/robot_reachy2.py +++ b/src/lerobot/robots/reachy2/robot_reachy2.py @@ -180,7 +180,7 @@ class Reachy2Robot(Robot): # Capture images from cameras for cam_key, cam in self.cameras.items(): - obs_dict[cam_key] = cam.async_read() + obs_dict[cam_key] = cam.read_latest() return obs_dict diff --git a/src/lerobot/robots/so_follower/so_follower.py 
b/src/lerobot/robots/so_follower/so_follower.py index b4d11fe3f..bc72a2b6a 100644 --- a/src/lerobot/robots/so_follower/so_follower.py +++ b/src/lerobot/robots/so_follower/so_follower.py @@ -187,7 +187,7 @@ class SOFollower(Robot): # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() - obs_dict[cam_key] = cam.async_read() + obs_dict[cam_key] = cam.read_latest() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") diff --git a/src/lerobot/robots/unitree_g1/unitree_g1.py b/src/lerobot/robots/unitree_g1/unitree_g1.py index 01b4f330e..df0de8f19 100644 --- a/src/lerobot/robots/unitree_g1/unitree_g1.py +++ b/src/lerobot/robots/unitree_g1/unitree_g1.py @@ -324,7 +324,7 @@ class UnitreeG1(Robot): # Cameras - read images from ZMQ cameras for cam_name, cam in self._cameras.items(): - obs[cam_name] = cam.async_read() + obs[cam_name] = cam.read_latest() return obs diff --git a/tests/robots/test_reachy2.py b/tests/robots/test_reachy2.py index d3c44bf5a..d3f32b1c2 100644 --- a/tests/robots/test_reachy2.py +++ b/tests/robots/test_reachy2.py @@ -142,6 +142,7 @@ def _make_reachy2_camera_mock(*args, **kwargs): cam.connect = MagicMock() cam.disconnect = MagicMock() cam.async_read = MagicMock(side_effect=lambda: np.zeros((height, width, 3), dtype=np.uint8)) + cam.read_latest = MagicMock(side_effect=lambda: np.zeros((height, width, 3), dtype=np.uint8)) return cam From 5f15232271a81ee6be16cec1960e300f55f25466 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 18 Feb 2026 22:46:12 +0100 Subject: [PATCH 12/21] chore: remove usernames + use entrypoints in docs, comments & sample commands (#2988) --- benchmarks/video/README.md | 84 +++++++++---------- docs/source/earthrover_mini_plus.mdx | 2 +- docs/source/hope_jr.mdx | 10 +-- docs/source/pi0.mdx | 2 +- docs/source/pi05.mdx | 2 +- docs/source/sarm.mdx | 8 +- docs/source/unitree_g1.mdx | 4 +- docs/source/walloss.mdx | 2 +- docs/source/xvla.mdx | 2 
+- examples/backward_compatibility/replay.py | 2 +- examples/rtc/eval_dataset.py | 20 ++--- examples/rtc/eval_with_real_robot.py | 6 +- .../v30/convert_dataset_v21_to_v30.py | 2 +- .../policies/sarm/compute_rabc_weights.py | 10 +-- .../policies/smolvla/modeling_smolvla.py | 4 +- src/lerobot/scripts/lerobot_edit_dataset.py | 32 +++---- src/lerobot/scripts/lerobot_replay.py | 2 +- 17 files changed, 97 insertions(+), 97 deletions(-) diff --git a/benchmarks/video/README.md b/benchmarks/video/README.md index 490a4b495..1feee69c4 100644 --- a/benchmarks/video/README.md +++ b/benchmarks/video/README.md @@ -28,9 +28,9 @@ We don't expect the same optimal settings for a dataset of images from a simulat For these reasons, we run this benchmark on four representative datasets: - `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera. -- `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera. -- `aliberts/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera. -- `aliberts/kitchen`: (1080 x 1920 pixels) real-world indoor, fixed camera. +- `lerobot/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera. +- `lerobot/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera. +- `lerobot/kitchen`: (1080 x 1920 pixels) real-world indoor, fixed camera. Note: The datasets used for this benchmark need to be image datasets, not video datasets. 
@@ -179,7 +179,7 @@ python benchmark/video/run_video_benchmark.py \ --output-dir outputs/video_benchmark \ --repo-ids \ lerobot/pusht_image \ - aliberts/aloha_mobile_shrimp_image \ + lerobot/aloha_mobile_shrimp_image \ --vcodec libx264 libx265 \ --pix-fmt yuv444p yuv420p \ --g 2 20 None \ @@ -203,9 +203,9 @@ python benchmark/video/run_video_benchmark.py \ --output-dir outputs/video_benchmark \ --repo-ids \ lerobot/pusht_image \ - aliberts/aloha_mobile_shrimp_image \ - aliberts/paris_street \ - aliberts/kitchen \ + lerobot/aloha_mobile_shrimp_image \ + lerobot/paris_street \ + lerobot/kitchen \ --vcodec libx264 libx265 \ --pix-fmt yuv444p yuv420p \ --g 1 2 3 4 5 6 10 15 20 40 None \ @@ -221,9 +221,9 @@ python benchmark/video/run_video_benchmark.py \ --output-dir outputs/video_benchmark \ --repo-ids \ lerobot/pusht_image \ - aliberts/aloha_mobile_shrimp_image \ - aliberts/paris_street \ - aliberts/kitchen \ + lerobot/aloha_mobile_shrimp_image \ + lerobot/paris_street \ + lerobot/kitchen \ --vcodec libsvtav1 \ --pix-fmt yuv420p \ --g 1 2 3 4 5 6 10 15 20 40 None \ @@ -252,37 +252,37 @@ Since we're using av1 encoding, we're choosing the `pyav` decoder as `video_read These tables show the results for `g=2` and `crf=30`, using `timestamps-modes=6_frames` and `backend=pyav` -| video_images_size_ratio | vcodec | pix_fmt | | | | -| ---------------------------------- | ---------- | ------- | --------- | --------- | --------- | -| | libx264 | | libx265 | | libsvtav1 | -| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | -| lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% | -| aliberts/aloha_mobile_shrimp_image | 2.14% | 2.11% | 1.38% | **1.37%** | 5.59% | -| aliberts/paris_street | 2.12% | 2.13% | **1.54%** | **1.54%** | 4.43% | -| aliberts/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% | +| video_images_size_ratio | vcodec | pix_fmt | | | | +| --------------------------------- | ---------- | ------- | --------- | --------- | --------- | 
+| | libx264 | | libx265 | | libsvtav1 | +| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | +| lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% | +| lerobot/aloha_mobile_shrimp_image | 2.14% | 2.11% | 1.38% | **1.37%** | 5.59% | +| lerobot/paris_street | 2.12% | 2.13% | **1.54%** | **1.54%** | 4.43% | +| lerobot/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% | -| video_images_load_time_ratio | vcodec | pix_fmt | | | | -| ---------------------------------- | ------- | ------- | -------- | ------- | --------- | -| | libx264 | | libx265 | | libsvtav1 | -| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | -| lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 | -| aliberts/aloha_mobile_shrimp_image | 11.80 | 7.92 | 0.71 | 0.85 | **0.48** | -| aliberts/paris_street | 2.21 | 2.05 | 0.36 | 0.49 | **0.30** | -| aliberts/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** | +| video_images_load_time_ratio | vcodec | pix_fmt | | | | +| --------------------------------- | ------- | ------- | -------- | ------- | --------- | +| | libx264 | | libx265 | | libsvtav1 | +| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | +| lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 | +| lerobot/aloha_mobile_shrimp_image | 11.80 | 7.92 | 0.71 | 0.85 | **0.48** | +| lerobot/paris_street | 2.21 | 2.05 | 0.36 | 0.49 | **0.30** | +| lerobot/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** | -| | | vcodec | pix_fmt | | | | -| ---------------------------------- | -------- | -------- | ------------ | -------- | --------- | ------------ | -| | | libx264 | | libx265 | | libsvtav1 | -| repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | -| lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 | -| | avg_psnr | 35.44 | 37.07 | 35.49 | **37.30** | 37.20 | -| | avg_ssim | 98.28% | **98.85%** | 98.31% | 98.84% | 98.72% | -| aliberts/aloha_mobile_shrimp_image | avg_mse | 2.76E-04 | 
2.59E-04 | 3.17E-04 | 3.06E-04 | **1.30E-04** | -| | avg_psnr | 35.91 | 36.21 | 35.88 | 36.09 | **40.17** | -| | avg_ssim | 95.19% | 95.18% | 95.00% | 95.05% | **97.73%** | -| aliberts/paris_street | avg_mse | 6.89E-04 | 6.70E-04 | 4.03E-03 | 4.02E-03 | **3.09E-04** | -| | avg_psnr | 33.48 | 33.68 | 32.05 | 32.15 | **35.40** | -| | avg_ssim | 93.76% | 93.75% | 89.46% | 89.46% | **95.46%** | -| aliberts/kitchen | avg_mse | 2.50E-04 | 2.24E-04 | 4.28E-04 | 4.18E-04 | **1.53E-04** | -| | avg_psnr | 36.73 | 37.33 | 36.56 | 36.75 | **39.12** | -| | avg_ssim | 95.47% | 95.58% | 95.52% | 95.53% | **96.82%** | +| | | vcodec | pix_fmt | | | | +| --------------------------------- | -------- | -------- | ------------ | -------- | --------- | ------------ | +| | | libx264 | | libx265 | | libsvtav1 | +| repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | +| lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 | +| | avg_psnr | 35.44 | 37.07 | 35.49 | **37.30** | 37.20 | +| | avg_ssim | 98.28% | **98.85%** | 98.31% | 98.84% | 98.72% | +| lerobot/aloha_mobile_shrimp_image | avg_mse | 2.76E-04 | 2.59E-04 | 3.17E-04 | 3.06E-04 | **1.30E-04** | +| | avg_psnr | 35.91 | 36.21 | 35.88 | 36.09 | **40.17** | +| | avg_ssim | 95.19% | 95.18% | 95.00% | 95.05% | **97.73%** | +| lerobot/paris_street | avg_mse | 6.89E-04 | 6.70E-04 | 4.03E-03 | 4.02E-03 | **3.09E-04** | +| | avg_psnr | 33.48 | 33.68 | 32.05 | 32.15 | **35.40** | +| | avg_ssim | 93.76% | 93.75% | 89.46% | 89.46% | **95.46%** | +| lerobot/kitchen | avg_mse | 2.50E-04 | 2.24E-04 | 4.28E-04 | 4.18E-04 | **1.53E-04** | +| | avg_psnr | 36.73 | 37.33 | 36.56 | 36.75 | **39.12** | +| | avg_ssim | 95.47% | 95.58% | 95.52% | 95.53% | **96.82%** | diff --git a/docs/source/earthrover_mini_plus.mdx b/docs/source/earthrover_mini_plus.mdx index d8083336a..dd9c2ad2b 100644 --- a/docs/source/earthrover_mini_plus.mdx +++ b/docs/source/earthrover_mini_plus.mdx @@ -185,7 +185,7 @@ echo 
$HF_USER Use the standard recording command: ```bash -python src/lerobot/scripts/lerobot_record.py \ +lerobot-record \ --robot.type=earthrover_mini_plus \ --teleop.type=keyboard_rover \ --dataset.repo_id=your_username/dataset_name \ diff --git a/docs/source/hope_jr.mdx b/docs/source/hope_jr.mdx index 856febb95..026cd084a 100644 --- a/docs/source/hope_jr.mdx +++ b/docs/source/hope_jr.mdx @@ -224,7 +224,7 @@ lerobot-record \ --teleop.port=/dev/tty.usbmodem1201 \ --teleop.id=right \ --teleop.side=right \ - --dataset.repo_id=nepyope/hand_record_test_with_video_data \ + --dataset.repo_id=/hand_record_test_with_video_data \ --dataset.single_task="Hand recording test with video data" \ --dataset.num_episodes=1 \ --dataset.episode_time_s=5 \ @@ -241,7 +241,7 @@ lerobot-replay \ --robot.port=/dev/tty.usbmodem58760432281 \ --robot.id=right \ --robot.side=right \ - --dataset.repo_id=nepyope/hand_record_test_with_camera \ + --dataset.repo_id=/hand_record_test_with_camera \ --dataset.episode=0 ``` @@ -249,13 +249,13 @@ lerobot-replay \ ```bash lerobot-train \ - --dataset.repo_id=nepyope/hand_record_test_with_video_data \ + --dataset.repo_id=/hand_record_test_with_video_data \ --policy.type=act \ --output_dir=outputs/train/hopejr_hand \ --job_name=hopejr \ --policy.device=mps \ --wandb.enable=true \ - --policy.repo_id=nepyope/hand_test_policy + --policy.repo_id=/hand_test_policy ``` ### Evaluate @@ -270,7 +270,7 @@ lerobot-record \ --robot.side=right \ --robot.cameras='{"main": {"type": "opencv", "index_or_path": 0, "width": 640, "height": 480, "fps": 30}}' \ --display_data=false \ - --dataset.repo_id=nepyope/eval_hopejr \ + --dataset.repo_id=/eval_hopejr \ --dataset.single_task="Evaluate hopejr hand policy" \ --dataset.num_episodes=10 \ --policy.path=outputs/train/hopejr_hand/checkpoints/last/pretrained_model diff --git a/docs/source/pi0.mdx b/docs/source/pi0.mdx index 93e0b4c88..879bbd16d 100644 --- a/docs/source/pi0.mdx +++ b/docs/source/pi0.mdx @@ -60,7 +60,7 @@ 
policy.type=pi0 For training π₀, you can use the standard LeRobot training script with the appropriate configuration: ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your_dataset \ --policy.type=pi0 \ --output_dir=./outputs/pi0_training \ diff --git a/docs/source/pi05.mdx b/docs/source/pi05.mdx index dbf118aa3..8abaca989 100644 --- a/docs/source/pi05.mdx +++ b/docs/source/pi05.mdx @@ -56,7 +56,7 @@ policy.type=pi05 Here's a complete training command for finetuning the base π₀.₅ model on your own dataset: ```bash -python src/lerobot/scripts/lerobot_train.py\ +lerobot-train \ --dataset.repo_id=your_dataset \ --policy.type=pi05 \ --output_dir=./outputs/pi05_training \ diff --git a/docs/source/sarm.mdx b/docs/source/sarm.mdx index 65e49792b..cd488fe1f 100644 --- a/docs/source/sarm.mdx +++ b/docs/source/sarm.mdx @@ -269,7 +269,7 @@ This generates visualizations showing video frames with subtask boundaries overl Train with **no annotations** - uses linear progress from 0 to 1: ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your-username/your-dataset \ --policy.type=sarm \ --policy.annotation_mode=single_stage \ @@ -288,7 +288,7 @@ python src/lerobot/scripts/lerobot_train.py \ Train with **dense annotations only** (sparse auto-generated): ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your-username/your-dataset \ --policy.type=sarm \ --policy.annotation_mode=dense_only \ @@ -307,7 +307,7 @@ python src/lerobot/scripts/lerobot_train.py \ Train with **both sparse and dense annotations**: ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your-username/your-dataset \ --policy.type=sarm \ --policy.annotation_mode=dual \ @@ -468,7 +468,7 @@ This script: Once you have the progress file, train your policy with RA-BC weighting. The progress file is auto-detected from the dataset path (`sarm_progress.parquet`). 
Currently PI0, PI0.5 and SmolVLA are supported with RA-BC: ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your-username/your-dataset \ --policy.type=pi0 \ --use_rabc=true \ diff --git a/docs/source/unitree_g1.mdx b/docs/source/unitree_g1.mdx index ea6bf54ad..4c5d28924 100644 --- a/docs/source/unitree_g1.mdx +++ b/docs/source/unitree_g1.mdx @@ -216,7 +216,7 @@ lerobot-teleoperate \ ### Record Dataset in Simulation ```bash -python -m lerobot.scripts.lerobot_record \ +lerobot-record \ --robot.type=unitree_g1 \ --robot.is_simulation=true \ --robot.cameras='{"global_view": {"type": "zmq", "server_address": "localhost", "port": 5555, "camera_name": "head_camera", "width": 640, "height": 480, "fps": 30}}' \ @@ -266,7 +266,7 @@ lerobot-teleoperate \ ### Record Dataset on Real Robot ```bash -python -m lerobot.scripts.lerobot_record \ +lerobot-record \ --robot.type=unitree_g1 \ --robot.is_simulation=false \ --robot.cameras='{"global_view": {"type": "zmq", "server_address": "172.18.129.215", "port": 5555, "camera_name": "head_camera", "width": 640, "height": 480, "fps": 30}}' \ diff --git a/docs/source/walloss.mdx b/docs/source/walloss.mdx index c0756c087..e9785cc93 100644 --- a/docs/source/walloss.mdx +++ b/docs/source/walloss.mdx @@ -45,7 +45,7 @@ policy.type=wall_x For training WallX, you can use the standard LeRobot training script with the appropriate configuration: ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your_dataset \ --policy.type=wall_x \ --output_dir=./outputs/wallx_training \ diff --git a/docs/source/xvla.mdx b/docs/source/xvla.mdx index dd7d1ef57..97e04d4ec 100644 --- a/docs/source/xvla.mdx +++ b/docs/source/xvla.mdx @@ -154,7 +154,7 @@ lerobot-train \ ```bash lerobot-train \ - --dataset.repo_id=pepijn223/bimanual-so100-handover-cube \ + --dataset.repo_id=/bimanual-so100-handover-cube \ --output_dir=./outputs/xvla_bimanual \ --job_name=xvla_so101_training \ 
--policy.path="lerobot/xvla-base" \ diff --git a/examples/backward_compatibility/replay.py b/examples/backward_compatibility/replay.py index 8de5ba197..f7c47bec5 100644 --- a/examples/backward_compatibility/replay.py +++ b/examples/backward_compatibility/replay.py @@ -22,7 +22,7 @@ lerobot-replay \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.id=black \ - --dataset.repo_id=aliberts/record-test \ + --dataset.repo_id=/record-test \ --dataset.episode=2 ``` """ diff --git a/examples/rtc/eval_dataset.py b/examples/rtc/eval_dataset.py index 4652df107..613fd67d7 100644 --- a/examples/rtc/eval_dataset.py +++ b/examples/rtc/eval_dataset.py @@ -27,8 +27,8 @@ measuring consistency and ground truth alignment. Usage: # Basic usage with smolvla policy uv run python examples/rtc/eval_dataset.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ - --dataset.repo_id=helper2424/check_rtc \ + --policy.path=/smolvla_check_rtc_last3 \ + --dataset.repo_id=/check_rtc \ --rtc.execution_horizon=8 \ --device=mps \ --rtc.max_guidance_weight=10.0 \ @@ -58,16 +58,16 @@ Usage: --device=cuda uv run python examples/rtc/eval_dataset.py \ - --policy.path=lipsop/reuben_pi0 \ - --dataset.repo_id=ReubenLim/so101_cube_in_cup \ + --policy.path=/reuben_pi0 \ + --dataset.repo_id=/so101_cube_in_cup \ --rtc.execution_horizon=8 \ --device=cuda # With torch.compile for faster inference (PyTorch 2.0+) # Note: CUDA graphs disabled by default due to in-place ops in denoising loop uv run python examples/rtc/eval_dataset.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ - --dataset.repo_id=helper2424/check_rtc \ + --policy.path=/smolvla_check_rtc_last3 \ + --dataset.repo_id=/check_rtc \ --rtc.execution_horizon=8 \ --device=mps \ --use_torch_compile=true \ @@ -75,8 +75,8 @@ Usage: # With torch.compile on CUDA (CUDA graphs disabled by default) uv run python examples/rtc/eval_dataset.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ - 
--dataset.repo_id=helper2424/check_rtc \ + --policy.path=/smolvla_check_rtc_last3 \ + --dataset.repo_id=/check_rtc \ --rtc.execution_horizon=8 \ --device=cuda \ --use_torch_compile=true \ @@ -84,8 +84,8 @@ Usage: # Enable CUDA graphs (advanced - may cause tensor aliasing errors) uv run python examples/rtc/eval_dataset.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ - --dataset.repo_id=helper2424/check_rtc \ + --policy.path=/smolvla_check_rtc_last3 \ + --dataset.repo_id=/check_rtc \ --use_torch_compile=true \ --torch_compile_backend=inductor \ --torch_compile_mode=max-autotune \ diff --git a/examples/rtc/eval_with_real_robot.py b/examples/rtc/eval_with_real_robot.py index 1470899d9..4c803eb7e 100644 --- a/examples/rtc/eval_with_real_robot.py +++ b/examples/rtc/eval_with_real_robot.py @@ -28,7 +28,7 @@ For simulation environments, see eval_with_simulation.py Usage: # Run RTC with Real robot with RTC uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ + --policy.path=/smolvla_check_rtc_last3 \ --policy.device=mps \ --rtc.enabled=true \ --rtc.execution_horizon=20 \ @@ -41,7 +41,7 @@ Usage: # Run RTC with Real robot without RTC uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ + --policy.path=/smolvla_check_rtc_last3 \ --policy.device=mps \ --rtc.enabled=false \ --robot.type=so100_follower \ @@ -53,7 +53,7 @@ Usage: # Run RTC with Real robot with pi0.5 policy uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=helper2424/pi05_check_rtc \ + --policy.path=/pi05_check_rtc \ --policy.device=mps \ --rtc.enabled=true \ --rtc.execution_horizon=20 \ diff --git a/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py b/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py index 74be6bfa4..7be37a1b1 100644 --- a/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py +++ b/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py @@ -529,7 +529,7 @@ if __name__ == 
"__main__": type=str, required=True, help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset " - "(e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).", + "(e.g. `lerobot/pusht`, `/aloha_sim_insertion_human`).", ) parser.add_argument( "--branch", diff --git a/src/lerobot/policies/sarm/compute_rabc_weights.py b/src/lerobot/policies/sarm/compute_rabc_weights.py index 5b6ea6e9b..485c1096b 100644 --- a/src/lerobot/policies/sarm/compute_rabc_weights.py +++ b/src/lerobot/policies/sarm/compute_rabc_weights.py @@ -27,18 +27,18 @@ Usage: # Full RA-BC computation with visualizations python src/lerobot/policies/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ - --reward-model-path pepijn223/sarm_single_uni4 + --reward-model-path /sarm_single_uni4 # Faster computation with stride (compute every 5 frames, interpolate the rest) python src/lerobot/policies/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ - --reward-model-path pepijn223/sarm_single_uni4 \\ + --reward-model-path /sarm_single_uni4 \\ --stride 5 # Visualize predictions only (no RA-BC computation) python src/lerobot/policies/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ - --reward-model-path pepijn223/sarm_single_uni4 \\ + --reward-model-path /sarm_single_uni4 \\ --visualize-only \\ --num-visualizations 5 @@ -714,12 +714,12 @@ Examples: # Full RA-BC computation with visualizations python src/lerobot/policies/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ - --reward-model-path pepijn223/sarm_single_uni4 + --reward-model-path /sarm_single_uni4 # Visualize predictions only (no RA-BC computation) python src/lerobot/policies/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ - --reward-model-path pepijn223/sarm_single_uni4 \\ + --reward-model-path /sarm_single_uni4 \\ 
--visualize-only \\ --num-visualizations 10 """, diff --git a/src/lerobot/policies/smolvla/modeling_smolvla.py b/src/lerobot/policies/smolvla/modeling_smolvla.py index 60b968a42..10544a949 100644 --- a/src/lerobot/policies/smolvla/modeling_smolvla.py +++ b/src/lerobot/policies/smolvla/modeling_smolvla.py @@ -30,7 +30,7 @@ Example of finetuning the smolvla pretrained model (`smolvla_base`): ```bash lerobot-train \ --policy.path=lerobot/smolvla_base \ ---dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \ +--dataset.repo_id=/svla_so100_task1_v3 \ --batch_size=64 \ --steps=200000 ``` @@ -40,7 +40,7 @@ and an action expert. ```bash lerobot-train \ --policy.type=smolvla \ ---dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \ +--dataset.repo_id=/svla_so100_task1_v3 \ --batch_size=64 \ --steps=200000 ``` diff --git a/src/lerobot/scripts/lerobot_edit_dataset.py b/src/lerobot/scripts/lerobot_edit_dataset.py index 06e256fa2..afdc95efd 100644 --- a/src/lerobot/scripts/lerobot_edit_dataset.py +++ b/src/lerobot/scripts/lerobot_edit_dataset.py @@ -24,100 +24,100 @@ When new_repo_id is specified, creates a new dataset. 
Usage Examples: Delete episodes 0, 2, and 5 from a dataset: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type delete_episodes \ --operation.episode_indices "[0, 2, 5]" Delete episodes and save to a new dataset: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --new_repo_id lerobot/pusht_filtered \ --operation.type delete_episodes \ --operation.episode_indices "[0, 2, 5]" Split dataset by fractions: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type split \ --operation.splits '{"train": 0.8, "val": 0.2}' Split dataset by episode indices: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type split \ --operation.splits '{"train": [0, 1, 2, 3], "val": [4, 5]}' Split into more than two splits: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type split \ --operation.splits '{"train": 0.6, "val": 0.2, "test": 0.2}' Merge multiple datasets: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_merged \ --operation.type merge \ --operation.repo_ids "['lerobot/pusht_train', 'lerobot/pusht_val']" Remove camera feature: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type remove_feature \ --operation.feature_names "['observation.images.top']" Modify tasks - set a single task for all episodes (WARNING: modifies in-place): - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type modify_tasks \ --operation.new_task "Pick up the cube and place it" Modify tasks - set different tasks for specific episodes (WARNING: modifies in-place): - python -m lerobot.scripts.lerobot_edit_dataset \ + 
lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type modify_tasks \ --operation.episode_tasks '{"0": "Task A", "1": "Task B", "2": "Task A"}' Modify tasks - set default task with overrides for specific episodes (WARNING: modifies in-place): - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type modify_tasks \ --operation.new_task "Default task" \ --operation.episode_tasks '{"5": "Special task for episode 5"}' Convert image dataset to video format and save locally: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_image \ --operation.type convert_image_to_video \ --operation.output_dir /path/to/output/pusht_video Convert image dataset to video format and save with new repo_id: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_image \ --new_repo_id lerobot/pusht_video \ --operation.type convert_image_to_video Convert image dataset to video format and push to hub: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_image \ --new_repo_id lerobot/pusht_video \ --operation.type convert_image_to_video \ --push_to_hub true Show dataset information: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_image \ --operation.type info \ --operation.show_features true Show dataset information without feature details: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_image \ --operation.type info \ --operation.show_features false Using JSON config file: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --config_path path/to/edit_config.json """ diff --git a/src/lerobot/scripts/lerobot_replay.py b/src/lerobot/scripts/lerobot_replay.py index c9a559d07..8e2a394b9 100644 --- a/src/lerobot/scripts/lerobot_replay.py +++ 
b/src/lerobot/scripts/lerobot_replay.py @@ -22,7 +22,7 @@ lerobot-replay \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.id=black \ - --dataset.repo_id=aliberts/record-test \ + --dataset.repo_id=/record-test \ --dataset.episode=0 ``` From 2dd366436ed30ed9729b4f18076a54fec7ec589b Mon Sep 17 00:00:00 2001 From: Khalil Date: Thu, 19 Feb 2026 14:35:02 +0100 Subject: [PATCH 13/21] Fix gym-hil integration with the new LeRobot pipeline. (#2482) * Add GymHILAdapterProcessorStep for gym-hil environment integration * Fix action features in control loop for None teleop device with gym-hil * Finalize dataset before pushing to hub for visualization on the hub * Fix neutral action for gripper * fix pre-commit --- src/lerobot/processor/__init__.py | 2 ++ src/lerobot/processor/gym_action_processor.py | 8 +++++ src/lerobot/processor/hil_processor.py | 31 +++++++++++++++++++ src/lerobot/rl/gym_manipulator.py | 15 +++++++-- 4 files changed, 54 insertions(+), 2 deletions(-) diff --git a/src/lerobot/processor/__init__.py b/src/lerobot/processor/__init__.py index 164f7da03..0b63e1606 100644 --- a/src/lerobot/processor/__init__.py +++ b/src/lerobot/processor/__init__.py @@ -44,6 +44,7 @@ from .hil_processor import ( AddTeleopActionAsComplimentaryDataStep, AddTeleopEventsAsInfoStep, GripperPenaltyProcessorStep, + GymHILAdapterProcessorStep, ImageCropResizeProcessorStep, InterventionActionProcessorStep, RewardClassifierProcessorStep, @@ -87,6 +88,7 @@ __all__ = [ "DoneProcessorStep", "EnvAction", "EnvTransition", + "GymHILAdapterProcessorStep", "GripperPenaltyProcessorStep", "hotswap_stats", "IdentityProcessorStep", diff --git a/src/lerobot/processor/gym_action_processor.py b/src/lerobot/processor/gym_action_processor.py index 8fa8cfd86..4f225af92 100644 --- a/src/lerobot/processor/gym_action_processor.py +++ b/src/lerobot/processor/gym_action_processor.py @@ -20,6 +20,7 @@ from lerobot.configs.types import PipelineFeatureType, PolicyFeature from 
.converters import to_tensor from .core import EnvAction, EnvTransition, PolicyAction +from .hil_processor import TELEOP_ACTION_KEY from .pipeline import ActionProcessorStep, ProcessorStep, ProcessorStepRegistry @@ -89,6 +90,13 @@ class Numpy2TorchActionProcessorStep(ProcessorStep): torch_action = to_tensor(action, dtype=None) # Preserve original dtype new_transition[TransitionKey.ACTION] = torch_action + complementary_data = new_transition.get(TransitionKey.COMPLEMENTARY_DATA, {}) + if TELEOP_ACTION_KEY in complementary_data: + teleop_action = complementary_data[TELEOP_ACTION_KEY] + if isinstance(teleop_action, EnvAction): + complementary_data[TELEOP_ACTION_KEY] = to_tensor(teleop_action) + new_transition[TransitionKey.COMPLEMENTARY_DATA] = complementary_data + return new_transition def transform_features( diff --git a/src/lerobot/processor/hil_processor.py b/src/lerobot/processor/hil_processor.py index 24b5628fa..34eaeed51 100644 --- a/src/lerobot/processor/hil_processor.py +++ b/src/lerobot/processor/hil_processor.py @@ -312,6 +312,37 @@ class TimeLimitProcessorStep(TruncatedProcessorStep): return features +@ProcessorStepRegistry.register("gym_hil_adapter_processor") +class GymHILAdapterProcessorStep(ProcessorStep): + """ + Adapts the output of the `gym-hil` environment to the format expected by `lerobot` processors. + + This step normalizes the `transition` object by: + 1. Copying `teleop_action` from `info` to `complementary_data`. + 2. Copying `is_intervention` from `info` (using the string key) to `info` (using the enum key). 
+ """ + + def __call__(self, transition: EnvTransition) -> EnvTransition: + info = transition.get(TransitionKey.INFO, {}) + complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA, {}) + + if TELEOP_ACTION_KEY in info: + complementary_data[TELEOP_ACTION_KEY] = info[TELEOP_ACTION_KEY] + + if "is_intervention" in info: + info[TeleopEvents.IS_INTERVENTION] = info["is_intervention"] + + transition[TransitionKey.INFO] = info + transition[TransitionKey.COMPLEMENTARY_DATA] = complementary_data + + return transition + + def transform_features( + self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]] + ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]: + return features + + @dataclass @ProcessorStepRegistry.register("gripper_penalty_processor") class GripperPenaltyProcessorStep(ProcessorStep): diff --git a/src/lerobot/rl/gym_manipulator.py b/src/lerobot/rl/gym_manipulator.py index 1c1cb752f..f5fcb7437 100644 --- a/src/lerobot/rl/gym_manipulator.py +++ b/src/lerobot/rl/gym_manipulator.py @@ -36,6 +36,7 @@ from lerobot.processor import ( DeviceProcessorStep, EnvTransition, GripperPenaltyProcessorStep, + GymHILAdapterProcessorStep, ImageCropResizeProcessorStep, InterventionActionProcessorStep, MapDeltaActionToRobotActionStep, @@ -379,6 +380,7 @@ def make_processors( ] env_pipeline_steps = [ + GymHILAdapterProcessorStep(), Numpy2TorchActionProcessorStep(), VanillaObservationProcessorStep(), AddBatchDimensionProcessorStep(), @@ -608,7 +610,14 @@ def control_loop( dataset = None if cfg.mode == "record": - action_features = teleop_device.action_features + if teleop_device: + action_features = teleop_device.action_features + else: + action_features = { + "dtype": "float32", + "shape": (4,), + "names": ["delta_x", "delta_y", "delta_z", "gripper"], + } features = { ACTION: action_features, REWARD: {"dtype": "float32", "shape": (1,), "names": None}, @@ -656,7 +665,7 @@ def control_loop( # Create a neutral action (no movement) neutral_action = 
torch.tensor([0.0, 0.0, 0.0], dtype=torch.float32) if use_gripper: - neutral_action = torch.cat([neutral_action, torch.tensor([1.0])]) # Gripper stay + neutral_action = torch.cat([neutral_action, torch.tensor([0.0])]) # Gripper stay # Use the new step function transition = step_env_and_process_transition( @@ -725,6 +734,8 @@ def control_loop( precise_sleep(max(dt - (time.perf_counter() - step_start_time), 0.0)) if dataset is not None and cfg.dataset.push_to_hub: + logging.info("Finalizing dataset before pushing to hub") + dataset.finalize() logging.info("Pushing dataset to hub") dataset.push_to_hub() From 5865170d36442b907bb35f946e837eee18aafdf1 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Fri, 20 Feb 2026 17:01:46 +0100 Subject: [PATCH 14/21] chore(deps): bump ceil datasets (#2946) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e5431ada3..0ca1f0432 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,7 +59,7 @@ keywords = ["lerobot", "huggingface", "robotics", "machine learning", "artifici dependencies = [ # Hugging Face dependencies - "datasets>=4.0.0,<4.2.0", + "datasets>=4.0.0,<5.0.0", "diffusers>=0.27.2,<0.36.0", "huggingface-hub[hf-transfer,cli]>=0.34.2,<0.36.0", "accelerate>=1.10.0,<2.0.0", From e96339a3b49ac080e11664d2be4737987d861a57 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Mon, 23 Feb 2026 13:57:43 +0100 Subject: [PATCH 15/21] feat(dataset): add streaming video encoding + HW encoder support (#2974) * feat(dataset): init stream encoding * feat(dataset): use threads to fix frame pickle latency * refactor(dataset): remove HW encoded related changes * add lp (#2977) * feat(dataset): add Hw encoding + log drop frames (#2978) * chore(docs): add streaming video encoding guide * fix(dataset): style docs + testing * chore(docs): simplify sttreaming video encoding guide * chore(dataset): add commands + streaming encoding default false + print note if false + queue default 
is now 30 * chore(docs): add verification note advice * chore(dataset): adjusting defaults & docs for streaming encoding * docs(scripts): improve docstrings * test(dataset): polish streaming encoding tests * chore(dataset): move FYI log related to streaming * chore(dataset): add arg vcodec to suggestions * refactor(dataset): better handling for auto and available vcodec * chore(dataset): change log level * docs(dataset): add note related to training performance vcodec * docs(dataset): add more notes to streaming encoding --------- Co-authored-by: Caroline Pascal Co-authored-by: Pepijn --- docs/source/_toctree.yml | 2 + docs/source/act.mdx | 3 + docs/source/earthrover_mini_plus.mdx | 3 + docs/source/groot.mdx | 9 +- docs/source/hope_jr.mdx | 6 + docs/source/il_robots.mdx | 8 +- docs/source/lerobot-dataset-v3.mdx | 5 +- docs/source/reachy2.mdx | 6 + docs/source/smolvla.mdx | 3 + docs/source/streaming_video_encoding.mdx | 155 ++++ docs/source/unitree_g1.mdx | 10 +- src/lerobot/datasets/lerobot_dataset.py | 124 ++- src/lerobot/datasets/video_utils.py | 480 +++++++++++- src/lerobot/scripts/lerobot_record.py | 34 +- tests/datasets/test_datasets.py | 9 +- .../datasets/test_streaming_video_encoder.py | 730 ++++++++++++++++++ 16 files changed, 1532 insertions(+), 55 deletions(-) create mode 100644 docs/source/streaming_video_encoding.mdx create mode 100644 tests/datasets/test_streaming_video_encoder.py diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml index d61aac9c1..1055975d7 100644 --- a/docs/source/_toctree.yml +++ b/docs/source/_toctree.yml @@ -29,6 +29,8 @@ title: Using the Dataset Tools - local: dataset_subtask title: Using Subtasks in the Dataset + - local: streaming_video_encoding + title: Streaming Video Encoding title: "Datasets" - sections: - local: act diff --git a/docs/source/act.mdx b/docs/source/act.mdx index e3294ca69..453bcbba8 100644 --- a/docs/source/act.mdx +++ b/docs/source/act.mdx @@ -88,5 +88,8 @@ lerobot-record \ 
--dataset.repo_id=${HF_USER}/eval_act_your_dataset \ --dataset.num_episodes=10 \ --dataset.single_task="Your task description" \ + --dataset.streaming_encoding=true \ + --dataset.encoder_threads=2 \ + # --dataset.vcodec=auto \ --policy.path=${HF_USER}/act_policy ``` diff --git a/docs/source/earthrover_mini_plus.mdx b/docs/source/earthrover_mini_plus.mdx index dd9c2ad2b..cfc3a2eef 100644 --- a/docs/source/earthrover_mini_plus.mdx +++ b/docs/source/earthrover_mini_plus.mdx @@ -192,6 +192,9 @@ lerobot-record \ --dataset.num_episodes=2 \ --dataset.fps=10 \ --dataset.single_task="Navigate around obstacles" \ + --dataset.streaming_encoding=true \ + --dataset.encoder_threads=2 \ + # --dataset.vcodec=auto \ --display_data=true ``` diff --git a/docs/source/groot.mdx b/docs/source/groot.mdx index 8bfc22996..0ef591466 100644 --- a/docs/source/groot.mdx +++ b/docs/source/groot.mdx @@ -120,9 +120,12 @@ lerobot-record \ --display_data=true \ --dataset.repo_id=/eval_groot-bimanual \ --dataset.num_episodes=10 \ - --dataset.single_task="Grab and handover the red cube to the other arm" - --policy.path=/groot-bimanual # your trained model - --dataset.episode_time_s=30 + --dataset.single_task="Grab and handover the red cube to the other arm" \ + --dataset.streaming_encoding=true \ + --dataset.encoder_threads=2 \ + # --dataset.vcodec=auto \ + --policy.path=/groot-bimanual \ # your trained model + --dataset.episode_time_s=30 \ --dataset.reset_time_s=10 ``` diff --git a/docs/source/hope_jr.mdx b/docs/source/hope_jr.mdx index 026cd084a..8826d9758 100644 --- a/docs/source/hope_jr.mdx +++ b/docs/source/hope_jr.mdx @@ -230,6 +230,9 @@ lerobot-record \ --dataset.episode_time_s=5 \ --dataset.push_to_hub=true \ --dataset.private=true \ + --dataset.streaming_encoding=true \ + --dataset.encoder_threads=2 \ + # --dataset.vcodec=auto \ --display_data=true ``` @@ -273,5 +276,8 @@ lerobot-record \ --dataset.repo_id=/eval_hopejr \ --dataset.single_task="Evaluate hopejr hand policy" \ 
--dataset.num_episodes=10 \ + --dataset.streaming_encoding=true \ + --dataset.encoder_threads=2 \ + # --dataset.vcodec=auto \ --policy.path=outputs/train/hopejr_hand/checkpoints/last/pretrained_model ``` diff --git a/docs/source/il_robots.mdx b/docs/source/il_robots.mdx index 84dc6f2f6..7fc770b0c 100644 --- a/docs/source/il_robots.mdx +++ b/docs/source/il_robots.mdx @@ -185,7 +185,10 @@ lerobot-record \ --display_data=true \ --dataset.repo_id=${HF_USER}/record-test \ --dataset.num_episodes=5 \ - --dataset.single_task="Grab the black cube" + --dataset.single_task="Grab the black cube" \ + --dataset.streaming_encoding=true \ + # --dataset.vcodec=auto \ + --dataset.encoder_threads=2 ``` @@ -515,6 +518,9 @@ lerobot-record \ --display_data=false \ --dataset.repo_id=${HF_USER}/eval_so100 \ --dataset.single_task="Put lego brick into the transparent box" \ + --dataset.streaming_encoding=true \ + --dataset.encoder_threads=2 \ + # --dataset.vcodec=auto \ # <- Teleop optional if you want to teleoperate in between episodes \ # --teleop.type=so100_leader \ # --teleop.port=/dev/ttyACM0 \ diff --git a/docs/source/lerobot-dataset-v3.mdx b/docs/source/lerobot-dataset-v3.mdx index 3521914f2..235a355bd 100644 --- a/docs/source/lerobot-dataset-v3.mdx +++ b/docs/source/lerobot-dataset-v3.mdx @@ -41,7 +41,10 @@ lerobot-record \ --display_data=true \ --dataset.repo_id=${HF_USER}/record-test \ --dataset.num_episodes=5 \ - --dataset.single_task="Grab the black cube" + --dataset.single_task="Grab the black cube" \ + --dataset.streaming_encoding=true \ + # --dataset.vcodec=auto \ + --dataset.encoder_threads=2 ``` See the [recording guide](./il_robots#record-a-dataset) for more details. 
diff --git a/docs/source/reachy2.mdx b/docs/source/reachy2.mdx index 51b09acd2..1b868711a 100644 --- a/docs/source/reachy2.mdx +++ b/docs/source/reachy2.mdx @@ -159,6 +159,9 @@ lerobot-record \ --dataset.fps=15 \ --dataset.push_to_hub=true \ --dataset.private=true \ + --dataset.streaming_encoding=true \ + --dataset.encoder_threads=2 \ + # --dataset.vcodec=auto \ --display_data=true ``` @@ -198,6 +201,9 @@ lerobot-record \ --dataset.fps=15 \ --dataset.push_to_hub=true \ --dataset.private=true \ + --dataset.streaming_encoding=true \ + --dataset.encoder_threads=2 \ + # --dataset.vcodec=auto \ --display_data=true ``` diff --git a/docs/source/smolvla.mdx b/docs/source/smolvla.mdx index a56298b5e..bf8a0d2f0 100644 --- a/docs/source/smolvla.mdx +++ b/docs/source/smolvla.mdx @@ -106,6 +106,9 @@ lerobot-record \ --dataset.repo_id=${HF_USER}/eval_DATASET_NAME_test \ # <- This will be the dataset name on HF Hub --dataset.episode_time_s=50 \ --dataset.num_episodes=10 \ + --dataset.streaming_encoding=true \ + --dataset.encoder_threads=2 \ + # --dataset.vcodec=auto \ # <- Teleop optional if you want to teleoperate in between episodes \ # --teleop.type=so100_leader \ # --teleop.port=/dev/ttyACM0 \ diff --git a/docs/source/streaming_video_encoding.mdx b/docs/source/streaming_video_encoding.mdx new file mode 100644 index 000000000..40004200e --- /dev/null +++ b/docs/source/streaming_video_encoding.mdx @@ -0,0 +1,155 @@ +# Streaming Video Encoding Guide + +## 1. Overview + +Streaming video encoding eliminates the traditional PNG round-trip during video dataset recording. Instead of: + +1. Capture frame -> write PNG to disk -> (at episode end) read PNG's -> encode to MP4 -> delete PNG's + +Frames can be encoded in real-time during capture: + +1. 
Capture frame -> queue to encoder thread -> encode to MP4 directly + +This makes `save_episode()` near-instant (the video is already encoded by the time the episode ends) and removes the blocking wait that previously occurred between episodes, especially with multiple cameras in long episodes. + +## 2. Tuning Parameters + +| Parameter | CLI Flag | Type | Default | Description | +| ----------------------- | --------------------------------- | ------------- | ------------- | ----------------------------------------------------------------- | +| `streaming_encoding` | `--dataset.streaming_encoding` | `bool` | `True` | Enable real-time encoding during capture | +| `vcodec` | `--dataset.vcodec` | `str` | `"libsvtav1"` | Video codec. `"auto"` detects best HW encoder | +| `encoder_threads` | `--dataset.encoder_threads` | `int \| None` | `None` (auto) | Threads per encoder instance. `None` will leave the vcoded decide | +| `encoder_queue_maxsize` | `--dataset.encoder_queue_maxsize` | `int` | `60` | Max buffered frames per camera (~2s at 30fps). Consumes RAM | + +## 3. Performance Considerations + +Streaming encoding means the CPU is encoding video **during** the capture loop, not after. 
This creates a CPU budget that must be shared between: + +- **Control loop** (reading cameras, control the robot, writing non-video data) +- **Encoder threads** (one pool per camera) +- **Rerun visualization** (if enabled) +- **OS and other processes** + +### Resolution & Number of Cameras Impact + +| Setup | Throughput (px/sec) | CPU Encoding Load | Notes | +| ------------------------- | ------------------- | ----------------- | ------------------------------ | +| 2camsx 640x480x3 @30fps | 55M | Low | Works on most systems | +| 2camsx 1280x720x3 @30fps | 165M | Moderate | Comfortable on modern systems | +| 2camsx 1920x1080x3 @30fps | 373M | High | Requires powerful high-end CPU | + +### `encoder_threads` Tuning + +This parameter controls how many threads each encoder instance uses internally: + +- **Higher values** (e.g., 4-5): Faster encoding, but uses more CPU cores per camera. Good for high-end systems with many cores. +- **Lower values** (e.g., 1-2): Less CPU per camera, freeing cores for capture and visualization. Good for low-res images and capable CPUs. +- **`None` (default)**: Lets the codec decide. Information available in the codec logs. + +### Backpressure and Frame Dropping + +Each camera has a bounded queue (`encoder_queue_maxsize`, default 60 frames). When the encoder can't keep up: + +1. The queue fills up (consuming RAM) +2. New frames are **dropped** (not blocked) — the capture loop continues uninterrupted +3. A warning is logged: `"Encoder queue full for {camera}, dropped N frame(s)"` +4. At episode end, total dropped frames per camera are reported + +### Symptoms of Encoder Falling Behind + +- **System feels laggy and freezes**: all CPUs are at 100% +- **Dropped frame warnings** in the log or lower frames/FPS than expected in the recorded dataset +- **Choppy robot movement**: If CPU is severely overloaded, even the capture loop may be affected +- **Accumulated rerun lag**: Visualization falls behind real-time + +## 4. 
Hardware-Accelerated Encoding + +### When to Use + +Use HW encoding when: + +- CPU is the bottleneck (dropped frames, choppy robot, rerun lag) +- You have compatible hardware (GPU or dedicated encoder) +- You're recording at high throughput (high resolution or with many cameras) + +### Choosing a Codec + +| Codec | CPU Usage | File Size | Quality | Notes | +| --------------------- | --------- | -------------- | ------- | ---------------------------------------------------------------- | +| `libsvtav1` (default) | High | Smallest | Best | Default. Best compression but most CPU-intensive | +| `h264` | Medium | ~30-50% larger | Good | Software H.264. Lower CPU | +| HW encoders | Very Low | Largest | Good | Offloads to dedicated hardware. Best for CPU-constrained systems | + +### Available HW Encoders + +| Encoder | Platform | Hardware | CLI Value | +| ------------------- | ------------- | ------------------------------------------------------------------------------------------------ | ------------------------------------ | +| `h264_videotoolbox` | macOS | Apple Silicon / Intel | `--dataset.vcodec=h264_videotoolbox` | +| `hevc_videotoolbox` | macOS | Apple Silicon / Intel | `--dataset.vcodec=hevc_videotoolbox` | +| `h264_nvenc` | Linux/Windows | NVIDIA GPU | `--dataset.vcodec=h264_nvenc` | +| `hevc_nvenc` | Linux/Windows | NVIDIA GPU | `--dataset.vcodec=hevc_nvenc` | +| `h264_vaapi` | Linux | Intel/AMD GPU | `--dataset.vcodec=h264_vaapi` | +| `h264_qsv` | Linux/Windows | Intel Quick Sync | `--dataset.vcodec=h264_qsv` | +| `auto` | Any | Probes the system for available HW encoders. Falls back to `libsvtav1` if no HW encoder is found | `--dataset.vcodec=auto` | + +> [!NOTE] +> In order to use the HW accelerated encoders you might need to upgrade your GPU drivers. 
+ +> [!NOTE] +> `libsvtav1` is the default because it provides the best training performance; other vcodecs can reduce CPU usage and be faster, but they typically produce larger files and may affect training time. + +## 5. Troubleshooting + +| Symptom | Likely Cause | Fix | +| ------------------------------------------------------------------ | -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| System freezes or choppy robot movement or Rerun visualization lag | CPU starved (100% load usage) | Close other apps, reduce encoding throughput, lower `encoder_threads`, use `h264`, use `display_data=False`. If the CPU continues to be at 100% then it might be insufficient for your setup, consider `--dataset.streaming_encoding=false` or HW encoding (`--dataset.vcodec=auto`) | +| "Encoder queue full" warnings or dropped frames in dataset | Encoder can't keep up (Queue overflow) | If CPU is not at 100%: Increase `encoder_threads`, increase `encoder_queue_maxsize` or use HW encoding (`--dataset.vcodec=auto`). | +| High RAM usage | Queue filling faster than encoding | `encoder_threads` too low or CPU insufficient. Reduce `encoder_queue_maxsize` or use HW encoding | +| Large video files | Using HW encoder or H.264 | Expected trade-off. Switch to `libsvtav1` if CPU allows | +| `save_episode()` still slow | `streaming_encoding` is `False` | Set `--dataset.streaming_encoding=true` | +| Encoder thread crash | Codec not available or invalid settings | Check `vcodec` is installed, try `--dataset.vcodec=auto` | +| Recorded dataset is missing frames | CPU/GPU starvation or occasional load spikes | If ~5% of frames are missing, your system is likely overloaded — follow the recommendations above. 
If fewer frames are missing (~2%), they are probably due to occasional transient load spikes (often at startup) and can be considered expected. | + +## 6. Recommended Configurations + +These estimates are conservative; we recommend testing them on your setup—start with a low load and increase it gradually. + +### High-End Systems: modern 12+ cores (24+ threads) + +A throughput between ~250-500M px/sec should be comfortable in CPU. For even better results try HW encoding if available. + +```bash +# 3camsx 1280x720x3 @30fps: Defaults work well. Optionally increase encoder parallelism. +# 2camsx 1920x1080x3 @30fps: Defaults work well. Optionally increase encoder parallelism. +lerobot-record --dataset.encoder_threads=5 ... + +# 3camsx 1920x1080x3 @30fps: Might require some tuning. +``` + +### Mid-Range Systems: modern 8+ cores (16+ threads) or Apple Silicon + +A throughput between ~80-300M px/sec should be possible in CPU. + +```bash +# 3camsx 640x480x3 @30fps: Defaults work well. Optionally decrease encoder parallelism. +# 2camsx 1280x720x3 @30fps: Defaults work well. Optionally decrease encoder parallelism. +lerobot-record --dataset.encoder_threads=2 ... + +# 2camsx 1920x1080x3 @30fps: Might require some tuning. +``` + +### Low-Resource Systems: modern 4+ cores (8+ threads) or Raspberry Pi 5 + +On very constrained systems, streaming encoding may compete too heavily with the capture loop. Disabling it falls back to the PNG-based approach where encoding happens between episodes (blocking, but doesn't interfere with capture). Alternatively, record at a lower throughput to reduce both capture and encoding load. Consider also changing codec to `h264` and using batch encoding. + +```bash +# 2camsx 640x480x3 @30fps: Requires some tuning. + +# Use H.264, disable streaming, consider batching encoding +lerobot-record --dataset.vcodec=h264 --dataset.streaming_encoding=false ... +``` + +## 7. 
Closing note + +Performance ultimately depends on your exact setup — frames-per-second, resolution, CPU cores and load, available memory, episode length, and the encoder you choose. Always test with your target workload, be mindful about your CPU & system capabilities and tune `encoder_threads`, `encoder_queue_maxsize`, and +`vcodec` reasonably. That said, a common practical configuration (for many applications) is three cameras at 640×480x3 @30fps; this usually runs fine with the default streaming video encoding settings in modern systems. Always verify your recorded dataset is healthy by comparing the video duration to the CLI episode duration and confirming the row count equals FPS × CLI duration. diff --git a/docs/source/unitree_g1.mdx b/docs/source/unitree_g1.mdx index 4c5d28924..76e972dca 100644 --- a/docs/source/unitree_g1.mdx +++ b/docs/source/unitree_g1.mdx @@ -229,7 +229,10 @@ lerobot-record \ --dataset.num_episodes=2 \ --dataset.episode_time_s=5 \ --dataset.reset_time_s=5 \ - --dataset.push_to_hub=true + --dataset.push_to_hub=true \ + --dataset.streaming_encoding=true \ + # --dataset.vcodec=auto \ + --dataset.encoder_threads=2 ``` Example simulation dataset: [nepyope/teleop_test_sim](https://huggingface.co/datasets/nepyope/teleop_test_sim) @@ -279,7 +282,10 @@ lerobot-record \ --dataset.num_episodes=2 \ --dataset.episode_time_s=5 \ --dataset.reset_time_s=5 \ - --dataset.push_to_hub=true + --dataset.push_to_hub=true \ + --dataset.streaming_encoding=true \ + # --dataset.vcodec=auto \ + --dataset.encoder_threads=2 ``` **Note**: Update `server_address` to match your robot's camera server IP. 
diff --git a/src/lerobot/datasets/lerobot_dataset.py b/src/lerobot/datasets/lerobot_dataset.py index 360ed8d30..65b475e26 100644 --- a/src/lerobot/datasets/lerobot_dataset.py +++ b/src/lerobot/datasets/lerobot_dataset.py @@ -68,6 +68,7 @@ from lerobot.datasets.utils import ( write_tasks, ) from lerobot.datasets.video_utils import ( + StreamingVideoEncoder, VideoFrame, concatenate_video_files, decode_video_frames, @@ -75,11 +76,11 @@ from lerobot.datasets.video_utils import ( get_safe_default_codec, get_video_duration_in_s, get_video_info, + resolve_vcodec, ) from lerobot.utils.constants import HF_LEROBOT_HOME CODEBASE_VERSION = "v3.0" -VALID_VIDEO_CODECS = {"h264", "hevc", "libsvtav1"} class LeRobotDatasetMetadata: @@ -545,12 +546,19 @@ class LeRobotDatasetMetadata: def _encode_video_worker( - video_key: str, episode_index: int, root: Path, fps: int, vcodec: str = "libsvtav1" + video_key: str, + episode_index: int, + root: Path, + fps: int, + vcodec: str = "libsvtav1", + encoder_threads: int | None = None, ) -> Path: temp_path = Path(tempfile.mkdtemp(dir=root)) / f"{video_key}_{episode_index:03d}.mp4" fpath = DEFAULT_IMAGE_PATH.format(image_key=video_key, episode_index=episode_index, frame_index=0) img_dir = (root / fpath).parent - encode_video_frames(img_dir, temp_path, fps, vcodec=vcodec, overwrite=True) + encode_video_frames( + img_dir, temp_path, fps, vcodec=vcodec, overwrite=True, encoder_threads=encoder_threads + ) shutil.rmtree(img_dir) return temp_path @@ -570,6 +578,9 @@ class LeRobotDataset(torch.utils.data.Dataset): video_backend: str | None = None, batch_encoding_size: int = 1, vcodec: str = "libsvtav1", + streaming_encoding: bool = False, + encoder_queue_maxsize: int = 30, + encoder_threads: int | None = None, ): """ 2 modes are available for instantiating this class, depending on 2 different use cases: @@ -683,12 +694,17 @@ class LeRobotDataset(torch.utils.data.Dataset): batch_encoding_size (int, optional): Number of episodes to accumulate before 
batch encoding videos. Set to 1 for immediate encoding (default), or higher for batched encoding. Defaults to 1. vcodec (str, optional): Video codec for encoding videos during recording. Options: 'h264', 'hevc', - 'libsvtav1'. Defaults to 'libsvtav1'. Use 'h264' for faster encoding on systems where AV1 - encoding is CPU-heavy. + 'libsvtav1', 'auto', or hardware-specific codecs like 'h264_videotoolbox', 'h264_nvenc'. + Defaults to 'libsvtav1'. Use 'auto' to auto-detect the best available hardware encoder. + streaming_encoding (bool, optional): If True, encode video frames in real-time during capture + instead of writing PNG images first. This makes save_episode() near-instant. Defaults to False. + encoder_queue_maxsize (int, optional): Maximum number of frames to buffer per camera when using + streaming encoding. Defaults to 30 (~1s at 30fps). + encoder_threads (int | None, optional): Number of threads per encoder instance. None lets the + codec auto-detect (default). Lower values reduce CPU usage per encoder. Maps to 'lp' (via svtav1-params) for + libsvtav1 and 'threads' for h264/hevc. """ super().__init__() - if vcodec not in VALID_VIDEO_CODECS: - raise ValueError(f"Invalid vcodec '{vcodec}'. 
Must be one of: {sorted(VALID_VIDEO_CODECS)}") self.repo_id = repo_id self.root = Path(root) if root else HF_LEROBOT_HOME / repo_id self.image_transforms = image_transforms @@ -700,7 +716,8 @@ class LeRobotDataset(torch.utils.data.Dataset): self.delta_indices = None self.batch_encoding_size = batch_encoding_size self.episodes_since_last_encoding = 0 - self.vcodec = vcodec + self.vcodec = resolve_vcodec(vcodec) + self._encoder_threads = encoder_threads # Unused attributes self.image_writer = None @@ -708,6 +725,7 @@ class LeRobotDataset(torch.utils.data.Dataset): self.writer = None self.latest_episode = None self._current_file_start_frame = None # Track the starting frame index of the current parquet file + self._streaming_encoder = None self.root.mkdir(exist_ok=True, parents=True) @@ -749,6 +767,19 @@ class LeRobotDataset(torch.utils.data.Dataset): check_delta_timestamps(self.delta_timestamps, self.fps, self.tolerance_s) self.delta_indices = get_delta_indices(self.delta_timestamps, self.fps) + # Initialize streaming encoder for resumed recording + if streaming_encoding and len(self.meta.video_keys) > 0: + self._streaming_encoder = StreamingVideoEncoder( + fps=self.meta.fps, + vcodec=self.vcodec, + pix_fmt="yuv420p", + g=2, + crf=30, + preset=None, + queue_maxsize=encoder_queue_maxsize, + encoder_threads=encoder_threads, + ) + def _close_writer(self) -> None: """Close and cleanup the parquet writer if it exists.""" writer = getattr(self, "writer", None) @@ -1104,6 +1135,8 @@ class LeRobotDataset(torch.utils.data.Dataset): """ self._close_writer() self.meta._close_writer() + if self._streaming_encoder is not None: + self._streaming_encoder.close() def create_episode_buffer(self, episode_index: int | None = None) -> dict: current_ep_idx = self.meta.total_episodes if episode_index is None else episode_index @@ -1158,6 +1191,13 @@ class LeRobotDataset(torch.utils.data.Dataset): self.episode_buffer["timestamp"].append(timestamp) 
self.episode_buffer["task"].append(frame.pop("task")) # Remove task from frame after processing + # Start streaming encoder on first frame of episode (once, before iterating keys) + if frame_index == 0 and self._streaming_encoder is not None: + self._streaming_encoder.start_episode( + video_keys=list(self.meta.video_keys), + temp_dir=self.root, + ) + # Add frame features to episode_buffer for key in frame: if key not in self.features: @@ -1165,7 +1205,10 @@ class LeRobotDataset(torch.utils.data.Dataset): f"An element of the frame is not in the features. '{key}' not in '{self.features.keys()}'." ) - if self.features[key]["dtype"] in ["image", "video"]: + if self.features[key]["dtype"] == "video" and self._streaming_encoder is not None: + self._streaming_encoder.feed_frame(key, frame[key]) + self.episode_buffer[key].append(None) # Placeholder (video keys are skipped in parquet) + elif self.features[key]["dtype"] in ["image", "video"]: img_path = self._get_image_file_path( episode_index=self.episode_buffer["episode_index"], image_key=key, frame_index=frame_index ) @@ -1226,13 +1269,38 @@ class LeRobotDataset(torch.utils.data.Dataset): # Wait for image writer to end, so that episode stats over images can be computed self._wait_image_writer() - ep_stats = compute_episode_stats(episode_buffer, self.features) - ep_metadata = self._save_episode_data(episode_buffer) has_video_keys = len(self.meta.video_keys) > 0 + use_streaming = self._streaming_encoder is not None and has_video_keys use_batched_encoding = self.batch_encoding_size > 1 - if has_video_keys and not use_batched_encoding: + if use_streaming: + # Compute stats for non-video features only (video stats come from encoder) + non_video_buffer = { + k: v + for k, v in episode_buffer.items() + if self.features.get(k, {}).get("dtype") not in ("video",) + } + non_video_features = {k: v for k, v in self.features.items() if v["dtype"] != "video"} + ep_stats = compute_episode_stats(non_video_buffer, non_video_features) + 
else: + ep_stats = compute_episode_stats(episode_buffer, self.features) + + ep_metadata = self._save_episode_data(episode_buffer) + + if use_streaming: + # Finish streaming encoding and collect results + streaming_results = self._streaming_encoder.finish_episode() + for video_key in self.meta.video_keys: + temp_path, video_stats = streaming_results[video_key] + if video_stats is not None: + # Format stats same as compute_episode_stats: normalize to [0,1], reshape to (C,1,1) + ep_stats[video_key] = { + k: v if k == "count" else np.squeeze(v.reshape(1, -1, 1, 1) / 255.0, axis=0) + for k, v in video_stats.items() + } + ep_metadata.update(self._save_episode_video(video_key, episode_index, temp_path=temp_path)) + elif has_video_keys and not use_batched_encoding: num_cameras = len(self.meta.video_keys) if parallel_encoding and num_cameras > 1: # TODO(Steven): Ideally we would like to control the number of threads per encoding such that: @@ -1246,6 +1314,7 @@ class LeRobotDataset(torch.utils.data.Dataset): self.root, self.fps, self.vcodec, + self._encoder_threads, ): video_key for video_key in self.meta.video_keys } @@ -1514,6 +1583,10 @@ class LeRobotDataset(torch.utils.data.Dataset): return metadata def clear_episode_buffer(self, delete_images: bool = True) -> None: + # Cancel streaming encoder if active + if self._streaming_encoder is not None: + self._streaming_encoder.cancel_episode() + # Clean up image files for the current episode buffer if delete_images: # Wait for the async image writer to finish @@ -1561,7 +1634,9 @@ class LeRobotDataset(torch.utils.data.Dataset): Note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speedup encoding, since video encoding with ffmpeg is already using multithreading. 
""" - return _encode_video_worker(video_key, episode_index, self.root, self.fps, self.vcodec) + return _encode_video_worker( + video_key, episode_index, self.root, self.fps, self.vcodec, self._encoder_threads + ) @classmethod def create( @@ -1578,10 +1653,12 @@ class LeRobotDataset(torch.utils.data.Dataset): video_backend: str | None = None, batch_encoding_size: int = 1, vcodec: str = "libsvtav1", + streaming_encoding: bool = False, + encoder_queue_maxsize: int = 30, + encoder_threads: int | None = None, ) -> "LeRobotDataset": """Create a LeRobot Dataset from scratch in order to record data.""" - if vcodec not in VALID_VIDEO_CODECS: - raise ValueError(f"Invalid vcodec '{vcodec}'. Must be one of: {sorted(VALID_VIDEO_CODECS)}") + vcodec = resolve_vcodec(vcodec) obj = cls.__new__(cls) obj.meta = LeRobotDatasetMetadata.create( repo_id=repo_id, @@ -1599,6 +1676,7 @@ class LeRobotDataset(torch.utils.data.Dataset): obj.batch_encoding_size = batch_encoding_size obj.episodes_since_last_encoding = 0 obj.vcodec = vcodec + obj._encoder_threads = encoder_threads if image_writer_processes or image_writer_threads: obj.start_image_writer(image_writer_processes, image_writer_threads) @@ -1620,6 +1698,22 @@ class LeRobotDataset(torch.utils.data.Dataset): obj._lazy_loading = False obj._recorded_frames = 0 obj._writer_closed_for_reading = False + + # Initialize streaming encoder + if streaming_encoding and len(obj.meta.video_keys) > 0: + obj._streaming_encoder = StreamingVideoEncoder( + fps=fps, + vcodec=vcodec, + pix_fmt="yuv420p", + g=2, + crf=30, + preset=None, + queue_maxsize=encoder_queue_maxsize, + encoder_threads=encoder_threads, + ) + else: + obj._streaming_encoder = None + return obj diff --git a/src/lerobot/datasets/video_utils.py b/src/lerobot/datasets/video_utils.py index 84ce13772..acc24a9e0 100644 --- a/src/lerobot/datasets/video_utils.py +++ b/src/lerobot/datasets/video_utils.py @@ -13,25 +13,106 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. # See the License for the specific language governing permissions and # limitations under the License. +import contextlib import glob import importlib import logging +import queue import shutil import tempfile +import threading import warnings from dataclasses import dataclass, field +from fractions import Fraction from pathlib import Path from threading import Lock from typing import Any, ClassVar import av import fsspec +import numpy as np import pyarrow as pa import torch import torchvision from datasets.features.features import register_feature from PIL import Image +# List of hardware encoders to probe for auto-selection. Availability depends on the platform and FFmpeg build. +# Determines the order of preference for auto-selection when vcodec="auto" is used. +HW_ENCODERS = [ + "h264_videotoolbox", # macOS + "hevc_videotoolbox", # macOS + "h264_nvenc", # NVIDIA GPU + "hevc_nvenc", # NVIDIA GPU + "h264_vaapi", # Linux Intel/AMD + "h264_qsv", # Intel Quick Sync +] + +VALID_VIDEO_CODECS = {"h264", "hevc", "libsvtav1", "auto"} | set(HW_ENCODERS) + + +def _get_codec_options( + vcodec: str, + g: int | None = 2, + crf: int | None = 30, + preset: int | None = None, +) -> dict: + """Build codec-specific options dict for video encoding.""" + options = {} + + # GOP size (keyframe interval) - supported by VideoToolbox and software encoders + if g is not None and (vcodec in ("h264_videotoolbox", "hevc_videotoolbox") or vcodec not in HW_ENCODERS): + options["g"] = str(g) + + # Quality control (codec-specific parameter names) + if crf is not None: + if vcodec in ("h264", "hevc", "libsvtav1"): + options["crf"] = str(crf) + elif vcodec in ("h264_videotoolbox", "hevc_videotoolbox"): + quality = max(1, min(100, int(100 - crf * 2))) + options["q:v"] = str(quality) + elif vcodec in ("h264_nvenc", "hevc_nvenc"): + options["rc"] = "constqp" + options["qp"] = str(crf) + elif vcodec in ("h264_vaapi",): + options["qp"] = str(crf) + elif vcodec in ("h264_qsv",): + 
options["global_quality"] = str(crf) + + # Preset (only for libsvtav1) + if vcodec == "libsvtav1": + options["preset"] = str(preset) if preset is not None else "12" + + return options + + +def detect_available_hw_encoders() -> list[str]: + """Probe PyAV/FFmpeg for available hardware video encoders.""" + available = [] + for codec_name in HW_ENCODERS: + try: + av.codec.Codec(codec_name, "w") + available.append(codec_name) + except Exception: # nosec B110 + pass # nosec B110 + return available + + +def resolve_vcodec(vcodec: str) -> str: + """Validate vcodec and resolve 'auto' to best available HW encoder, fallback to libsvtav1.""" + if vcodec not in VALID_VIDEO_CODECS: + raise ValueError(f"Invalid vcodec '{vcodec}'. Must be one of: {sorted(VALID_VIDEO_CODECS)}") + if vcodec != "auto": + logging.info(f"Using video codec: {vcodec}") + return vcodec + available = detect_available_hw_encoders() + for encoder in HW_ENCODERS: + if encoder in available: + logging.info(f"Auto-selected video codec: {encoder}") + return encoder + logging.info("No hardware encoder available, falling back to software encoder 'libsvtav1'") + return "libsvtav1" + def get_safe_default_codec(): if importlib.util.find_spec("torchcodec"): @@ -309,14 +390,13 @@ def encode_video_frames( g: int | None = 2, crf: int | None = 30, fast_decode: int = 0, - log_level: int | None = av.logging.ERROR, + log_level: int | None = av.logging.WARNING, overwrite: bool = False, preset: int | None = None, + encoder_threads: int | None = None, ) -> None: """More info on ffmpeg arguments tuning on `benchmark/video/README.md`""" - # Check encoder availability - if vcodec not in ["h264", "hevc", "libsvtav1"]: - raise ValueError(f"Unsupported video codec: {vcodec}. 
Supported codecs are: h264, hevc, libsvtav1.") + vcodec = resolve_vcodec(vcodec) video_path = Path(video_path) imgs_dir = Path(imgs_dir) @@ -347,21 +427,22 @@ def encode_video_frames( width, height = dummy_image.size # Define video codec options - video_options = {} - - if g is not None: - video_options["g"] = str(g) - - if crf is not None: - video_options["crf"] = str(crf) + video_options = _get_codec_options(vcodec, g, crf, preset) if fast_decode: key = "svtav1-params" if vcodec == "libsvtav1" else "tune" value = f"fast-decode={fast_decode}" if vcodec == "libsvtav1" else "fastdecode" video_options[key] = value - if vcodec == "libsvtav1": - video_options["preset"] = str(preset) if preset is not None else "12" + if encoder_threads is not None: + if vcodec == "libsvtav1": + lp_param = f"lp={encoder_threads}" + if "svtav1-params" in video_options: + video_options["svtav1-params"] += f":{lp_param}" + else: + video_options["svtav1-params"] = lp_param + else: + video_options["threads"] = str(encoder_threads) # Set logging level if log_level is not None: @@ -480,6 +561,348 @@ def concatenate_video_files( Path(tmp_concatenate_path).unlink() +class _CameraEncoderThread(threading.Thread): + """A thread that encodes video frames streamed via a queue into an MP4 file. + + One instance is created per camera per episode. Frames are received as numpy arrays + from the main thread, encoded in real-time using PyAV (which releases the GIL during + encoding), and written to disk. Stats are computed incrementally using + RunningQuantileStats and returned via result_queue. 
+ """ + + def __init__( + self, + video_path: Path, + fps: int, + vcodec: str, + pix_fmt: str, + g: int | None, + crf: int | None, + preset: int | None, + frame_queue: queue.Queue, + result_queue: queue.Queue, + stop_event: threading.Event, + encoder_threads: int | None = None, + ): + super().__init__(daemon=True) + self.video_path = video_path + self.fps = fps + self.vcodec = vcodec + self.pix_fmt = pix_fmt + self.g = g + self.crf = crf + self.preset = preset + self.frame_queue = frame_queue + self.result_queue = result_queue + self.stop_event = stop_event + self.encoder_threads = encoder_threads + + def run(self) -> None: + from lerobot.datasets.compute_stats import RunningQuantileStats, auto_downsample_height_width + + container = None + output_stream = None + stats_tracker = RunningQuantileStats() + frame_count = 0 + + try: + logging.getLogger("libav").setLevel(av.logging.WARNING) + + while True: + try: + frame_data = self.frame_queue.get(timeout=1) + except queue.Empty: + if self.stop_event.is_set(): + break + continue + + if frame_data is None: + # Sentinel: flush and close + break + + # Ensure HWC uint8 numpy array + if isinstance(frame_data, np.ndarray): + if frame_data.ndim == 3 and frame_data.shape[0] == 3: + # CHW -> HWC + frame_data = frame_data.transpose(1, 2, 0) + if frame_data.dtype != np.uint8: + frame_data = (frame_data * 255).astype(np.uint8) + + # Open container on first frame (to get width/height) + if container is None: + height, width = frame_data.shape[:2] + video_options = _get_codec_options(self.vcodec, self.g, self.crf, self.preset) + if self.encoder_threads is not None: + if self.vcodec == "libsvtav1": + lp_param = f"lp={self.encoder_threads}" + if "svtav1-params" in video_options: + video_options["svtav1-params"] += f":{lp_param}" + else: + video_options["svtav1-params"] = lp_param + else: + video_options["threads"] = str(self.encoder_threads) + Path(self.video_path).parent.mkdir(parents=True, exist_ok=True) + container = 
av.open(str(self.video_path), "w") + output_stream = container.add_stream(self.vcodec, self.fps, options=video_options) + output_stream.pix_fmt = self.pix_fmt + output_stream.width = width + output_stream.height = height + output_stream.time_base = Fraction(1, self.fps) + + # Encode frame with explicit timestamps + pil_img = Image.fromarray(frame_data) + video_frame = av.VideoFrame.from_image(pil_img) + video_frame.pts = frame_count + video_frame.time_base = Fraction(1, self.fps) + packet = output_stream.encode(video_frame) + if packet: + container.mux(packet) + + # Update stats with downsampled frame (per-channel stats like compute_episode_stats) + img_chw = frame_data.transpose(2, 0, 1) # HWC -> CHW + img_downsampled = auto_downsample_height_width(img_chw) + # Reshape CHW to (H*W, C) for per-channel stats + channels = img_downsampled.shape[0] + img_for_stats = img_downsampled.transpose(1, 2, 0).reshape(-1, channels) + stats_tracker.update(img_for_stats) + + frame_count += 1 + + # Flush encoder + if output_stream is not None: + packet = output_stream.encode() + if packet: + container.mux(packet) + + if container is not None: + container.close() + + av.logging.restore_default_callback() + + # Get stats and put on result queue + if frame_count >= 2: + stats = stats_tracker.get_statistics() + self.result_queue.put(("ok", stats)) + else: + self.result_queue.put(("ok", None)) + + except Exception as e: + logging.error(f"Encoder thread error: {e}") + if container is not None: + with contextlib.suppress(Exception): + container.close() + self.result_queue.put(("error", str(e))) + + +class StreamingVideoEncoder: + """Manages per-camera encoder threads for real-time video encoding during recording. + + Instead of writing frames as PNG images and then encoding to MP4 at episode end, + this class streams frames directly to encoder threads, eliminating the + PNG round-trip and making save_episode() near-instant. 
+ + Uses threading instead of multiprocessing to avoid the overhead of pickling large + numpy arrays through multiprocessing.Queue. PyAV's encode() releases the GIL, + so encoding runs in parallel with the main recording loop. + """ + + def __init__( + self, + fps: int, + vcodec: str = "libsvtav1", + pix_fmt: str = "yuv420p", + g: int | None = 2, + crf: int | None = 30, + preset: int | None = None, + queue_maxsize: int = 30, + encoder_threads: int | None = None, + ): + self.fps = fps + self.vcodec = resolve_vcodec(vcodec) + self.pix_fmt = pix_fmt + self.g = g + self.crf = crf + self.preset = preset + self.queue_maxsize = queue_maxsize + self.encoder_threads = encoder_threads + + self._frame_queues: dict[str, queue.Queue] = {} + self._result_queues: dict[str, queue.Queue] = {} + self._threads: dict[str, _CameraEncoderThread] = {} + self._stop_events: dict[str, threading.Event] = {} + self._video_paths: dict[str, Path] = {} + self._dropped_frames: dict[str, int] = {} + self._episode_active = False + + def start_episode(self, video_keys: list[str], temp_dir: Path) -> None: + """Start encoder threads for a new episode. + + Args: + video_keys: List of video feature keys (e.g. 
["observation.images.laptop"]) + temp_dir: Base directory for temporary MP4 files + """ + if self._episode_active: + self.cancel_episode() + + self._dropped_frames.clear() + + for video_key in video_keys: + frame_queue: queue.Queue = queue.Queue(maxsize=self.queue_maxsize) + result_queue: queue.Queue = queue.Queue(maxsize=1) + stop_event = threading.Event() + + temp_video_dir = Path(tempfile.mkdtemp(dir=temp_dir)) + video_path = temp_video_dir / f"{video_key.replace('/', '_')}_streaming.mp4" + + encoder_thread = _CameraEncoderThread( + video_path=video_path, + fps=self.fps, + vcodec=self.vcodec, + pix_fmt=self.pix_fmt, + g=self.g, + crf=self.crf, + preset=self.preset, + frame_queue=frame_queue, + result_queue=result_queue, + stop_event=stop_event, + encoder_threads=self.encoder_threads, + ) + encoder_thread.start() + + self._frame_queues[video_key] = frame_queue + self._result_queues[video_key] = result_queue + self._threads[video_key] = encoder_thread + self._stop_events[video_key] = stop_event + self._video_paths[video_key] = video_path + + self._episode_active = True + + def feed_frame(self, video_key: str, image: np.ndarray) -> None: + """Feed a frame to the encoder for a specific camera. + + A copy of the image is made before enqueueing to prevent race conditions + with camera drivers that may reuse buffers. If the encoder queue is full + (encoder can't keep up), the frame is dropped with a warning instead of + crashing the recording session. + + Args: + video_key: The video feature key + image: numpy array in (H,W,C) or (C,H,W) format, uint8 or float + + Raises: + RuntimeError: If the encoder thread has crashed + """ + if not self._episode_active: + raise RuntimeError("No active episode. 
Call start_episode() first.") + + thread = self._threads[video_key] + if not thread.is_alive(): + # Check for error + try: + status, msg = self._result_queues[video_key].get_nowait() + if status == "error": + raise RuntimeError(f"Encoder thread for {video_key} crashed: {msg}") + except queue.Empty: + pass + raise RuntimeError(f"Encoder thread for {video_key} is not alive") + + try: + self._frame_queues[video_key].put(image.copy(), timeout=0.1) + except queue.Full: + self._dropped_frames[video_key] = self._dropped_frames.get(video_key, 0) + 1 + count = self._dropped_frames[video_key] + # Log periodically to avoid spam (1st, then every 10th) + if count == 1 or count % 10 == 0: + logging.warning( + f"Encoder queue full for {video_key}, dropped {count} frame(s). " + f"Consider using vcodec='auto' for hardware encoding or increasing encoder_queue_maxsize." + ) + + def finish_episode(self) -> dict[str, tuple[Path, dict | None]]: + """Finish encoding the current episode. + + Sends sentinel values, waits for encoder threads to complete, + and collects results. 
+ + Returns: + Dict mapping video_key to (mp4_path, stats_dict_or_None) + """ + if not self._episode_active: + raise RuntimeError("No active episode to finish.") + + results = {} + + # Report dropped frames + for video_key, count in self._dropped_frames.items(): + if count > 0: + logging.warning(f"Episode finished with {count} dropped frame(s) for {video_key}.") + + # Send sentinel to all queues + for video_key in self._frame_queues: + self._frame_queues[video_key].put(None) + + # Wait for all threads and collect results + for video_key in self._threads: + self._threads[video_key].join(timeout=120) + if self._threads[video_key].is_alive(): + logging.error(f"Encoder thread for {video_key} did not finish in time") + self._stop_events[video_key].set() + self._threads[video_key].join(timeout=5) + results[video_key] = (self._video_paths[video_key], None) + continue + + try: + status, data = self._result_queues[video_key].get(timeout=5) + if status == "error": + raise RuntimeError(f"Encoder thread for {video_key} failed: {data}") + results[video_key] = (self._video_paths[video_key], data) + except queue.Empty: + logging.error(f"No result from encoder thread for {video_key}") + results[video_key] = (self._video_paths[video_key], None) + + self._cleanup() + self._episode_active = False + return results + + def cancel_episode(self) -> None: + """Cancel the current episode, stopping encoder threads and cleaning up.""" + if not self._episode_active: + return + + # Signal all threads to stop + for video_key in self._stop_events: + self._stop_events[video_key].set() + + # Wait for threads to finish + for video_key in self._threads: + self._threads[video_key].join(timeout=5) + + # Clean up temp MP4 files + video_path = self._video_paths.get(video_key) + if video_path is not None and video_path.exists(): + shutil.rmtree(str(video_path.parent), ignore_errors=True) + + self._cleanup() + self._episode_active = False + + def close(self) -> None: + """Close the encoder, canceling any 
in-progress episode.""" + if self._episode_active: + self.cancel_episode() + + def _cleanup(self) -> None: + """Clean up queues and thread tracking dicts.""" + for q in self._frame_queues.values(): + with contextlib.suppress(Exception): + while not q.empty(): + q.get_nowait() + self._frame_queues.clear() + self._result_queues.clear() + self._threads.clear() + self._stop_events.clear() + self._video_paths.clear() + + @dataclass class VideoFrame: # TODO(rcadene, lhoestq): move to Hugging Face `datasets` repo @@ -514,7 +937,7 @@ with warnings.catch_warnings(): def get_audio_info(video_path: Path | str) -> dict: # Set logging level - logging.getLogger("libav").setLevel(av.logging.ERROR) + logging.getLogger("libav").setLevel(av.logging.WARNING) # Getting audio stream information audio_info = {} @@ -546,7 +969,7 @@ def get_audio_info(video_path: Path | str) -> dict: def get_video_info(video_path: Path | str) -> dict: # Set logging level - logging.getLogger("libav").setLevel(av.logging.ERROR) + logging.getLogger("libav").setLevel(av.logging.WARNING) # Getting video stream information video_info = {} @@ -632,8 +1055,15 @@ class VideoEncodingManager: return self def __exit__(self, exc_type, exc_val, exc_tb): - # Handle any remaining episodes that haven't been batch encoded - if self.dataset.episodes_since_last_encoding > 0: + streaming_encoder = getattr(self.dataset, "_streaming_encoder", None) + + if streaming_encoder is not None: + # Handle streaming encoder cleanup + if exc_type is not None: + streaming_encoder.cancel_episode() + streaming_encoder.close() + elif self.dataset.episodes_since_last_encoding > 0: + # Handle any remaining episodes that haven't been batch encoded if exc_type is not None: logging.info("Exception occurred. 
Encoding remaining episodes before exit...") else: @@ -650,8 +1080,8 @@ class VideoEncodingManager: # Finalize the dataset to properly close all writers self.dataset.finalize() - # Clean up episode images if recording was interrupted - if exc_type is not None: + # Clean up episode images if recording was interrupted (only for non-streaming mode) + if exc_type is not None and streaming_encoder is None: interrupted_episode_index = self.dataset.num_episodes for key in self.dataset.meta.video_keys: img_dir = self.dataset._get_image_file_path( @@ -665,14 +1095,12 @@ class VideoEncodingManager: # Clean up any remaining images directory if it's empty img_dir = self.dataset.root / "images" - # Check for any remaining PNG files - png_files = list(img_dir.rglob("*.png")) - if len(png_files) == 0: - # Only remove the images directory if no PNG files remain - if img_dir.exists(): + if img_dir.exists(): + png_files = list(img_dir.rglob("*.png")) + if len(png_files) == 0: shutil.rmtree(img_dir) logging.debug("Cleaned up empty images directory") - else: - logging.debug(f"Images directory is not empty, containing {len(png_files)} PNG files") + else: + logging.debug(f"Images directory is not empty, containing {len(png_files)} PNG files") return False # Don't suppress the original exception diff --git a/src/lerobot/scripts/lerobot_record.py b/src/lerobot/scripts/lerobot_record.py index 216ab22a6..ec04975d4 100644 --- a/src/lerobot/scripts/lerobot_record.py +++ b/src/lerobot/scripts/lerobot_record.py @@ -26,8 +26,10 @@ lerobot-record \ --dataset.repo_id=/ \ --dataset.num_episodes=2 \ --dataset.single_task="Grab the cube" \ + --dataset.streaming_encoding=true \ + --dataset.encoder_threads=2 \ --display_data=true - # <- Optional: specify video codec (h264, hevc, libsvtav1). Default is libsvtav1. \ + # <- Optional: specify video codec (auto, h264, hevc, libsvtav1). Default is libsvtav1. 
\
    # --dataset.vcodec=h264 \
    # <- Teleop optional if you want to teleoperate to record or in between episodes with a policy \
    # --teleop.type=so100_leader \
@@ -58,7 +60,10 @@ lerobot-record \
    --display_data=true \
    --dataset.repo_id=${HF_USER}/bimanual-so-handover-cube \
    --dataset.num_episodes=25 \
-    --dataset.single_task="Grab and handover the red cube to the other arm"
+    --dataset.single_task="Grab and handover the red cube to the other arm" \
+    --dataset.streaming_encoding=true \
+    # --dataset.vcodec=auto \
+    --dataset.encoder_threads=2
 ```
 """
@@ -179,9 +184,19 @@ class DatasetRecordConfig:
     # Number of episodes to record before batch encoding videos
     # Set to 1 for immediate encoding (default behavior), or higher for batched encoding
     video_encoding_batch_size: int = 1
-    # Video codec for encoding videos. Options: 'h264', 'hevc', 'libsvtav1'.
-    # Use 'h264' for faster encoding on systems where AV1 encoding is CPU-heavy.
+    # Video codec for encoding videos. Options: 'h264', 'hevc', 'libsvtav1', 'auto',
+    # or hardware-specific: 'h264_videotoolbox', 'h264_nvenc', 'h264_vaapi', 'h264_qsv'.
+    # Use 'auto' to auto-detect the best available hardware encoder.
     vcodec: str = "libsvtav1"
+    # Enable streaming video encoding: encode frames in real-time during capture instead
+    # of writing PNG images first. Makes save_episode() near-instant. More info in the documentation: https://huggingface.co/docs/lerobot/streaming_video_encoding
+    streaming_encoding: bool = False
+    # Maximum number of frames to buffer per camera when using streaming encoding.
+    # ~1s buffer at 30fps. Provides backpressure if the encoder can't keep up.
+    encoder_queue_maxsize: int = 30
+    # Number of threads per encoder instance. None = auto (codec default).
+    # Lower values reduce CPU usage; maps to 'lp' (via svtav1-params) for libsvtav1 and 'threads' for h264/hevc.
+ encoder_threads: int | None = None # Rename map for the observation to override the image and state keys rename_map: dict[str, str] = field(default_factory=dict) @@ -452,6 +467,9 @@ def record(cfg: RecordConfig) -> LeRobotDataset: root=cfg.dataset.root, batch_encoding_size=cfg.dataset.video_encoding_batch_size, vcodec=cfg.dataset.vcodec, + streaming_encoding=cfg.dataset.streaming_encoding, + encoder_queue_maxsize=cfg.dataset.encoder_queue_maxsize, + encoder_threads=cfg.dataset.encoder_threads, ) if hasattr(robot, "cameras") and len(robot.cameras) > 0: @@ -474,6 +492,9 @@ def record(cfg: RecordConfig) -> LeRobotDataset: image_writer_threads=cfg.dataset.num_image_writer_threads_per_camera * len(robot.cameras), batch_encoding_size=cfg.dataset.video_encoding_batch_size, vcodec=cfg.dataset.vcodec, + streaming_encoding=cfg.dataset.streaming_encoding, + encoder_queue_maxsize=cfg.dataset.encoder_queue_maxsize, + encoder_threads=cfg.dataset.encoder_threads, ) # Load pretrained policy @@ -497,6 +518,11 @@ def record(cfg: RecordConfig) -> LeRobotDataset: listener, events = init_keyboard_listener() + if not cfg.dataset.streaming_encoding: + logging.info( + "Streaming encoding is disabled. If you have capable hardware, consider enabling it for way faster episode saving. --dataset.streaming_encoding=true --dataset.encoder_threads=2 # --dataset.vcodec=auto. 
More info in the documentation: https://huggingface.co/docs/lerobot/streaming_video_encoding" + ) + with VideoEncodingManager(dataset): recorded_episodes = 0 while recorded_episodes < cfg.dataset.num_episodes and not events["stop_recording"]: diff --git a/tests/datasets/test_datasets.py b/tests/datasets/test_datasets.py index 27c51b3c4..6f99eb301 100644 --- a/tests/datasets/test_datasets.py +++ b/tests/datasets/test_datasets.py @@ -31,7 +31,6 @@ from lerobot.configs.train import TrainPipelineConfig from lerobot.datasets.factory import make_dataset from lerobot.datasets.image_writer import image_array_to_pil_image from lerobot.datasets.lerobot_dataset import ( - VALID_VIDEO_CODECS, LeRobotDataset, MultiLeRobotDataset, _encode_video_worker, @@ -45,6 +44,7 @@ from lerobot.datasets.utils import ( hf_transform_to_torch, hw_to_dataset_features, ) +from lerobot.datasets.video_utils import VALID_VIDEO_CODECS from lerobot.envs.factory import make_env_config from lerobot.policies.factory import make_policy_config from lerobot.robots import make_robot_from_config @@ -393,7 +393,7 @@ def test_tmp_mixed_deletion(tmp_path, empty_lerobot_dataset_factory): vid_key: {"dtype": "video", "shape": DUMMY_HWC, "names": ["height", "width", "channels"]}, } ds_mixed = empty_lerobot_dataset_factory( - root=tmp_path / "mixed", features=features_mixed, batch_encoding_size=2 + root=tmp_path / "mixed", features=features_mixed, batch_encoding_size=2, streaming_encoding=False ) ds_mixed.add_frame( { @@ -1450,7 +1450,10 @@ def test_valid_video_codecs_constant(): assert "h264" in VALID_VIDEO_CODECS assert "hevc" in VALID_VIDEO_CODECS assert "libsvtav1" in VALID_VIDEO_CODECS - assert len(VALID_VIDEO_CODECS) == 3 + assert "auto" in VALID_VIDEO_CODECS + assert "h264_videotoolbox" in VALID_VIDEO_CODECS + assert "h264_nvenc" in VALID_VIDEO_CODECS + assert len(VALID_VIDEO_CODECS) == 10 def test_delta_timestamps_with_episodes_filter(tmp_path, empty_lerobot_dataset_factory): diff --git 
a/tests/datasets/test_streaming_video_encoder.py b/tests/datasets/test_streaming_video_encoder.py new file mode 100644 index 000000000..a85db6a8d --- /dev/null +++ b/tests/datasets/test_streaming_video_encoder.py @@ -0,0 +1,730 @@ +#!/usr/bin/env python + +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for streaming video encoding and hardware-accelerated encoding.""" + +import queue +import threading +from unittest.mock import patch + +import av +import numpy as np +import pytest + +from lerobot.datasets.video_utils import ( + VALID_VIDEO_CODECS, + StreamingVideoEncoder, + _CameraEncoderThread, + _get_codec_options, + detect_available_hw_encoders, + resolve_vcodec, +) +from lerobot.utils.constants import OBS_IMAGES + +# ─── _get_codec_options tests ─── + + +class TestGetCodecOptions: + def test_libsvtav1_defaults(self): + opts = _get_codec_options("libsvtav1") + assert opts["g"] == "2" + assert opts["crf"] == "30" + assert opts["preset"] == "12" + + def test_libsvtav1_custom_preset(self): + opts = _get_codec_options("libsvtav1", preset=8) + assert opts["preset"] == "8" + + def test_h264_options(self): + opts = _get_codec_options("h264", g=10, crf=23) + assert opts["g"] == "10" + assert opts["crf"] == "23" + assert "preset" not in opts + + def test_videotoolbox_options(self): + opts = _get_codec_options("h264_videotoolbox", g=2, crf=30) + assert opts["g"] == "2" + # CRF 30 maps to quality = 
max(1, min(100, 100 - 30*2)) = 40 + assert opts["q:v"] == "40" + assert "crf" not in opts + + def test_nvenc_options(self): + opts = _get_codec_options("h264_nvenc", g=2, crf=25) + assert opts["rc"] == "constqp" + assert opts["qp"] == "25" + assert "crf" not in opts + # NVENC doesn't support g + assert "g" not in opts + + def test_vaapi_options(self): + opts = _get_codec_options("h264_vaapi", crf=28) + assert opts["qp"] == "28" + + def test_qsv_options(self): + opts = _get_codec_options("h264_qsv", crf=25) + assert opts["global_quality"] == "25" + + def test_no_g_no_crf(self): + opts = _get_codec_options("h264", g=None, crf=None) + assert "g" not in opts + assert "crf" not in opts + + +# ─── HW encoder detection tests ─── + + +class TestHWEncoderDetection: + def test_detect_available_hw_encoders_returns_list(self): + result = detect_available_hw_encoders() + assert isinstance(result, list) + + def test_detect_available_hw_encoders_only_valid(self): + from lerobot.datasets.video_utils import HW_ENCODERS + + result = detect_available_hw_encoders() + for encoder in result: + assert encoder in HW_ENCODERS + + def test_resolve_vcodec_passthrough(self): + assert resolve_vcodec("libsvtav1") == "libsvtav1" + assert resolve_vcodec("h264") == "h264" + + def test_resolve_vcodec_auto_fallback(self): + """When no HW encoders are available, auto should fall back to libsvtav1.""" + with patch("lerobot.datasets.video_utils.detect_available_hw_encoders", return_value=[]): + assert resolve_vcodec("auto") == "libsvtav1" + + def test_resolve_vcodec_auto_picks_hw(self): + """When a HW encoder is available, auto should pick it.""" + with patch( + "lerobot.datasets.video_utils.detect_available_hw_encoders", + return_value=["h264_videotoolbox"], + ): + assert resolve_vcodec("auto") == "h264_videotoolbox" + + def test_resolve_vcodec_auto_returns_valid(self): + """Test that resolve_vcodec('auto') returns a known valid codec.""" + result = resolve_vcodec("auto") + assert result in 
VALID_VIDEO_CODECS + + def test_hw_encoder_names_accepted_in_validation(self): + """Test that HW encoder names pass validation in VALID_VIDEO_CODECS.""" + assert "auto" in VALID_VIDEO_CODECS + assert "h264_videotoolbox" in VALID_VIDEO_CODECS + assert "h264_nvenc" in VALID_VIDEO_CODECS + + def test_resolve_vcodec_invalid_raises(self): + """Test that resolve_vcodec raises ValueError for invalid codecs.""" + with pytest.raises(ValueError, match="Invalid vcodec"): + resolve_vcodec("not_a_real_codec") + + +# ─── _CameraEncoderThread tests ─── + + +class TestCameraEncoderThread: + def test_encodes_valid_mp4(self, tmp_path): + """Test that the encoder thread creates a valid MP4 file with correct frame count.""" + num_frames = 30 + height, width = 64, 96 + fps = 30 + video_path = tmp_path / "test_output" / "test.mp4" + + frame_queue: queue.Queue = queue.Queue(maxsize=60) + result_queue: queue.Queue = queue.Queue(maxsize=1) + stop_event = threading.Event() + + encoder_thread = _CameraEncoderThread( + video_path=video_path, + fps=fps, + vcodec="libsvtav1", + pix_fmt="yuv420p", + g=2, + crf=30, + preset=13, + frame_queue=frame_queue, + result_queue=result_queue, + stop_event=stop_event, + ) + encoder_thread.start() + + # Feed frames (HWC uint8) + for _ in range(num_frames): + frame = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8) + frame_queue.put(frame) + + # Send sentinel + frame_queue.put(None) + encoder_thread.join(timeout=60) + assert not encoder_thread.is_alive() + + # Check result + status, data = result_queue.get(timeout=5) + assert status == "ok" + assert data is not None # Stats should be returned + assert "mean" in data + assert "std" in data + assert "min" in data + assert "max" in data + assert "count" in data + + # Verify the MP4 file is valid + assert video_path.exists() + with av.open(str(video_path)) as container: + stream = container.streams.video[0] + # The frame count should match + total_frames = sum(1 for _ in container.decode(stream)) + 
assert total_frames == num_frames + + def test_handles_chw_input(self, tmp_path): + """Test that CHW format input is handled correctly.""" + num_frames = 5 + fps = 30 + video_path = tmp_path / "test_chw" / "test.mp4" + + frame_queue: queue.Queue = queue.Queue(maxsize=60) + result_queue: queue.Queue = queue.Queue(maxsize=1) + stop_event = threading.Event() + + encoder_thread = _CameraEncoderThread( + video_path=video_path, + fps=fps, + vcodec="libsvtav1", + pix_fmt="yuv420p", + g=2, + crf=30, + preset=13, + frame_queue=frame_queue, + result_queue=result_queue, + stop_event=stop_event, + ) + encoder_thread.start() + + # Feed CHW frames + for _ in range(num_frames): + frame = np.random.randint(0, 255, (3, 64, 96), dtype=np.uint8) + frame_queue.put(frame) + + frame_queue.put(None) + encoder_thread.join(timeout=60) + + status, _ = result_queue.get(timeout=5) + assert status == "ok" + assert video_path.exists() + + def test_stop_event_cancellation(self, tmp_path): + """Test that setting the stop event causes the thread to exit.""" + fps = 30 + video_path = tmp_path / "test_cancel" / "test.mp4" + + frame_queue: queue.Queue = queue.Queue(maxsize=60) + result_queue: queue.Queue = queue.Queue(maxsize=1) + stop_event = threading.Event() + + encoder_thread = _CameraEncoderThread( + video_path=video_path, + fps=fps, + vcodec="libsvtav1", + pix_fmt="yuv420p", + g=2, + crf=30, + preset=13, + frame_queue=frame_queue, + result_queue=result_queue, + stop_event=stop_event, + ) + encoder_thread.start() + + # Feed a few frames + for _ in range(3): + frame = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + frame_queue.put(frame) + + # Signal stop instead of sending sentinel + stop_event.set() + encoder_thread.join(timeout=10) + assert not encoder_thread.is_alive() + + +# ─── StreamingVideoEncoder tests ─── + + +class TestStreamingVideoEncoder: + def test_single_camera_episode(self, tmp_path): + """Test encoding a single camera episode.""" + encoder = 
StreamingVideoEncoder(fps=30, vcodec="libsvtav1", pix_fmt="yuv420p", g=2, crf=30, preset=13) + + video_keys = [f"{OBS_IMAGES}.laptop"] + encoder.start_episode(video_keys, tmp_path) + + num_frames = 20 + for _ in range(num_frames): + frame = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + encoder.feed_frame(f"{OBS_IMAGES}.laptop", frame) + + results = encoder.finish_episode() + assert f"{OBS_IMAGES}.laptop" in results + + mp4_path, stats = results[f"{OBS_IMAGES}.laptop"] + assert mp4_path.exists() + assert stats is not None + + # Verify frame count + with av.open(str(mp4_path)) as container: + stream = container.streams.video[0] + total_frames = sum(1 for _ in container.decode(stream)) + assert total_frames == num_frames + + encoder.close() + + def test_multi_camera_episode(self, tmp_path): + """Test encoding multiple cameras simultaneously.""" + encoder = StreamingVideoEncoder(fps=30, vcodec="libsvtav1", pix_fmt="yuv420p", g=2, crf=30) + + video_keys = [f"{OBS_IMAGES}.laptop", f"{OBS_IMAGES}.phone"] + encoder.start_episode(video_keys, tmp_path) + + num_frames = 15 + for _ in range(num_frames): + frame0 = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + frame1 = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + encoder.feed_frame(video_keys[0], frame0) + encoder.feed_frame(video_keys[1], frame1) + + results = encoder.finish_episode() + + for key in video_keys: + assert key in results + mp4_path, stats = results[key] + assert mp4_path.exists() + assert stats is not None + + encoder.close() + + def test_sequential_episodes(self, tmp_path): + """Test that multiple sequential episodes work correctly.""" + encoder = StreamingVideoEncoder(fps=30, vcodec="libsvtav1", pix_fmt="yuv420p", g=2, crf=30) + video_keys = [f"{OBS_IMAGES}.cam"] + + for ep in range(3): + encoder.start_episode(video_keys, tmp_path) + num_frames = 10 + ep * 5 + for _ in range(num_frames): + frame = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + 
encoder.feed_frame(f"{OBS_IMAGES}.cam", frame) + results = encoder.finish_episode() + + mp4_path, stats = results[f"{OBS_IMAGES}.cam"] + assert mp4_path.exists() + + with av.open(str(mp4_path)) as container: + stream = container.streams.video[0] + total_frames = sum(1 for _ in container.decode(stream)) + assert total_frames == num_frames + + encoder.close() + + def test_cancel_episode(self, tmp_path): + """Test that canceling an episode cleans up properly.""" + encoder = StreamingVideoEncoder(fps=30, vcodec="libsvtav1", pix_fmt="yuv420p", g=2, crf=30) + video_keys = [f"{OBS_IMAGES}.cam"] + + encoder.start_episode(video_keys, tmp_path) + + for _ in range(5): + frame = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + encoder.feed_frame(f"{OBS_IMAGES}.cam", frame) + + encoder.cancel_episode() + + # Should be able to start a new episode after cancel + encoder.start_episode(video_keys, tmp_path) + for _ in range(5): + frame = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + encoder.feed_frame(f"{OBS_IMAGES}.cam", frame) + results = encoder.finish_episode() + + assert f"{OBS_IMAGES}.cam" in results + encoder.close() + + def test_feed_without_start_raises(self, tmp_path): + """Test that feeding frames without starting an episode raises.""" + encoder = StreamingVideoEncoder(fps=30, vcodec="libsvtav1", pix_fmt="yuv420p") + with pytest.raises(RuntimeError, match="No active episode"): + encoder.feed_frame("cam", np.zeros((64, 96, 3), dtype=np.uint8)) + encoder.close() + + def test_finish_without_start_raises(self, tmp_path): + """Test that finishing without starting raises.""" + encoder = StreamingVideoEncoder(fps=30, vcodec="libsvtav1", pix_fmt="yuv420p") + with pytest.raises(RuntimeError, match="No active episode"): + encoder.finish_episode() + encoder.close() + + def test_close_is_idempotent(self, tmp_path): + """Test that close() can be called multiple times safely.""" + encoder = StreamingVideoEncoder(fps=30, vcodec="libsvtav1", pix_fmt="yuv420p") + 
encoder.close() + encoder.close() # Should not raise + + def test_video_duration_matches_frame_count(self, tmp_path): + """Test that encoded video duration matches num_frames / fps.""" + encoder = StreamingVideoEncoder(fps=30, vcodec="libsvtav1", pix_fmt="yuv420p", g=2, crf=30, preset=13) + video_keys = [f"{OBS_IMAGES}.cam"] + encoder.start_episode(video_keys, tmp_path) + + num_frames = 90 # 3 seconds at 30fps + for _ in range(num_frames): + frame = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + encoder.feed_frame(f"{OBS_IMAGES}.cam", frame) + + results = encoder.finish_episode() + mp4_path, _ = results[f"{OBS_IMAGES}.cam"] + + expected_duration = num_frames / 30.0 # 3.0 seconds + + with av.open(str(mp4_path)) as container: + stream = container.streams.video[0] + total_frames = sum(1 for _ in container.decode(stream)) + if stream.duration is not None: + actual_duration = float(stream.duration * stream.time_base) + else: + actual_duration = float(container.duration / av.time_base) + + assert total_frames == num_frames + # Allow small tolerance for duration due to codec framing + assert abs(actual_duration - expected_duration) < 0.5, ( + f"Video duration {actual_duration:.2f}s != expected {expected_duration:.2f}s" + ) + + encoder.close() + + def test_multi_camera_start_episode_called_once(self, tmp_path): + """Test that with multiple cameras, no frames are lost due to double start_episode.""" + encoder = StreamingVideoEncoder(fps=30, vcodec="libsvtav1", pix_fmt="yuv420p", g=2, crf=30) + + video_keys = [f"{OBS_IMAGES}.cam1", f"{OBS_IMAGES}.cam2"] + encoder.start_episode(video_keys, tmp_path) + + num_frames = 30 + for _ in range(num_frames): + frame0 = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + frame1 = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + encoder.feed_frame(video_keys[0], frame0) + encoder.feed_frame(video_keys[1], frame1) + + results = encoder.finish_episode() + + # Both cameras should have all frames + for key in 
video_keys: + mp4_path, stats = results[key] + assert mp4_path.exists() + with av.open(str(mp4_path)) as container: + stream = container.streams.video[0] + total_frames = sum(1 for _ in container.decode(stream)) + assert total_frames == num_frames, ( + f"Camera {key}: expected {num_frames} frames, got {total_frames}" + ) + + encoder.close() + + def test_encoder_threads_passed_to_thread(self, tmp_path): + """Test that encoder_threads is stored and passed through to encoder threads.""" + encoder = StreamingVideoEncoder( + fps=30, vcodec="libsvtav1", pix_fmt="yuv420p", g=2, crf=30, encoder_threads=2 + ) + assert encoder.encoder_threads == 2 + + video_keys = [f"{OBS_IMAGES}.cam"] + encoder.start_episode(video_keys, tmp_path) + + # Verify the thread received the encoder_threads value + thread = encoder._threads[f"{OBS_IMAGES}.cam"] + assert thread.encoder_threads == 2 + + # Feed some frames and finish to ensure it works end-to-end + num_frames = 10 + for _ in range(num_frames): + frame = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + encoder.feed_frame(f"{OBS_IMAGES}.cam", frame) + + results = encoder.finish_episode() + mp4_path, stats = results[f"{OBS_IMAGES}.cam"] + assert mp4_path.exists() + assert stats is not None + + with av.open(str(mp4_path)) as container: + stream = container.streams.video[0] + total_frames = sum(1 for _ in container.decode(stream)) + assert total_frames == num_frames + + encoder.close() + + def test_encoder_threads_none_by_default(self, tmp_path): + """Test that encoder_threads defaults to None (codec auto-detect).""" + encoder = StreamingVideoEncoder(fps=30, vcodec="libsvtav1", pix_fmt="yuv420p") + assert encoder.encoder_threads is None + encoder.close() + + def test_graceful_frame_dropping(self, tmp_path): + """Test that full queue drops frames instead of crashing.""" + encoder = StreamingVideoEncoder( + fps=30, vcodec="libsvtav1", pix_fmt="yuv420p", g=2, crf=30, preset=13, queue_maxsize=1 + ) + video_keys = [f"{OBS_IMAGES}.cam"] + 
encoder.start_episode(video_keys, tmp_path) + + # Feed many frames quickly - with queue_maxsize=1, some will be dropped + num_frames = 50 + for _ in range(num_frames): + frame = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8) + encoder.feed_frame(f"{OBS_IMAGES}.cam", frame) + + # Should not raise - frames are dropped gracefully + results = encoder.finish_episode() + assert f"{OBS_IMAGES}.cam" in results + + mp4_path, _ = results[f"{OBS_IMAGES}.cam"] + assert mp4_path.exists() + + # Some frames should have been dropped (queue was tiny) + dropped = encoder._dropped_frames.get(f"{OBS_IMAGES}.cam", 0) + # We can't guarantee drops but can verify no crash occurred + assert dropped >= 0 + + encoder.close() + + +# ─── Integration tests with LeRobotDataset ─── + + +class TestStreamingEncoderIntegration: + def test_add_frame_save_episode_streaming(self, tmp_path): + """Full integration test: add_frame -> save_episode with streaming encoding.""" + from lerobot.datasets.lerobot_dataset import LeRobotDataset + + features = { + "observation.images.cam": { + "dtype": "video", + "shape": (64, 96, 3), + "names": ["height", "width", "channels"], + }, + "action": {"dtype": "float32", "shape": (6,), "names": ["j1", "j2", "j3", "j4", "j5", "j6"]}, + } + + dataset = LeRobotDataset.create( + repo_id="test/streaming", + fps=30, + features=features, + root=tmp_path / "streaming_test", + use_videos=True, + streaming_encoding=True, + ) + + assert dataset._streaming_encoder is not None + + num_frames = 20 + for _ in range(num_frames): + frame = { + "observation.images.cam": np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8), + "action": np.random.randn(6).astype(np.float32), + "task": "test task", + } + dataset.add_frame(frame) + + dataset.save_episode() + + # Verify dataset metadata + assert dataset.meta.total_episodes == 1 + assert dataset.meta.total_frames == num_frames + + # Verify stats exist for the video key + assert dataset.meta.stats is not None + assert 
"observation.images.cam" in dataset.meta.stats + assert "action" in dataset.meta.stats + + dataset.finalize() + + def test_streaming_disabled_creates_pngs(self, tmp_path): + """Test that disabling streaming encoding falls back to PNG path.""" + from lerobot.datasets.lerobot_dataset import LeRobotDataset + + features = { + "observation.images.cam": { + "dtype": "video", + "shape": (64, 96, 3), + "names": ["height", "width", "channels"], + }, + "action": {"dtype": "float32", "shape": (6,), "names": ["j1", "j2", "j3", "j4", "j5", "j6"]}, + } + + dataset = LeRobotDataset.create( + repo_id="test/no_streaming", + fps=30, + features=features, + root=tmp_path / "no_streaming_test", + use_videos=True, + streaming_encoding=False, + ) + + assert dataset._streaming_encoder is None + + num_frames = 5 + for _ in range(num_frames): + frame = { + "observation.images.cam": np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8), + "action": np.random.randn(6).astype(np.float32), + "task": "test task", + } + dataset.add_frame(frame) + + # With streaming disabled, PNG files should be written + images_dir = dataset.root / "images" + assert images_dir.exists() + + dataset.save_episode() + dataset.finalize() + + def test_multi_episode_streaming(self, tmp_path): + """Test recording multiple episodes with streaming encoding.""" + from lerobot.datasets.lerobot_dataset import LeRobotDataset + + features = { + "observation.images.cam": { + "dtype": "video", + "shape": (64, 96, 3), + "names": ["height", "width", "channels"], + }, + "action": {"dtype": "float32", "shape": (2,), "names": ["j1", "j2"]}, + } + + dataset = LeRobotDataset.create( + repo_id="test/multi_ep", + fps=30, + features=features, + root=tmp_path / "multi_ep_test", + use_videos=True, + streaming_encoding=True, + ) + + for ep in range(3): + num_frames = 10 + ep * 5 + for _ in range(num_frames): + frame = { + "observation.images.cam": np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8), + "action": 
np.random.randn(2).astype(np.float32), + "task": f"task_{ep}", + } + dataset.add_frame(frame) + dataset.save_episode() + + assert dataset.meta.total_episodes == 3 + assert dataset.meta.total_frames == 10 + 15 + 20 + + dataset.finalize() + + def test_clear_episode_buffer_cancels_streaming(self, tmp_path): + """Test that clearing episode buffer cancels streaming encoding.""" + from lerobot.datasets.lerobot_dataset import LeRobotDataset + + features = { + "observation.images.cam": { + "dtype": "video", + "shape": (64, 96, 3), + "names": ["height", "width", "channels"], + }, + "action": {"dtype": "float32", "shape": (2,), "names": ["j1", "j2"]}, + } + + dataset = LeRobotDataset.create( + repo_id="test/cancel", + fps=30, + features=features, + root=tmp_path / "cancel_test", + use_videos=True, + streaming_encoding=True, + ) + + # Add some frames + for _ in range(5): + frame = { + "observation.images.cam": np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8), + "action": np.random.randn(2).astype(np.float32), + "task": "task", + } + dataset.add_frame(frame) + + # Cancel and re-record + dataset.clear_episode_buffer() + + # Record a new episode + for _ in range(10): + frame = { + "observation.images.cam": np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8), + "action": np.random.randn(2).astype(np.float32), + "task": "task", + } + dataset.add_frame(frame) + dataset.save_episode() + + assert dataset.meta.total_episodes == 1 + assert dataset.meta.total_frames == 10 + + dataset.finalize() + + def test_multi_camera_add_frame_streaming(self, tmp_path): + """Test that start_episode is called once with multiple video keys.""" + from lerobot.datasets.lerobot_dataset import LeRobotDataset + + features = { + "observation.images.cam1": { + "dtype": "video", + "shape": (64, 96, 3), + "names": ["height", "width", "channels"], + }, + "observation.images.cam2": { + "dtype": "video", + "shape": (64, 96, 3), + "names": ["height", "width", "channels"], + }, + "action": {"dtype": 
"float32", "shape": (2,), "names": ["j1", "j2"]}, + } + + dataset = LeRobotDataset.create( + repo_id="test/multi_cam", + fps=30, + features=features, + root=tmp_path / "multi_cam_test", + use_videos=True, + streaming_encoding=True, + ) + + num_frames = 15 + for _ in range(num_frames): + frame = { + "observation.images.cam1": np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8), + "observation.images.cam2": np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8), + "action": np.random.randn(2).astype(np.float32), + "task": "test task", + } + dataset.add_frame(frame) + + dataset.save_episode() + + assert dataset.meta.total_episodes == 1 + assert dataset.meta.total_frames == num_frames + + dataset.finalize() From a0c5d193919cccc1125c7d03f4af94013d400b9d Mon Sep 17 00:00:00 2001 From: Yueci Deng Date: Mon, 23 Feb 2026 23:32:59 +0800 Subject: [PATCH 16/21] add metadata_buffer_size to dataset creation (#2998) Signed-off-by: Steven Palma Co-authored-by: Steven Palma --- src/lerobot/datasets/lerobot_dataset.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/lerobot/datasets/lerobot_dataset.py b/src/lerobot/datasets/lerobot_dataset.py index 65b475e26..83d452a44 100644 --- a/src/lerobot/datasets/lerobot_dataset.py +++ b/src/lerobot/datasets/lerobot_dataset.py @@ -1653,6 +1653,7 @@ class LeRobotDataset(torch.utils.data.Dataset): video_backend: str | None = None, batch_encoding_size: int = 1, vcodec: str = "libsvtav1", + metadata_buffer_size: int = 10, streaming_encoding: bool = False, encoder_queue_maxsize: int = 30, encoder_threads: int | None = None, @@ -1667,6 +1668,7 @@ class LeRobotDataset(torch.utils.data.Dataset): features=features, root=root, use_videos=use_videos, + metadata_buffer_size=metadata_buffer_size, ) obj.repo_id = obj.meta.repo_id obj.root = obj.meta.root From 544cbc5f3874d20f7d0f388ef318dc4838c0906f Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Mon, 23 Feb 2026 16:39:04 +0100 Subject: [PATCH 17/21] feat(motors): add RobStride CAN 
implementation (#2821) * feat(motors): add initial implementation of robstride Co-authored-by: Virgile * chore(motors): solve some linter * remove kp/kd attribute * code uniformisation between damiao and robstride * remove normalization warning * remove non valid baudrates and small docstring update * remove all useless files. Only keeping robstride.py and table.py * typing for mypy * reduce NameOrId usage * align signature with damiao * put the same helper than in the damiao implementation * bug correction : expect a response after each bus.send --------- Co-authored-by: Virgile --- pyproject.toml | 4 +- src/lerobot/motors/robstride/__init__.py | 18 + src/lerobot/motors/robstride/robstride.py | 1003 +++++++++++++++++++++ src/lerobot/motors/robstride/tables.py | 120 +++ src/lerobot/scripts/lerobot_setup_can.py | 1 + 5 files changed, 1145 insertions(+), 1 deletion(-) create mode 100644 src/lerobot/motors/robstride/__init__.py create mode 100644 src/lerobot/motors/robstride/robstride.py create mode 100644 src/lerobot/motors/robstride/tables.py diff --git a/pyproject.toml b/pyproject.toml index 0ca1f0432..ea3df4a6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -98,11 +98,13 @@ pygame-dep = ["pygame>=2.5.1,<2.7.0"] placo-dep = ["placo>=0.9.6,<0.10.0"] transformers-dep = ["transformers>=4.57.1,<5.0.0"] grpcio-dep = ["grpcio==1.73.1", "protobuf>=6.31.1,<6.32.0"] +can-dep = ["python-can>=4.2.0,<5.0.0"] # Motors feetech = ["feetech-servo-sdk>=1.0.0,<2.0.0"] dynamixel = ["dynamixel-sdk>=3.7.31,<3.9.0"] -damiao = ["python-can>=4.2.0,<5.0.0"] +damiao = ["lerobot[can-dep]"] +robstride = ["lerobot[can-dep]"] # Robots openarms = ["lerobot[damiao]"] diff --git a/src/lerobot/motors/robstride/__init__.py b/src/lerobot/motors/robstride/__init__.py new file mode 100644 index 000000000..7933ac6fa --- /dev/null +++ b/src/lerobot/motors/robstride/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .robstride import RobstrideMotorsBus +from .tables import * diff --git a/src/lerobot/motors/robstride/robstride.py b/src/lerobot/motors/robstride/robstride.py new file mode 100644 index 000000000..f47e41509 --- /dev/null +++ b/src/lerobot/motors/robstride/robstride.py @@ -0,0 +1,1003 @@ +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+# TODO(Virgile): Robustify mode control; only the MIT protocol is implemented for now
+
+import logging
+import time
+from contextlib import contextmanager
+from copy import deepcopy
+from functools import cached_property
+from types import SimpleNamespace
+from typing import TYPE_CHECKING, Any, TypedDict
+
+from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
+from lerobot.utils.import_utils import _can_available
+
+if TYPE_CHECKING or _can_available:
+    import can
+else:
+    can = SimpleNamespace(Message=object, interface=None)
+import numpy as np
+
+from lerobot.utils.errors import DeviceNotConnectedError
+from lerobot.utils.utils import enter_pressed, move_cursor_up
+
+from ..motors_bus import Motor, MotorCalibration, MotorsBusBase, NameOrID, Value
+from .tables import (
+    AVAILABLE_BAUDRATES,
+    CAN_CMD_CLEAR_FAULT,
+    CAN_CMD_DISABLE,
+    CAN_CMD_ENABLE,
+    CAN_CMD_SET_ZERO,
+    DEFAULT_BAUDRATE,
+    DEFAULT_TIMEOUT_MS,
+    MODEL_RESOLUTION,
+    MOTOR_LIMIT_PARAMS,
+    NORMALIZED_DATA,
+    PARAM_TIMEOUT,
+    RUNNING_TIMEOUT,
+    STATE_CACHE_TTL_S,
+    ControlMode,
+    MotorType,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class MotorState(TypedDict):
+    position: float
+    velocity: float
+    torque: float
+    temp_mos: float
+    temp_rotor: float
+
+
+class RobstrideMotorsBus(MotorsBusBase):
+    """
+    The Robstride implementation for a MotorsBus using CAN bus communication.
+
+    This class uses python-can for CAN bus communication with Robstride motors.
+    The motors need to be switched to MIT control mode to be compatible with this implementation.
+ More details on the protocol can be found in the documentation links below: + - python-can documentation: https://python-can.readthedocs.io/en/stable/ + - Robstride CAN protocol: https://github.com/RobStride/MotorStudio + """ + + # CAN-specific settings + available_baudrates = deepcopy(AVAILABLE_BAUDRATES) + default_baudrate = DEFAULT_BAUDRATE + default_timeout = DEFAULT_TIMEOUT_MS + + # Motor configuration + model_resolution_table = deepcopy(MODEL_RESOLUTION) + normalized_data = deepcopy(NORMALIZED_DATA) + + def __init__( + self, + port: str, + motors: dict[str, Motor], + calibration: dict[str, MotorCalibration] | None = None, + can_interface: str = "auto", + use_can_fd: bool = True, + bitrate: int = 1000000, + data_bitrate: int | None = 5000000, + ): + """ + Initialize the Robstride motors bus. + + Args: + port: CAN interface name (e.g., "can0" for Linux, "/dev/cu.usbmodem*" for macOS) + motors: Dictionary mapping motor names to Motor objects + calibration: Optional calibration data + can_interface: CAN interface type - "auto" (default), "socketcan" (Linux), or "slcan" (macOS/serial) + use_can_fd: Whether to use CAN FD mode (default: True for OpenArms) + bitrate: Nominal bitrate in bps (default: 1000000 = 1 Mbps) + data_bitrate: Data bitrate for CAN FD in bps (default: 5000000 = 5 Mbps), ignored if use_can_fd is False + """ + super().__init__(port, motors, calibration) + self.port = port + self.can_interface = can_interface + self.use_can_fd = use_can_fd + self.bitrate = bitrate + self.data_bitrate = data_bitrate + self.canbus: can.BusABC | None = None + self._is_connected = False + + # Map motor names to CAN IDs + self._motor_can_ids: dict[str, int] = {} + self._recv_id_to_motor: dict[int, str] = {} + + # Store motor types and recv IDs + self._motor_types: dict[str, MotorType] = {} + # Dynamic gains storage (Damiao-style update path via write/sync_write) + self._gains: dict[str, dict[str, float]] = {} + for name, motor in self.motors.items(): + if 
motor.motor_type_str is not None:
+                self._motor_types[name] = getattr(MotorType, motor.motor_type_str.upper())
+            else:
+                # Default to O0 if not specified
+                self._motor_types[name] = MotorType.O0
+
+            # Damiao-style defaults: fixed gains at startup for every motor.
+            self._gains[name] = {"kp": 10.0, "kd": 0.5}
+
+            # Map recv_id to motor name for filtering responses
+            if motor.recv_id is not None:
+                self._recv_id_to_motor[motor.recv_id] = name
+        # Motor Mode
+        self.enabled: dict[str, bool] = {}
+        self.operation_mode: dict[str, ControlMode] = {}
+        self._last_known_states: dict[str, MotorState] = {
+            name: {
+                "position": 0.0,
+                "velocity": 0.0,
+                "torque": 0.0,
+                "temp_mos": 0.0,
+                "temp_rotor": 0.0,
+            }
+            for name in self.motors
+        }
+        self.last_feedback_time: dict[str, float | None] = {}
+        self._id_to_name: dict[int, str] = {}
+        for name in self.motors:
+            self.enabled[name] = False
+            self.operation_mode[name] = ControlMode.MIT  # default mode
+            self.last_feedback_time[name] = None
+
+        for name, motor in self.motors.items():
+            key = motor.recv_id if motor.recv_id is not None else motor.id
+            self._id_to_name[key] = name
+
+    @property
+    def is_connected(self) -> bool:
+        """Check if the CAN bus is connected."""
+        return self._is_connected and self.canbus is not None
+
+    def _bus(self) -> can.BusABC:
+        if self.canbus is None:
+            raise DeviceNotConnectedError(f"{self.__class__.__name__}('{self.port}') is not connected.")
+        return self.canbus
+
+    @check_if_already_connected
+    def connect(self, handshake: bool = True) -> None:
+        """
+        Open the CAN bus and initialize communication.
+ + Args: + handshake: If True, ping all motors to verify they're present + """ + try: + # Auto-detect interface type based on port name + if self.can_interface == "auto": + if self.port.startswith("/dev/"): + self.can_interface = "slcan" + logger.info(f"Auto-detected slcan interface for port {self.port}") + else: + self.can_interface = "socketcan" + logger.info(f"Auto-detected socketcan interface for port {self.port}") + + kwargs = { + "channel": self.port, + "bitrate": self.bitrate, + "interface": self.can_interface, + } + + if self.can_interface == "socketcan" and self.use_can_fd and self.data_bitrate is not None: + kwargs.update({"data_bitrate": self.data_bitrate, "fd": True}) + logger.info( + f"Connected to {self.port} with CAN FD (bitrate={self.bitrate}, data_bitrate={self.data_bitrate})" + ) + else: + logger.info(f"Connected to {self.port} with {self.can_interface} (bitrate={self.bitrate})") + + self.canbus = can.interface.Bus(**kwargs) + + self._is_connected = True + + if handshake: + self._handshake() + + logger.debug(f"{self.__class__.__name__} connected via {self.can_interface}.") + except Exception as e: + self._is_connected = False + raise ConnectionError(f"Failed to connect to CAN bus: {e}") from e + + def _query_status_via_clear_fault(self, motor: NameOrID) -> tuple[bool, can.Message | None]: + motor_name = self._get_motor_name(motor) + motor_id = self._get_motor_id(motor_name) + recv_id = self._get_motor_recv_id(motor_name) + data = [0xFF] * 7 + [CAN_CMD_CLEAR_FAULT] + msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False) + self._bus().send(msg) + return self._recv_status_via_clear_fault(expected_recv_id=recv_id) + + def _recv_status_via_clear_fault( + self, expected_recv_id: int | None = None, timeout: float = RUNNING_TIMEOUT + ) -> tuple[bool, can.Message | None]: + """ + Poll the bus for a response to a fault-clear request. + + Args: + expected_recv_id: Only accept frames from this CAN ID when provided. 
+ timeout: Maximum time spent polling the bus in seconds. + + Returns: + Tuple where the first element is True if a fault frame was received, + and the second element is the CAN message (or None on timeout). + """ + start_time = time.time() + + while time.time() - start_time < timeout: + msg = self._bus().recv(timeout=RUNNING_TIMEOUT / 10) + if not msg: + continue + + if expected_recv_id is not None and msg.data[0] != expected_recv_id: + continue + + # Fault-status frame heuristic (doc-based) + fault_bits = int.from_bytes(msg.data[1:5], "little") + if fault_bits != 0 and msg.data[5] == msg.data[6] == msg.data[7] == 0: + logger.error( + f"Motor fault received from CAN ID 0x{msg.arbitration_id:02X}: " + f"fault_bits=0x{fault_bits:08X}" + ) + return True, msg + + # Otherwise: valid normal response + return False, msg + + return False, None + + def update_motor_state(self, motor: NameOrID) -> bool: + has_fault, msg = self._query_status_via_clear_fault(motor) + if msg is None: + logger.warning(f"No response received from motor '{motor}' during state update.") + raise ConnectionError(f"No response received from motor '{motor}' during state update.") + if has_fault: + logger.error(f"Fault reported by motor '{motor}' during state update. msg={msg.data.hex()}") + raise RuntimeError(f"Fault reported by motor '{motor}' during state update.") + + self._decode_motor_state(msg.data) # updates cache + return True + + def _handshake(self) -> None: + logger.info("Starting handshake with motors...") + missing_motors = [] + faulted_motors = [] + + for motor_name in self.motors: + has_fault, msg = self._query_status_via_clear_fault(motor_name) + if msg is None: + missing_motors.append(motor_name) + elif has_fault: + faulted_motors.append(motor_name) + else: + # CLEAR_FAULT responses are not guaranteed to always match the MIT feedback layout + # on all firmware versions. Handshake should not fail just because cache warm-up fails. 
+ try: + self._decode_motor_state(msg.data) + except Exception as e: + logger.debug( + "Handshake cache warm-up decode failed for motor '%s': %s", + motor_name, + e, + ) + time.sleep(0.01) + + if missing_motors or faulted_motors: + details = [] + if missing_motors: + details.append(f"did not respond: {missing_motors}") + if faulted_motors: + details.append(f"reported fault: {faulted_motors}") + raise ConnectionError("Handshake failed. " + "; ".join(details)) + + logger.info("Handshake successful. All motors ready.") + + def _switch_operation_mode(self, motor: NameOrID, mode: ControlMode) -> None: + """Switch the operation mode of a motor.""" + motor_name = self._get_motor_name(motor) + motor_id = self._get_motor_id(motor_name) + recv_id = self._get_motor_recv_id(motor_name) + data = [0xFF] * 8 + data[6] = mode.value + data[7] = 0xFC + msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False) + self._bus().send(msg) + msg = self._recv_motor_response(expected_recv_id=recv_id, timeout=PARAM_TIMEOUT) + if msg is not None: + self.operation_mode[motor_name] = mode + + @check_if_not_connected + def disconnect(self, disable_torque: bool = True) -> None: + """ + Close the CAN bus connection. 
+ + Args: + disable_torque: If True, disable torque on all motors before disconnecting + """ + if disable_torque: + try: + self.disable_torque() + except Exception as e: + logger.warning(f"Failed to disable torque during disconnect: {e}") + + if self.canbus: + self.canbus.shutdown() + self.canbus = None + self._is_connected = False + logger.debug(f"{self.__class__.__name__} disconnected.") + + def configure_motors(self) -> None: + """Configure all motors with default settings.""" + # Robstride motors don't require much configuration in MIT mode + # Just ensure they're enabled + for motor in self.motors: + self._enable_motor(self._get_motor_name(motor)) + self._switch_operation_mode(motor, ControlMode.MIT) + time.sleep(0.01) + + def switch_to_mode(self, mode: ControlMode) -> None: + """Switch operation mode on selected motors.""" + for motor in self.motors: + self._switch_operation_mode(motor, mode) + time.sleep(0.01) + + def _enable_motor(self, motor: NameOrID) -> None: + """Enable a single motor.""" + motor_id = self._get_motor_id(motor) + recv_id = self._get_motor_recv_id(motor) + data = [0xFF] * 7 + [CAN_CMD_ENABLE] + msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False) + self._bus().send(msg) + self._recv_motor_response(expected_recv_id=recv_id, timeout=PARAM_TIMEOUT) + + def _disable_motor(self, motor: NameOrID) -> None: + """Disable a single motor.""" + motor_id = self._get_motor_id(motor) + recv_id = self._get_motor_recv_id(motor) + data = [0xFF] * 7 + [CAN_CMD_DISABLE] + msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False) + self._bus().send(msg) + self._recv_motor_response(expected_recv_id=recv_id) + + def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: + """Enable torque on selected motors.""" + motors = self._get_motors_list(motors) + for motor in motors: + for _ in range(num_retry + 1): + try: + self._get_motor_name(motor) + 
self._enable_motor(self._get_motor_name(motor)) + break + except Exception as e: + if _ == num_retry: + raise e + time.sleep(0.01) + + def disable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: + """Disable torque on selected motors.""" + motors = self._get_motors_list(motors) + for motor in motors: + for _ in range(num_retry + 1): + try: + self._disable_motor(self._get_motor_name(motor)) + break + except Exception as e: + if _ == num_retry: + raise e + time.sleep(0.01) + + @contextmanager + def torque_disabled(self, motors: str | list[str] | None = None): + """ + Context manager that guarantees torque is re-enabled. + + This helper is useful to temporarily disable torque when configuring motors. + + Examples: + >>> with bus.torque_disabled(): + ... # Safe operations here with torque disabled + ... pass + """ + self.disable_torque(motors) + try: + yield + finally: + self.enable_torque(motors) + + def set_zero_position(self, motors: str | list[str] | None = None) -> None: + """Set current position as zero for selected motors.""" + motors = self._get_motors_list(motors) + for motor in motors: + motor_id = self._get_motor_id(motor) + recv_id = self._get_motor_recv_id(motor) + data = [0xFF] * 7 + [CAN_CMD_SET_ZERO] + msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False) + self._bus().send(msg) + self._recv_motor_response(expected_recv_id=recv_id) + time.sleep(0.01) + + def _recv_motor_response( + self, expected_recv_id: int | None = None, timeout: float = 0.001 + ) -> can.Message | None: + """ + Receive a response from a motor. 
+ + Args: + expected_recv_id: If provided, only return messages from this CAN ID + timeout: Timeout in seconds (default: 1ms for high-speed operation) + + Returns: + CAN message if received, None otherwise + """ + try: + start_time = time.time() + messages_seen = [] + while time.time() - start_time < timeout: + msg = self._bus().recv(timeout=RUNNING_TIMEOUT / 10) # 100us timeout for fast polling + if msg: + messages_seen.append(f"0x{msg.arbitration_id:02X}") + # If no filter specified, return any message + if expected_recv_id is None: + return msg + # Otherwise, only return if it matches the expected recv_id + if msg.data[0] == expected_recv_id: + return msg + else: + logger.debug( + f"Ignoring message from CAN ID 0x{msg.arbitration_id:02X}, expected 0x{expected_recv_id:02X}" + ) + + # Only log warnings if we're in debug mode to reduce overhead + if logger.isEnabledFor(logging.DEBUG): + if messages_seen: + logger.debug( + f"Received {len(messages_seen)} message(s) from IDs {set(messages_seen)}, but expected 0x{expected_recv_id:02X}" + ) + else: + logger.debug(f"No CAN messages received (expected from 0x{expected_recv_id:02X})") + except Exception as e: + logger.debug(f"Failed to receive CAN message: {e}") + return None + + def _recv_all_responses( + self, expected_recv_ids: list[int], timeout: float = 0.002 + ) -> dict[int, can.Message]: + """ + Efficiently receive responses from multiple motors at once. + Uses the OpenArms pattern: collect all available messages within timeout. 
+
+        Args:
+            expected_recv_ids: List of CAN IDs we expect responses from
+            timeout: Total timeout in seconds (default: 2ms)
+
+        Returns:
+            Dictionary mapping recv_id to CAN message
+        """
+        responses: dict[int, can.Message] = {}
+        expected_set = set(expected_recv_ids)
+        start_time = time.time()
+
+        try:
+            while len(responses) < len(expected_recv_ids) and (time.time() - start_time) < timeout:
+                msg = self._bus().recv(timeout=RUNNING_TIMEOUT / 10)  # 100us poll timeout
+                if msg and msg.data[0] in expected_set:
+                    responses[msg.data[0]] = msg
+                    if len(responses) == len(expected_recv_ids):
+                        break  # Got all responses, exit early
+        except Exception as e:
+            logger.debug(f"Error receiving responses: {e}")
+
+        return responses
+
+    def _speed_control(
+        self,
+        motor: NameOrID,
+        velocity_deg_per_sec: float,
+        current_limit_a: float,
+    ) -> None:
+        """
+        Send a Velocity Mode Control Command (Command 11) to a single motor.
+
+        Args:
+            motor: Motor name or CAN ID.
+            velocity_deg_per_sec: Target speed in degrees/s; converted to rad/s (32-bit float) on the wire.
+            current_limit_a: Current limit in A (32-bit float).
+        """
+        if not self.is_connected:
+            raise DeviceNotConnectedError(f"{self} is not connected.")
+
+        motor_id = self._get_motor_id(motor)
+        motor_name = self._get_motor_name(motor)
+        # Optional: ensure the motor is in velocity control mode
+
+        if self.operation_mode[motor_name] != ControlMode.VEL:
+            raise RuntimeError(f"Motor '{motor_name}' is not in velocity control mode.")
+        # Convert to rad/s to match protocol specification
+
+        velocity_rad_per_sec = np.radians(velocity_deg_per_sec)
+
+        # Encode float32 little-endian without struct (byte list)
+        def _float32_to_le_bytes(x: float) -> list[int]:
+            b = np.float32(x).tobytes()  # 4 bytes, little-endian
+            return [b[0], b[1], b[2], b[3]]
+
+        speed_bytes = _float32_to_le_bytes(velocity_rad_per_sec)
+        limit_bytes = _float32_to_le_bytes(current_limit_a)
+
+        data = speed_bytes + limit_bytes  # 8 bytes: [0–3]=speed, [4–7]=current limit
+
+        msg = can.Message(
+            arbitration_id=motor_id,
+            data=data,
+            is_extended_id=False,
+        )
+        self._bus().send(msg)
+
+        # If the protocol sends back a state-type response, decode it the same way as MIT feedback
+        recv_id = self._get_motor_recv_id(motor)
+        if recv_id is not None:
+            resp = self._recv_motor_response(expected_recv_id=recv_id)
+            if resp:
+                self._decode_motor_state(resp.data)
+
+    def _mit_control(
+        self,
+        motor: NameOrID,
+        kp: float,
+        kd: float,
+        position_degrees: float,
+        velocity_deg_per_sec: float,
+        torque: float,
+        *,
+        wait_for_response: bool = True,
+    ) -> None:
+        """
+        Send MIT control command to a motor.
+ + Args: + motor: Motor name or ID + kp: Position gain + kd: Velocity gain + position_degrees: Target position (degrees) + velocity_deg_per_sec: Target velocity (degrees/s) + torque: Target torque (N·m) + """ + motor_name = self._get_motor_name(motor) + motor_type = self._motor_types[motor_name] + if self.operation_mode[motor_name] != ControlMode.MIT: + raise RuntimeError(f"Motor '{motor_name}' is not in MIT control mode.") + motor_id = self._get_motor_id(motor) + data = self._encode_mit_packet(motor_type, kp, kd, position_degrees, velocity_deg_per_sec, torque) + msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False) + self._bus().send(msg) + + if wait_for_response: + recv_id = self._get_motor_recv_id(motor) + msg = self._recv_motor_response(expected_recv_id=recv_id) + if msg: + self._process_response(motor_name, msg) + + def _encode_mit_packet( + self, + motor_type: MotorType, + kp: float, + kd: float, + position_degrees: float, + velocity_deg_per_sec: float, + torque: float, + ) -> list[int]: + """Encode an MIT control command payload from physical units.""" + position_rad = np.radians(position_degrees) + velocity_rad_per_sec = np.radians(velocity_deg_per_sec) + pmax, vmax, tmax = MOTOR_LIMIT_PARAMS[motor_type] + + kp_uint = self._float_to_uint(kp, 0, 500, 12) + kd_uint = self._float_to_uint(kd, 0, 5, 12) + q_uint = self._float_to_uint(position_rad, -pmax, pmax, 16) + dq_uint = self._float_to_uint(velocity_rad_per_sec, -vmax, vmax, 12) + tau_uint = self._float_to_uint(torque, -tmax, tmax, 12) + + data = [0] * 8 + data[0] = (q_uint >> 8) & 0xFF + data[1] = q_uint & 0xFF + data[2] = dq_uint >> 4 + data[3] = ((dq_uint & 0xF) << 4) | ((kp_uint >> 8) & 0xF) + data[4] = kp_uint & 0xFF + data[5] = kd_uint >> 4 + data[6] = ((kd_uint & 0xF) << 4) | ((tau_uint >> 8) & 0xF) + data[7] = tau_uint & 0xFF + return data + + def _mit_control_batch( + self, + commands: dict[NameOrID, tuple[float, float, float, float, float]], + ) -> None: + """Send MIT 
commands in batch and update cache from collected responses.""" + if not commands: + return + + recv_id_to_motor: dict[int, str] = {} + for motor, (kp, kd, position_degrees, velocity_deg_per_sec, torque) in commands.items(): + motor_name = self._get_motor_name(motor) + if self.operation_mode[motor_name] != ControlMode.MIT: + raise RuntimeError(f"Motor '{motor_name}' is not in MIT control mode.") + + motor_id = self._get_motor_id(motor) + motor_type = self._motor_types[motor_name] + data = self._encode_mit_packet(motor_type, kp, kd, position_degrees, velocity_deg_per_sec, torque) + msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False) + self._bus().send(msg) + recv_id_to_motor[self._get_motor_recv_id(motor)] = motor_name + + responses = self._recv_all_responses(list(recv_id_to_motor.keys()), timeout=RUNNING_TIMEOUT) + for recv_id, motor_name in recv_id_to_motor.items(): + if msg := responses.get(recv_id): + self._process_response(motor_name, msg) + + def _float_to_uint(self, x: float, x_min: float, x_max: float, bits: int) -> int: + """Convert float to unsigned integer for CAN transmission.""" + x = max(x_min, min(x_max, x)) # Clamp to range + span = x_max - x_min + data_norm = (x - x_min) / span + return int(data_norm * ((1 << bits) - 1)) + + def _uint_to_float(self, x: int, x_min: float, x_max: float, bits: int) -> float: + """Convert unsigned integer from CAN to float.""" + span = x_max - x_min + data_norm = float(x) / ((1 << bits) - 1) + return data_norm * span + x_min + + def _decode_motor_state(self, data: bytearray | bytes) -> tuple[float, float, float, float]: + """ + Decode motor state from CAN data. 
+ + Returns: + Tuple of (position_degrees, velocity_deg_per_sec, torque, temp_mos) + """ + if len(data) < 8: + raise ValueError("Invalid motor state data") + + # Extract encoded values + motor_id = data[0] + motor_name = self._id_to_name[motor_id] + q_uint = (data[1] << 8) | data[2] + dq_uint = (data[3] << 4) | (data[4] >> 4) + tau_uint = ((data[4] & 0x0F) << 8) | data[5] + t_mos = (data[6] << 8) | data[7] + + motor_type = self._motor_types[motor_name] + # Get motor limits + pmax, vmax, tmax = MOTOR_LIMIT_PARAMS[motor_type] + + # Decode to physical values (radians) + position_rad = self._uint_to_float(q_uint, -pmax, pmax, 16) + velocity_rad_per_sec = self._uint_to_float(dq_uint, -vmax, vmax, 12) + torque = self._uint_to_float(tau_uint, -tmax, tmax, 12) + + # Convert to degrees + position_degrees = np.degrees(position_rad) + velocity_deg_per_sec = np.degrees(velocity_rad_per_sec) + + # Update cached state + self.last_feedback_time[motor_name] = time.time() + self._last_known_states[motor_name] = { + "position": position_degrees, + "velocity": velocity_deg_per_sec, + "torque": torque, + "temp_mos": t_mos / 10, + # Not available in Robstride MIT feedback. 
+ "temp_rotor": 0.0, + } + return position_degrees, velocity_deg_per_sec, torque, t_mos / 10 + + def _process_response(self, motor: str, msg: can.Message) -> None: + """Decode a feedback frame and update the cache for one motor.""" + try: + self._decode_motor_state(msg.data) + except Exception as e: + logger.warning(f"Failed to decode response from {motor}: {e}") + + def _get_cached_value(self, motor: str, data_name: str) -> Value: + """Retrieve a specific value from the state cache.""" + state = self._last_known_states[motor] + mapping: dict[str, Any] = { + "Present_Position": state["position"], + "Present_Velocity": state["velocity"], + "Present_Torque": state["torque"], + "Temperature_MOS": state["temp_mos"], + } + if data_name == "Temperature_Rotor": + raise NotImplementedError("Rotor temperature reading not accessible.") + if data_name not in mapping: + raise ValueError(f"Unknown data_name: {data_name}") + return mapping[data_name] + + @check_if_not_connected + def read( + self, + data_name: str, + motor: str, + ) -> Value: + """Read a value from a single motor. Positions are always in degrees.""" + + # Refresh motor to get latest state + t_init = time.time() + if ( + self.last_feedback_time[motor] is None + or t_init - (self.last_feedback_time[motor] or 0) > STATE_CACHE_TTL_S + ): + self.update_motor_state(motor) + + return self._get_cached_value(motor, data_name) + + @check_if_not_connected + def write( + self, + data_name: str, + motor: str, + value: Value, + ) -> None: + """Write a value to a single motor. 
Positions are always in degrees."""
+        motor_name = self._get_motor_name(motor)
+
+        if data_name in ("Kp", "Kd"):
+            self._gains[motor_name][data_name.lower()] = float(value)
+        elif data_name == "Goal_Position":
+            # Use MIT control with position in degrees
+            kp = self._gains[motor_name]["kp"]
+            kd = self._gains[motor_name]["kd"]
+            self._mit_control(motor, kp, kd, value, 0, 0)
+        elif data_name == "Goal_Velocity":
+            # Use Velocity control mode
+            if self.operation_mode[motor_name] != ControlMode.VEL:
+                raise RuntimeError(f"Motor '{motor_name}' is not in velocity control mode.")
+            current_limit_a = 5.0  # Example current limit / not specified in doc. This mode is rarely used and primarily intended for diagnostics
+            self._speed_control(motor, value, current_limit_a)
+        else:
+            raise ValueError(f"Writing {data_name} not supported in MIT mode")
+
+    def sync_read(
+        self,
+        data_name: str,
+        motors: str | list[str] | None = None,
+    ) -> dict[str, Value]:
+        """
+        Read the same value from multiple motors simultaneously.
+        Uses batched operations: sends all refresh commands, then collects all responses.
+        This is MUCH faster than sequential reads (OpenArms pattern).
+        """
+        target_motors = self._get_motors_list(motors)
+        self._batch_refresh(target_motors)
+        return {motor: self._get_cached_value(motor, data_name) for motor in target_motors}
+
+    @check_if_not_connected
+    def sync_write(
+        self,
+        data_name: str,
+        values: dict[str, Value],
+    ) -> None:
+        """
+        Write different values to multiple motors simultaneously. Positions are always in degrees.
+        Uses batched operations when MIT mode is used (sends all commands first, then collects responses); otherwise sends the command and waits for a response for each motor.
+ """ + if data_name in ("Kp", "Kd"): + key = data_name.lower() + for motor, val in values.items(): + motor_name = self._get_motor_name(motor) + self._gains[motor_name][key] = float(val) + elif data_name == "Goal_Position": + commands: dict[NameOrID, tuple[float, float, float, float, float]] = {} + for motor, value_degrees in values.items(): + motor_name = self._get_motor_name(motor) + commands[motor] = ( + self._gains[motor_name]["kp"], + self._gains[motor_name]["kd"], + float(value_degrees), + 0.0, + 0.0, + ) + self._mit_control_batch(commands) + else: + # Fall back to individual writes for other data types + for motor, value in values.items(): + self.write(data_name, motor, value) + + def sync_read_all_states( + self, + motors: str | list[str] | None = None, + *, + num_retry: int = 0, + ) -> dict[str, MotorState]: + """ + Read ALL motor states (position, velocity, torque) with Robstride TTL refresh policy. + """ + target_motors = self._get_motors_list(motors) + self._batch_refresh(target_motors) + return {motor: self._last_known_states[motor].copy() for motor in target_motors} + + def _batch_refresh(self, motors: list[str]) -> None: + """Refresh a set of motors and update the feedback cache.""" + init_time = time.time() + updated_motors: list[str] = [] + + for motor in motors: + if ( + self.last_feedback_time[motor] is not None + and (init_time - (self.last_feedback_time[motor] or 0)) < STATE_CACHE_TTL_S + ): + continue + motor_id = self._get_motor_id(motor) + data = [0xFF] * 7 + [CAN_CMD_CLEAR_FAULT] + msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False) + self._bus().send(msg) + updated_motors.append(motor) + + expected_recv_ids = [self._get_motor_recv_id(motor) for motor in updated_motors] + responses = self._recv_all_responses(expected_recv_ids, timeout=RUNNING_TIMEOUT) + + for response in responses.values(): + payload_motor_name = self._recv_id_to_motor.get(response.data[0]) + if payload_motor_name is not None: + 
self._process_response(payload_motor_name, response) + else: + # Fallback: still attempt to decode based on payload byte0 mapping. + self._decode_motor_state(response.data) + + for motor in updated_motors: + recv_id = self._get_motor_recv_id(motor) + if recv_id not in responses: + logger.warning(f"Packet drop: {motor} (ID: 0x{recv_id:02X}). Using last known state.") + + def read_calibration(self) -> dict[str, MotorCalibration]: + """Read calibration data from motors.""" + # Robstride motors don't store calibration internally + # Return existing calibration or empty dict + return self.calibration if self.calibration else {} + + def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None: + """Write calibration data to motors.""" + # Robstride motors don't store calibration internally + # Just cache it in memory + if cache: + self.calibration = calibration_dict + + def record_ranges_of_motion( + self, motors: str | list[str] | None = None, display_values: bool = True + ) -> tuple[dict[str, Value], dict[str, Value]]: + """ + Interactively record the min/max values of each motor in degrees. + + Move the joints by hand (with torque disabled) while the method streams live positions. + Press Enter to finish. + """ + target_motors = self._get_motors_list(motors) + + # Disable torque for manual movement + self.disable_torque(target_motors) + time.sleep(0.1) + + # Get initial positions (already in degrees) + start_positions = self.sync_read("Present_Position", target_motors) + mins = start_positions.copy() + maxes = start_positions.copy() + + print("\nMove joints through their full range of motion. 
Press ENTER when done.") + user_pressed_enter = False + + while not user_pressed_enter: + positions = self.sync_read("Present_Position", target_motors) + + for motor in target_motors: + if motor in positions: + mins[motor] = int( + min( + positions[motor], + mins.get(motor, positions[motor]), + ) + ) + maxes[motor] = int( + max( + positions[motor], + maxes.get(motor, positions[motor]), + ) + ) + + if display_values: + print("\n" + "=" * 50) + print(f"{'MOTOR':<20} | {'MIN (deg)':>12} | {'POS (deg)':>12} | {'MAX (deg)':>12}") + print("-" * 50) + for motor in target_motors: + if motor in positions: + print( + f"{motor:<20} | {mins[motor]:>12.1f} | {positions[motor]:>12.1f} | {maxes[motor]:>12.1f}" + ) + + if enter_pressed(): + user_pressed_enter = True + + if display_values and not user_pressed_enter: + # Move cursor up to overwrite the previous output + move_cursor_up(len(target_motors) + 4) + + time.sleep(0.05) + + # Re-enable torque + self.enable_torque(target_motors) + + # Validate ranges + for motor in target_motors: + if (motor in mins) and (motor in maxes) and (abs(maxes[motor] - mins[motor]) < 5.0): + raise ValueError(f"Motor {motor} has insufficient range of motion (< 5 degrees)") + + return mins, maxes + + def _get_motors_list(self, motors: str | list[str] | None) -> list[str]: + """Convert motor specification to list of motor names.""" + if motors is None: + return list(self.motors.keys()) + elif isinstance(motors, str): + return [motors] + elif isinstance(motors, list): + return motors + else: + raise TypeError(f"Invalid motors type: {type(motors)}") + + def _get_motor_id(self, motor: NameOrID) -> int: + """Get CAN ID for a motor.""" + if isinstance(motor, str): + if motor in self.motors: + return self.motors[motor].id + else: + raise ValueError(f"Unknown motor: {motor}") + else: + return motor + + def _get_motor_name(self, motor: NameOrID) -> str: + """Get motor name from name or ID.""" + if isinstance(motor, str): + return motor + else: + for name, m in 
self.motors.items(): + if m.id == motor: + return name + raise ValueError(f"Unknown motor ID: {motor}") + + def _get_motor_recv_id(self, motor: NameOrID) -> int: + """Return the expected ID found in feedback payload byte0 for this motor. + + Robstride MIT feedback frames encode an ID in data[0]. Some setups expose it as + `motor.recv_id`; otherwise we fall back to the configured `motor.id`. + """ + motor_name = self._get_motor_name(motor) + motor_obj = self.motors[motor_name] + + recv_id = getattr(motor_obj, "recv_id", None) + if recv_id is None: + logger.debug( + "Motor '%s' has no recv_id; falling back to motor.id=%s for feedback demux.", + motor_name, + motor_obj.id, + ) + return motor_obj.id + + return recv_id + + @cached_property + def is_calibrated(self) -> bool: + """Check if motors are calibrated.""" + return bool(self.calibration) diff --git a/src/lerobot/motors/robstride/tables.py b/src/lerobot/motors/robstride/tables.py new file mode 100644 index 000000000..2fc1a97b0 --- /dev/null +++ b/src/lerobot/motors/robstride/tables.py @@ -0,0 +1,120 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""Configuration tables for Robstride motors."""
+
+from enum import IntEnum
+
+
+# Motor type definitions
+class MotorType(IntEnum):
+    O0 = 0
+    O1 = 1
+    O2 = 2
+    O3 = 3
+    O4 = 4
+    O5 = 5
+    ELO5 = 6
+    O6 = 7
+
+
+class CommMode(IntEnum):
+    PrivateProtocole = 0
+    CANopen = 1
+    MIT = 2
+
+
+# Control modes
+class ControlMode(IntEnum):
+    MIT = 0
+    POS_VEL = 1
+    VEL = 2
+
+
+# Motor limit parameters [PMAX, VMAX, TMAX]
+# PMAX: Maximum position (rad)
+# VMAX: Maximum velocity (rad/s)
+# TMAX: Maximum torque (N·m)
+MOTOR_LIMIT_PARAMS: dict[MotorType, tuple[float, float, float]] = {
+    MotorType.O0: (12.57, 33, 14),
+    MotorType.O1: (12.57, 44, 17),
+    MotorType.O2: (12.57, 33, 20),
+    MotorType.O3: (12.57, 33, 60),
+    MotorType.O4: (12.57, 33, 120),
+    MotorType.O5: (12.57, 50, 5.5),
+    MotorType.ELO5: (12.57, 50, 6),
+    MotorType.O6: (112.5, 50, 36),
+}
+
+# Motor model names
+MODEL_NAMES = {
+    MotorType.O0: "O0",
+    MotorType.O1: "O1",
+    MotorType.O2: "O2",
+    MotorType.O3: "O3",
+    MotorType.O4: "O4",
+    MotorType.O5: "O5",
+    MotorType.ELO5: "ELO5",
+    MotorType.O6: "O6",
+}
+
+# Motor resolution table (encoder counts per revolution)
+MODEL_RESOLUTION = {
+    "O0": 65536,
+    "O1": 65536,
+    "O2": 65536,
+    "O3": 65536,
+    "O4": 65536,
+    "O5": 65536,
+    "ELO5": 65536,
+    "O6": 65536,
+}
+
+# CAN baudrates supported by Robstride motors
+AVAILABLE_BAUDRATES = [
+    1000000,  # 4: 1 Mbps (default)
+]
+DEFAULT_BAUDRATE = 1000000
+
+# Default timeout in milliseconds
+DEFAULT_TIMEOUT_MS = 0  # disabled by default; NOTE(review): unit unclear — 20000 would be 20 s if truly ms, confirm
+
+
+# Data that should be normalized
+NORMALIZED_DATA = ["Present_Position", "Goal_Position"]
+
+
+# MIT control parameter ranges
+MIT_KP_RANGE = (0.0, 500.0)
+MIT_KD_RANGE = (0.0, 5.0)
+
+# CAN frame command IDs
+CAN_CMD_ENABLE = 0xFC
+CAN_CMD_DISABLE = 0xFD
+CAN_CMD_SET_ZERO = 0xFE
+CAN_CMD_CLEAR_FAULT = 0xFB
+
+
+CAN_CMD_QUERY_PARAM = 0x33
+CAN_CMD_WRITE_PARAM = 0x55
+CAN_CMD_SAVE_PARAM = 0xAA
+
+# CAN ID for parameter operations
+CAN_PARAM_ID = 0x7FF
+
+
+RUNNING_TIMEOUT = 0.001 +PARAM_TIMEOUT = 0.01 + +STATE_CACHE_TTL_S = 0.02 diff --git a/src/lerobot/scripts/lerobot_setup_can.py b/src/lerobot/scripts/lerobot_setup_can.py index a31727ea4..b28fca44d 100644 --- a/src/lerobot/scripts/lerobot_setup_can.py +++ b/src/lerobot/scripts/lerobot_setup_can.py @@ -152,6 +152,7 @@ def test_motor(bus, motor_id: int, timeout: float, use_fd: bool): ) try: bus.send(disable_msg) + bus.recv(timeout=0.1) # Clear any pending responses except Exception: print(f"Error sending message to motor 0x{motor_id:02X}") From fcabfd32a5a45f0a8210179a6cc8f605815f0dab Mon Sep 17 00:00:00 2001 From: Yuta Nakagawa Date: Tue, 24 Feb 2026 01:11:46 +0900 Subject: [PATCH 18/21] chore(docs): update the document for Phone teleop to clarify how to use the examples (#2991) * update the document for Phone teleope to clarify how to use the examples * Update docs/source/phone_teleop.mdx Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: Yuta Nakagawa --------- Signed-off-by: Yuta Nakagawa Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: Steven Palma --- docs/source/phone_teleop.mdx | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/docs/source/phone_teleop.mdx b/docs/source/phone_teleop.mdx index 06e524975..678783e7b 100644 --- a/docs/source/phone_teleop.mdx +++ b/docs/source/phone_teleop.mdx @@ -66,12 +66,13 @@ Run on of the examples scripts to teleoperate, record a dataset, replay a datase All scripts assume you configured your robot (e.g., SO-100 follower) and set the correct serial port. -Additionally you need to **copy the urdf of the robot to the examples folder**. For the examples in this tutorial (Using SO100/SO101) it is highly recommended to use the urdf in the [SO-ARM100 repo](https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf) +Additionally you need to **copy the URDF of the robot into the examples folder**. 
For the examples in this tutorial (using SO100/SO101), copy the `SO101` folder from the [SO-ARM100 repo](https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101) into the `examples/phone_to_so100/` directory, so that the URDF file path becomes `examples/phone_to_so100/SO101/so101_new_calib.urdf`. - Run this example to teleoperate: ```bash - python examples/phone_to_so100/teleoperate.py + cd examples/phone_to_so100 + python teleoperate.py ``` After running the example: @@ -84,19 +85,22 @@ Additionally you can customize mapping or safety limits by editing the processor - Run this example to record a dataset, which saves absolute end effector observations and actions: ```bash - python examples/phone_to_so100/record.py + cd examples/phone_to_so100 + python record.py ``` - Run this example to replay recorded episodes: ```bash - python examples/phone_to_so100/replay.py + cd examples/phone_to_so100 + python replay.py ``` - Run this example to evaluate a pretrained policy: ```bash - python examples/phone_to_so100/evaluate.py + cd examples/phone_to_so100 + python evaluate.py ``` ### Important pipeline steps and options From 7dbbaa3727e8866ce6649e895d152f129a0c6d32 Mon Sep 17 00:00:00 2001 From: Guilherme Miotto Date: Mon, 23 Feb 2026 17:11:55 +0100 Subject: [PATCH 19/21] Small comment fix (#2990) Co-authored-by: Steven Palma --- src/lerobot/policies/smolvla/configuration_smolvla.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lerobot/policies/smolvla/configuration_smolvla.py b/src/lerobot/policies/smolvla/configuration_smolvla.py index c32c8a60e..c696265f2 100644 --- a/src/lerobot/policies/smolvla/configuration_smolvla.py +++ b/src/lerobot/policies/smolvla/configuration_smolvla.py @@ -85,7 +85,7 @@ class SmolVLAConfig(PreTrainedConfig): scheduler_decay_lr: float = 2.5e-6 vlm_model_name: str = "HuggingFaceTB/SmolVLM2-500M-Video-Instruct" # Select the VLM backbone. 
- load_vlm_weights: bool = False # Set to True in case of training the expert from scratch. True when init from pretrained SmolVLA weights + load_vlm_weights: bool = False # Set to False in case of training the expert from scratch. True when init from pretrained SmolVLA weights add_image_special_tokens: bool = False # Whether to use special image tokens around image features. From 0f44adbeecffca72e9b0f41c58b8222402ff4613 Mon Sep 17 00:00:00 2001 From: Yuan Haokuan <138340416+WilbertYuan@users.noreply.github.com> Date: Tue, 24 Feb 2026 00:51:13 +0800 Subject: [PATCH 20/21] docs: fix HF_USER export command to correctly parse username (#2932) * Fix HF_USER extraction command in documentation Updated command to extract the username from hf auth output. Signed-off-by: Yuan Haokuan <138340416+WilbertYuan@users.noreply.github.com> * Correct HF_USER variable assignment in documentation Fix the variable extraction from hf auth output. Signed-off-by: Yuan Haokuan <138340416+WilbertYuan@users.noreply.github.com> * Update docs/source/il_robots.mdx Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: Yuan Haokuan <138340416+WilbertYuan@users.noreply.github.com> --------- Signed-off-by: Yuan Haokuan <138340416+WilbertYuan@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: Steven Palma --- docs/source/il_robots.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/il_robots.mdx b/docs/source/il_robots.mdx index 7fc770b0c..bad88f88e 100644 --- a/docs/source/il_robots.mdx +++ b/docs/source/il_robots.mdx @@ -165,7 +165,7 @@ huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential Then store your Hugging Face repository name in a variable: ```bash -HF_USER=$(hf auth whoami | head -n 1) +HF_USER=$(hf auth whoami | awk -F': *' 'NR==1 {print $2}') echo $HF_USER ``` From 7fd71c83a3c5ba496b6a19aad96a048b7c42410f Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Dominik=20Pa=C4=BEo?= Date: Mon, 23 Feb 2026 20:41:20 +0100 Subject: [PATCH 21/21] docs: add WSL evdev installation note (#2855) Add a note in the installation guide explaining that users on WSL need to install evdev to avoid build issues. See: https://github.com/huggingface/lerobot/issues/2528 Signed-off-by: Steven Palma Co-authored-by: Steven Palma --- docs/source/installation.mdx | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/source/installation.mdx b/docs/source/installation.mdx index 8cc83843e..a112377c1 100644 --- a/docs/source/installation.mdx +++ b/docs/source/installation.mdx @@ -40,6 +40,13 @@ conda install ffmpeg -c conda-forge > > - _[On Linux only]_ If you want to bring your own ffmpeg: Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`. +> [!NOTE] +> When installing LeRobot inside WSL (Windows Subsystem for Linux), make sure to install `evdev` with the following command: +> +> ```bash +> conda install evdev -c conda-forge +> ``` + ## Step 3: Install LeRobot 🤗 ### From Source