Compare commits

...

20 Commits

Author SHA1 Message Date
Steven Palma 62135d846f license + peft-dep + init groot + flat import layering utils dataset 2026-04-12 16:43:24 +02:00
Steven Palma 718d2fc59d fix fast tests 2026-04-12 14:46:23 +02:00
Steven Palma 8e75f61b31 update fast ci tests 2026-04-12 14:11:50 +02:00
Steven Palma 2bf33ccb98 fix leaking imports in minimal testing 2026-04-12 13:52:45 +02:00
Steven Palma 27292a3432 complete migration 2026-04-12 12:19:26 +02:00
Steven Palma 87528186c0 address minor review comments 2026-04-12 11:27:59 +02:00
Steven Palma 8ef4d78178 upgrade uv lock 2026-04-12 10:40:11 +02:00
Steven Palma 5ccf99b930 add explicit transitative deps 2026-04-12 10:20:44 +02:00
Steven Palma 1624fc1797 is_available checks centralized 2026-04-12 09:56:03 +02:00
Steven Palma b132e2b5d6 docs and examples imports update 2026-04-12 09:43:13 +02:00
Steven Palma 89b4652de0 fix diffusion tests ci 2026-04-11 21:23:12 +02:00
Steven Palma 5940126fb5 fix test imports 2026-04-11 21:07:53 +02:00
Steven Palma c9636bb53f fix policy imports 2026-04-11 20:39:03 +02:00
Steven Palma af0d72bd42 refactor import fixes 2026-04-11 18:02:59 +02:00
Steven Palma d626964119 big imports refactor 2026-04-11 15:03:24 +02:00
Steven Palma 964acd0151 refactor: more changes 2026-04-11 11:13:15 +02:00
Steven Palma 4767f51971 Merge branch 'main' into feat/minimal_default_install 2026-04-10 20:57:38 +02:00
Steven Palma 4c39981908 refactor: minor improvements 2026-04-10 18:31:07 +02:00
Steven Palma 882a6b0965 refactor: several fixes 2026-04-10 15:35:31 +02:00
Steven Palma e2381633cd feat(dependecies): minimal default tag install 2026-04-10 14:22:13 +02:00
343 changed files with 3394 additions and 2076 deletions
+32 -6
View File
@@ -12,7 +12,10 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# This workflow handles fast testing. # This workflow validates each optional-dependency tier in isolation.
# Each tier installs a different extra and runs the full test suite.
# Tests that require an extra not installed in the current tier are
# skipped automatically via pytest.importorskip guards.
name: Fast Tests name: Fast Tests
on: on:
@@ -54,8 +57,9 @@ concurrency:
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
# This job runs pytests with the default dependencies. # This job runs pytests in isolated dependency tiers.
# It runs everytime we commit to a PR or push to main # Each tier installs a different extra and runs the full suite;
# tests gated behind other extras skip automatically.
fast-pytest-tests: fast-pytest-tests:
name: Fast Pytest Tests name: Fast Pytest Tests
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -89,8 +93,9 @@ jobs:
version: ${{ env.UV_VERSION }} version: ${{ env.UV_VERSION }}
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
- name: Install lerobot with test extras # ── Tier 1: Base ──────────────────────────────────────
run: uv sync --locked --extra "test" - name: "Tier 1 — Install: base"
run: uv sync --locked --extra test
- name: Login to Hugging Face - name: Login to Hugging Face
if: env.HF_USER_TOKEN != '' if: env.HF_USER_TOKEN != ''
@@ -98,5 +103,26 @@ jobs:
uv run hf auth login --token "$HF_USER_TOKEN" --add-to-git-credential uv run hf auth login --token "$HF_USER_TOKEN" --add-to-git-credential
uv run hf auth whoami uv run hf auth whoami
- name: Run pytest - name: "Tier 1 — Test: base"
run: uv run pytest tests -vv --maxfail=10
# ── Tier 2: Dataset ──────────────────────────────────
- name: "Tier 2 — Install: dataset"
run: uv sync --locked --extra test --extra dataset
- name: "Tier 2 — Test: dataset"
run: uv run pytest tests -vv --maxfail=10
# ── Tier 3: Hardware ─────────────────────────────────
- name: "Tier 3 — Install: hardware"
run: uv sync --locked --extra test --extra hardware
- name: "Tier 3 — Test: hardware"
run: uv run pytest tests -vv --maxfail=10
# ── Tier 4: Viz ──────────────────────────────────────
- name: "Tier 4 — Install: viz"
run: uv sync --locked --extra test --extra viz
- name: "Tier 4 — Test: viz"
run: uv run pytest tests -vv --maxfail=10 run: uv run pytest tests -vv --maxfail=10
+1 -1
View File
@@ -216,7 +216,7 @@ class MyBenchmarkEnvConfig(EnvConfig):
def get_env_processors(self): def get_env_processors(self):
"""Override if your benchmark needs observation/action transforms.""" """Override if your benchmark needs observation/action transforms."""
from lerobot.processor.pipeline import PolicyProcessorPipeline from lerobot.processor import PolicyProcessorPipeline
from lerobot.processor.env_processor import MyBenchmarkProcessorStep from lerobot.processor.env_processor import MyBenchmarkProcessorStep
return ( return (
PolicyProcessorPipeline(steps=[MyBenchmarkProcessorStep()]), PolicyProcessorPipeline(steps=[MyBenchmarkProcessorStep()]),
+1 -1
View File
@@ -170,7 +170,7 @@ python -m lerobot.async_inference.robot_client \
```python ```python
import threading import threading
from lerobot.robots.so_follower import SO100FollowerConfig from lerobot.robots.so_follower import SO100FollowerConfig
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.async_inference.configs import RobotClientConfig from lerobot.async_inference.configs import RobotClientConfig
from lerobot.async_inference.robot_client import RobotClient from lerobot.async_inference.robot_client import RobotClient
from lerobot.async_inference.helpers import visualize_action_queue_size from lerobot.async_inference.helpers import visualize_action_queue_size
+1 -1
View File
@@ -41,7 +41,7 @@ The script:
```python ```python
# New usage pattern (after migration) # New usage pattern (after migration)
from lerobot.policies.factory import make_policy, make_pre_post_processors from lerobot.policies import make_policy, make_pre_post_processors
# Load model and processors separately # Load model and processors separately
policy = make_policy(config, ds_meta=dataset.meta) policy = make_policy(config, ds_meta=dataset.meta)
+4 -4
View File
@@ -47,9 +47,9 @@ Here is a template to get you started, customize the parameters and methods as n
```python ```python
# configuration_my_custom_policy.py # configuration_my_custom_policy.py
from dataclasses import dataclass, field from dataclasses import dataclass, field
from lerobot.configs.policies import PreTrainedConfig from lerobot.configs import PreTrainedConfig
from lerobot.optim.optimizers import AdamWConfig from lerobot.optim import AdamWConfig
from lerobot.optim.schedulers import CosineDecayWithWarmupSchedulerConfig from lerobot.optim import CosineDecayWithWarmupSchedulerConfig
@PreTrainedConfig.register_subclass("my_custom_policy") @PreTrainedConfig.register_subclass("my_custom_policy")
@dataclass @dataclass
@@ -120,7 +120,7 @@ import torch
import torch.nn as nn import torch.nn as nn
from typing import Any from typing import Any
from lerobot.policies.pretrained import PreTrainedPolicy from lerobot.policies import PreTrainedPolicy
from lerobot.utils.constants import ACTION from lerobot.utils.constants import ACTION
from .configuration_my_custom_policy import MyCustomPolicyConfig from .configuration_my_custom_policy import MyCustomPolicyConfig
+4 -6
View File
@@ -79,9 +79,8 @@ The following examples show how to use the camera API to configure and capture f
<!-- prettier-ignore-start --> <!-- prettier-ignore-start -->
```python ```python
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCamera, OpenCVCameraConfig
from lerobot.cameras.opencv.camera_opencv import OpenCVCamera from lerobot.cameras import ColorMode, Cv2Rotation
from lerobot.cameras.configs import ColorMode, Cv2Rotation
# Construct an `OpenCVCameraConfig` with your desired FPS, resolution, color mode, and rotation. # Construct an `OpenCVCameraConfig` with your desired FPS, resolution, color mode, and rotation.
config = OpenCVCameraConfig( config = OpenCVCameraConfig(
@@ -126,9 +125,8 @@ with OpenCVCamera(config) as camera:
<!-- prettier-ignore-start --> <!-- prettier-ignore-start -->
```python ```python
from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig from lerobot.cameras.realsense import RealSenseCamera, RealSenseCameraConfig
from lerobot.cameras.realsense.camera_realsense import RealSenseCamera from lerobot.cameras import ColorMode, Cv2Rotation
from lerobot.cameras.configs import ColorMode, Cv2Rotation
# Create a `RealSenseCameraConfig` specifying your cameras serial number and enabling depth. # Create a `RealSenseCameraConfig` specifying your cameras serial number and enabling depth.
config = RealSenseCameraConfig( config = RealSenseCameraConfig(
+6 -7
View File
@@ -95,7 +95,7 @@ After completing your annotation:
When you load a dataset with subtask annotations, the subtask information is automatically available: When you load a dataset with subtask annotations, the subtask information is automatically available:
```python ```python
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
# Load a dataset with subtask annotations # Load a dataset with subtask annotations
dataset = LeRobotDataset("jadechoghari/collect-fruit-annotated") dataset = LeRobotDataset("jadechoghari/collect-fruit-annotated")
@@ -133,11 +133,10 @@ if has_subtasks:
The `TokenizerProcessor` automatically handles subtask tokenization for Vision-Language Action (VLA) models: The `TokenizerProcessor` automatically handles subtask tokenization for Vision-Language Action (VLA) models:
```python ```python
from lerobot.processor.tokenizer_processor import TokenizerProcessor from lerobot.processor import TokenizerProcessorStep
from lerobot.processor.pipeline import ProcessorPipeline
# Create a tokenizer processor # Create a tokenizer processor step
tokenizer_processor = TokenizerProcessor( tokenizer_processor = TokenizerProcessorStep(
tokenizer_name_or_path="google/paligemma-3b-pt-224", tokenizer_name_or_path="google/paligemma-3b-pt-224",
padding="max_length", padding="max_length",
max_length=64, max_length=64,
@@ -158,7 +157,7 @@ When subtasks are available in the batch, the tokenizer processor adds:
```python ```python
import torch import torch
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
dataset = LeRobotDataset("jadechoghari/collect-fruit-annotated") dataset = LeRobotDataset("jadechoghari/collect-fruit-annotated")
@@ -182,7 +181,7 @@ for batch in dataloader:
Try loading a dataset with subtask annotations: Try loading a dataset with subtask annotations:
```python ```python
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
# Example dataset with subtask annotations # Example dataset with subtask annotations
dataset = LeRobotDataset("jadechoghari/collect-fruit-annotated") dataset = LeRobotDataset("jadechoghari/collect-fruit-annotated")
+2 -2
View File
@@ -66,10 +66,10 @@ The SDK gives you:
Follow our [Installation Guide](./installation) to install LeRobot. Follow our [Installation Guide](./installation) to install LeRobot.
In addition to the base installation, install the EarthRover Mini dependencies: In addition to the base installation, install the EarthRover Mini with hardware dependencies:
```bash ```bash
pip install -e . pip install -e ".[hardware]"
``` ```
## How It Works ## How It Works
+3 -3
View File
@@ -173,8 +173,8 @@ observation = {
The `make_env_pre_post_processors` function follows the same pattern as `make_pre_post_processors` for policies: The `make_env_pre_post_processors` function follows the same pattern as `make_pre_post_processors` for policies:
```python ```python
from lerobot.envs.factory import make_env_pre_post_processors from lerobot.envs import make_env_pre_post_processors, PushtEnv
from lerobot.envs.configs import LiberoEnv, PushtEnv from lerobot.envs.configs import LiberoEnv
# For LIBERO: Returns LiberoProcessorStep in preprocessor # For LIBERO: Returns LiberoProcessorStep in preprocessor
libero_cfg = LiberoEnv(task="libero_spatial", camera_name=["agentview"]) libero_cfg = LiberoEnv(task="libero_spatial", camera_name=["agentview"])
@@ -257,7 +257,7 @@ def eval_main(cfg: EvalPipelineConfig):
The `LiberoProcessorStep` demonstrates a real-world environment processor: The `LiberoProcessorStep` demonstrates a real-world environment processor:
```python ```python
from lerobot.processor.pipeline import ObservationProcessorStep from lerobot.processor import ObservationProcessorStep
@dataclass @dataclass
@ProcessorStepRegistry.register(name="libero_processor") @ProcessorStepRegistry.register(name="libero_processor")
+3 -3
View File
@@ -34,7 +34,7 @@ Finally, your environment must implement the standard `gym.vector.VectorEnv` int
Loading an environment from the Hub is as simple as: Loading an environment from the Hub is as simple as:
```python ```python
from lerobot.envs.factory import make_env from lerobot.envs import make_env
# Load a hub environment (requires explicit consent to run remote code) # Load a hub environment (requires explicit consent to run remote code)
env = make_env("lerobot/cartpole-env", trust_remote_code=True) env = make_env("lerobot/cartpole-env", trust_remote_code=True)
@@ -191,7 +191,7 @@ api.upload_folder(
### Basic Usage ### Basic Usage
```python ```python
from lerobot.envs.factory import make_env from lerobot.envs import make_env
# Load from the hub # Load from the hub
envs_dict = make_env( envs_dict = make_env(
@@ -314,7 +314,7 @@ env = make_env("trusted-org/verified-env@a1b2c3d4", trust_remote_code=True)
Here's a complete example using the reference CartPole environment: Here's a complete example using the reference CartPole environment:
```python ```python
from lerobot.envs.factory import make_env from lerobot.envs import make_env
import numpy as np import numpy as np
# Load the environment # Load the environment
+3 -3
View File
@@ -58,10 +58,10 @@ pip install -e .
cd .. cd ..
# 5. Install LeRobot # 5. Install LeRobot (evaluation extra for env/policy evaluation)
git clone https://github.com/huggingface/lerobot.git git clone https://github.com/huggingface/lerobot.git
cd lerobot cd lerobot
pip install -e . pip install -e ".[evaluation]"
cd .. cd ..
@@ -262,7 +262,7 @@ def main(cfg: EvalPipelineConfig):
"""Run random action rollout for IsaacLab Arena environment.""" """Run random action rollout for IsaacLab Arena environment."""
logging.info(pformat(asdict(cfg))) logging.info(pformat(asdict(cfg)))
from lerobot.envs.factory import make_env from lerobot.envs import make_env
env_dict = make_env( env_dict = make_env(
cfg.env, cfg.env,
+3 -3
View File
@@ -74,7 +74,7 @@ EnvHub exposes every LeIsaac-supported task in a uniform interface. The examples
# envhub_random_action.py # envhub_random_action.py
import torch import torch
from lerobot.envs.factory import make_env from lerobot.envs import make_env
# Load from the hub # Load from the hub
envs_dict = make_env("LightwheelAI/leisaac_env:envs/so101_pick_orange.py", n_envs=1, trust_remote_code=True) envs_dict = make_env("LightwheelAI/leisaac_env:envs/so101_pick_orange.py", n_envs=1, trust_remote_code=True)
@@ -142,7 +142,7 @@ from lerobot.teleoperators import ( # noqa: F401
) )
from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.utils import init_logging from lerobot.utils.utils import init_logging
from lerobot.envs.factory import make_env from lerobot.envs import make_env
@dataclass @dataclass
@@ -282,7 +282,7 @@ Note: when working with `bi_so101_fold_cloth`, call `initialize()` immediately a
```python ```python
import torch import torch
from lerobot.envs.factory import make_env from lerobot.envs import make_env
# Load from the hub # Load from the hub
envs_dict = make_env("LightwheelAI/leisaac_env:envs/bi_so101_fold_cloth.py", n_envs=1, trust_remote_code=True) envs_dict = make_env("LightwheelAI/leisaac_env:envs/bi_so101_fold_cloth.py", n_envs=1, trust_remote_code=True)
+19 -22
View File
@@ -58,8 +58,8 @@ lerobot-teleoperate \
<!-- prettier-ignore-start --> <!-- prettier-ignore-start -->
```python ```python
from lerobot.teleoperators.so_leader import SO101LeaderConfig, SO101Leader from lerobot.teleoperators.so_leader import SO101Leader, SO101LeaderConfig
from lerobot.robots.so_follower import SO101FollowerConfig, SO101Follower from lerobot.robots.so_follower import SO101Follower, SO101FollowerConfig
robot_config = SO101FollowerConfig( robot_config = SO101FollowerConfig(
port="/dev/tty.usbmodem58760431541", port="/dev/tty.usbmodem58760431541",
@@ -116,9 +116,9 @@ lerobot-teleoperate \
<!-- prettier-ignore-start --> <!-- prettier-ignore-start -->
```python ```python
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.teleoperators.koch_leader import KochLeaderConfig, KochLeader from lerobot.teleoperators.koch_leader import KochLeader, KochLeaderConfig
from lerobot.robots.koch_follower import KochFollowerConfig, KochFollower from lerobot.robots.koch_follower import KochFollower, KochFollowerConfig
camera_config = { camera_config = {
"front": OpenCVCameraConfig(index_or_path=0, width=1920, height=1080, fps=30) "front": OpenCVCameraConfig(index_or_path=0, width=1920, height=1080, fps=30)
@@ -195,13 +195,12 @@ lerobot-record \
<!-- prettier-ignore-start --> <!-- prettier-ignore-start -->
```python ```python
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.datasets.utils import hw_to_dataset_features from lerobot.utils.feature_utils import hw_to_dataset_features
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.teleoperators.so_leader.config_so100_leader import SO100LeaderConfig from lerobot.teleoperators.so_leader import SO100Leader, SO100LeaderConfig
from lerobot.teleoperators.so_leader.so100_leader import SO100Leader from lerobot.common.control_utils import init_keyboard_listener
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun from lerobot.utils.visualization_utils import init_rerun
from lerobot.scripts.lerobot_record import record_loop from lerobot.scripts.lerobot_record import record_loop
@@ -410,9 +409,8 @@ lerobot-replay \
```python ```python
import time import time
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.robots.so_follower.config_so100_follower import SO100FollowerConfig from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.robots.so_follower.so100_follower import SO100Follower
from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.utils import log_say from lerobot.utils.utils import log_say
@@ -532,15 +530,14 @@ lerobot-record \
<!-- prettier-ignore-start --> <!-- prettier-ignore-start -->
```python ```python
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.datasets.utils import hw_to_dataset_features from lerobot.utils.feature_utils import hw_to_dataset_features
from lerobot.policies.act.modeling_act import ACTPolicy from lerobot.policies.act import ACTPolicy
from lerobot.policies.factory import make_pre_post_processors from lerobot.policies import make_pre_post_processors
from lerobot.robots.so_follower.config_so100_follower import SO100FollowerConfig from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.robots.so_follower.so100_follower import SO100Follower
from lerobot.scripts.lerobot_record import record_loop from lerobot.scripts.lerobot_record import record_loop
from lerobot.utils.control_utils import init_keyboard_listener from lerobot.common.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun from lerobot.utils.visualization_utils import init_rerun
+44 -16
View File
@@ -116,6 +116,8 @@ brew install ffmpeg
## Step 3: Install LeRobot 🤗 ## Step 3: Install LeRobot 🤗
The base `lerobot` install is intentionally **lightweight** — it includes only core ML dependencies (PyTorch, torchvision, numpy, opencv, einops, draccus, huggingface-hub, gymnasium, safetensors). Heavier dependencies are gated behind optional extras so you only install what you need.
### From Source ### From Source
First, clone the repository and navigate into the directory: First, clone the repository and navigate into the directory:
@@ -131,12 +133,16 @@ Then, install the library in editable mode. This is useful if you plan to contri
<hfoptions id="install_lerobot_src"> <hfoptions id="install_lerobot_src">
<hfoption id="conda"> <hfoption id="conda">
```bash ```bash
pip install -e . pip install -e ".[core_scripts]" # For robot workflows (recording, replaying, calibrate)
pip install -e ".[training]" # For training policies
pip install -e ".[all]" # Everything (all policies, envs, hardware, dev tools)
``` ```
</hfoption> </hfoption>
<hfoption id="uv"> <hfoption id="uv">
```bash ```bash
uv pip install -e . uv pip install -e ".[core_scripts]" # For robot workflows (recording, replaying, calibrate)
uv pip install -e ".[training]" # For training policies
uv pip install -e ".[all]" # Everything (all policies, envs, hardware, dev tools)
``` ```
</hfoption> </hfoption>
</hfoptions> </hfoptions>
@@ -162,26 +168,48 @@ uv pip install lerobot
</hfoptions> </hfoptions>
<!-- prettier-ignore-end --> <!-- prettier-ignore-end -->
_This installs only the default dependencies._ _This installs only the core ML dependencies. You will need to add extras for most workflows._
**Extra Features:** **Feature Extras:**
To install additional functionality, use one of the following (If you are using `uv`, replace `pip install` with `uv pip install` in the commands below.): LeRobot provides **feature-scoped extras** that map to common workflows. If you are using `uv`, replace `pip install` with `uv pip install` in the commands below.
| Extra | What it adds | Typical use case |
| ---------- | ------------------------------------------- | ----------------------------------- |
| `dataset` | `datasets`, `av`, `torchcodec`, `jsonlines` | Loading & creating datasets |
| `training` | `dataset` + `accelerate`, `wandb` | Training policies |
| `hardware` | `pynput`, `pyserial`, `deepdiff` | Connecting to real robots |
| `viz` | `rerun-sdk` | Visualization during recording/eval |
**Composite Extras** combine feature extras for common CLI scripts:
| Extra | Includes | Typical use case |
| -------------- | ------------------------------ | ------------------------------------------------------- |
| `core_scripts` | `dataset` + `hardware` + `viz` | `lerobot-record`, `lerobot-replay`, `lerobot-calibrate` |
| `evaluation` | `av` | `lerobot-eval` (add policy + env extras as needed) |
| `dataset_viz` | `dataset` + `viz` | `lerobot-dataset-viz`, `lerobot-imgtransform-viz` |
```bash ```bash
pip install 'lerobot[all]' # All available features pip install 'lerobot[core_scripts]' # Record, replay, calibrate
pip install 'lerobot[aloha,pusht]' # Specific features (Aloha & Pusht) pip install 'lerobot[training]' # Train policies
pip install 'lerobot[feetech]' # Feetech motor support pip install 'lerobot[core_scripts,training]' # Record + train
pip install 'lerobot[all]' # Everything
``` ```
_Replace `[...]` with your desired features._ **Policy, environment, and hardware extras** are still available for specific dependencies:
**Available Tags:** ```bash
For a full list of optional dependencies, see: pip install 'lerobot[pi]' # Pi0/Pi0.5/Pi0-FAST policy deps
https://pypi.org/project/lerobot/ pip install 'lerobot[smolvla]' # SmolVLA policy deps
pip install 'lerobot[diffusion]' # Diffusion policy deps (diffusers)
pip install 'lerobot[aloha,pusht]' # Simulation environments
pip install 'lerobot[feetech]' # Feetech motor support
```
_Multiple extras can be combined (e.g., `.[core_scripts,pi,pusht]`). For a full list of available extras, refer to `pyproject.toml`._
### Troubleshooting ### Troubleshooting
If you encounter build errors, you may need to install additional dependencies: `cmake`, `build-essential`, and `ffmpeg libs`. If you encounter build errors, you may need to install additional system dependencies: `cmake`, `build-essential`, and `ffmpeg libs`.
To install these for Linux run: To install these for Linux run:
```bash ```bash
@@ -196,8 +224,8 @@ LeRobot provides optional extras for specific functionalities. Multiple extras c
### Simulations ### Simulations
Install environment packages: `aloha` ([gym-aloha](https://github.com/huggingface/gym-aloha)), or `pusht` ([gym-pusht](https://github.com/huggingface/gym-pusht)) Install environment packages: `aloha` ([gym-aloha](https://github.com/huggingface/gym-aloha)), or `pusht` ([gym-pusht](https://github.com/huggingface/gym-pusht)).
Example: These automatically include the `dataset` extra.
```bash ```bash
pip install -e ".[aloha]" # or "[pusht]" for example pip install -e ".[aloha]" # or "[pusht]" for example
@@ -213,7 +241,7 @@ pip install -e ".[feetech]" # or "[dynamixel]" for example
### Experiment Tracking ### Experiment Tracking
To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with Weights and Biases is included in the `training` extra. To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with:
```bash ```bash
wandb login wandb login
+4 -4
View File
@@ -19,10 +19,10 @@ This means that your favorite policy can be used like this:
```python ```python
import torch import torch
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.policies.factory import make_pre_post_processors from lerobot.policies import make_pre_post_processors
from lerobot.policies.your_policy import YourPolicy from lerobot.policies.your_policy import YourPolicy
from lerobot.processor.pipeline import RobotProcessorPipeline, PolicyProcessorPipeline from lerobot.processor import RobotProcessorPipeline, PolicyProcessorPipeline
dataset = LeRobotDataset("hf_user/dataset", episodes=[0]) dataset = LeRobotDataset("hf_user/dataset", episodes=[0])
sample = dataset[10] sample = dataset[10]
@@ -260,7 +260,7 @@ Since processor pipelines can add new features (like velocity fields), change te
These functions work together by starting with robot hardware specifications (`create_initial_features()`) then simulating the entire pipeline transformation (`aggregate_pipeline_dataset_features()`) to compute the final feature dictionary that gets passed to `LeRobotDataset.create()`, ensuring perfect alignment between what processors output and what datasets expect to store. These functions work together by starting with robot hardware specifications (`create_initial_features()`) then simulating the entire pipeline transformation (`aggregate_pipeline_dataset_features()`) to compute the final feature dictionary that gets passed to `LeRobotDataset.create()`, ensuring perfect alignment between what processors output and what datasets expect to store.
```python ```python
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features from lerobot.datasets import aggregate_pipeline_dataset_features
# Start with robot's raw features # Start with robot's raw features
initial_features = create_initial_features( initial_features = create_initial_features(
+5 -5
View File
@@ -89,7 +89,7 @@ A core v3 principle is **decoupling storage from the user API**: data is stored
```python ```python
import torch import torch
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
repo_id = "yaak-ai/L2D-v3" repo_id = "yaak-ai/L2D-v3"
@@ -135,7 +135,7 @@ for batch in data_loader:
Use `StreamingLeRobotDataset` to iterate directly from the Hub without local copies. This allows to stream large datasets without the need to downloading them onto disk or loading them onto memory, and is a key feature of the new dataset format. Use `StreamingLeRobotDataset` to iterate directly from the Hub without local copies. This allows to stream large datasets without the need to downloading them onto disk or loading them onto memory, and is a key feature of the new dataset format.
```python ```python
from lerobot.datasets.streaming_dataset import StreamingLeRobotDataset from lerobot.datasets import StreamingLeRobotDataset
repo_id = "yaak-ai/L2D-v3" repo_id = "yaak-ai/L2D-v3"
dataset = StreamingLeRobotDataset(repo_id) # streams directly from the Hub dataset = StreamingLeRobotDataset(repo_id) # streams directly from the Hub
@@ -167,8 +167,8 @@ Currently, transforms are applied during **training time only**, not during reco
Use the `image_transforms` parameter when loading a dataset for training: Use the `image_transforms` parameter when loading a dataset for training:
```python ```python
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.datasets.transforms import ImageTransforms, ImageTransformsConfig, ImageTransformConfig from lerobot.transforms import ImageTransforms, ImageTransformsConfig, ImageTransformConfig
# Option 1: Use default transform configuration (disabled by default) # Option 1: Use default transform configuration (disabled by default)
transforms_config = ImageTransformsConfig( transforms_config = ImageTransformsConfig(
@@ -290,7 +290,7 @@ python -m lerobot.datasets.v30.convert_dataset_v21_to_v30 --repo-id=<HF_USER/DAT
When creating or recording datasets, you **must** call `dataset.finalize()` to properly close parquet writers. See the [PR #1903](https://github.com/huggingface/lerobot/pull/1903) for more details. When creating or recording datasets, you **must** call `dataset.finalize()` to properly close parquet writers. See the [PR #1903](https://github.com/huggingface/lerobot/pull/1903) for more details.
```python ```python
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
# Create dataset and record episodes # Create dataset and record episodes
dataset = LeRobotDataset.create(...) dataset = LeRobotDataset.create(...)
+2 -2
View File
@@ -4,10 +4,10 @@ This guide shows you how to train policies on multiple GPUs using [Hugging Face
## Installation ## Installation
First, ensure you have accelerate installed: `accelerate` is included in the `training` extra. Install it with:
```bash ```bash
pip install accelerate pip install 'lerobot[training]'
``` ```
## Training with Multiple GPUs ## Training with Multiple GPUs
+2 -1
View File
@@ -45,7 +45,8 @@ Modify the examples to use `PhoneOS.IOS` or `PhoneOS.ANDROID` in `PhoneConfig`.
Teleoperation example: Teleoperation example:
```python ```python
from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS from lerobot.teleoperators.phone import Phone, PhoneConfig
from lerobot.teleoperators.phone.config_phone import PhoneOS
teleop_config = PhoneConfig(phone_os=PhoneOS.IOS) # or PhoneOS.ANDROID teleop_config = PhoneConfig(phone_os=PhoneOS.IOS) # or PhoneOS.ANDROID
teleop_device = Phone(teleop_config) teleop_device = Phone(teleop_config)
+1 -2
View File
@@ -110,8 +110,7 @@ lerobot-edit-dataset \
Or equivalently in Python: Or equivalently in Python:
```python ```python
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset, recompute_stats
from lerobot.datasets.dataset_tools import recompute_stats
dataset = LeRobotDataset("your_dataset") dataset = LeRobotDataset("your_dataset")
recompute_stats(dataset, relative_action=True, chunk_size=50, relative_exclude_joints=["gripper"]) recompute_stats(dataset, relative_action=True, chunk_size=50, relative_exclude_joints=["gripper"])
+1 -2
View File
@@ -116,8 +116,7 @@ lerobot-edit-dataset \
Or equivalently in Python: Or equivalently in Python:
```python ```python
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset, recompute_stats
from lerobot.datasets.dataset_tools import recompute_stats
dataset = LeRobotDataset("your_dataset") dataset = LeRobotDataset("your_dataset")
recompute_stats(dataset, relative_action=True, chunk_size=50, relative_exclude_joints=["gripper"]) recompute_stats(dataset, relative_action=True, chunk_size=50, relative_exclude_joints=["gripper"])
+2 -3
View File
@@ -60,11 +60,10 @@ When `use_relative_actions=true`, the training script automatically:
### Recomputing stats for an existing dataset ### Recomputing stats for an existing dataset
If you want to precompute relative action stats offline, use `recompute_stats` from If you want to precompute relative action stats offline, use `recompute_stats` from
`lerobot.datasets.dataset_tools`: `lerobot.datasets`:
```python ```python
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset, recompute_stats
from lerobot.datasets.dataset_tools import recompute_stats
dataset = LeRobotDataset("your_org/your_dataset") dataset = LeRobotDataset("your_org/your_dataset")
dataset = recompute_stats( dataset = recompute_stats(
+2 -3
View File
@@ -39,9 +39,8 @@ The snippet below provides a simplified pseudo-example of how RTC operates with
```python ```python
from lerobot.policies.pi0 import PI0Policy, PI0Config from lerobot.policies.pi0 import PI0Policy, PI0Config
from lerobot.configs.types import RTCAttentionSchedule from lerobot.configs import RTCAttentionSchedule
from lerobot.policies.rtc.configuration_rtc import RTCConfig from lerobot.policies.rtc import RTCConfig, ActionQueue
from lerobot.policies.rtc.action_queue import ActionQueue
# Load Pi0 with RTC enabled # Load Pi0 with RTC enabled
policy_cfg = PI0Config() policy_cfg = PI0Config()
+1 -1
View File
@@ -418,7 +418,7 @@ Create a custom preprocessing pipeline for your environment:
```python ```python
from lerobot.processor import PolicyProcessorPipeline from lerobot.processor import PolicyProcessorPipeline
from lerobot.policies.xvla.processor_xvla import ( from lerobot.policies.xvla import (
XVLAImageToFloatProcessorStep, XVLAImageToFloatProcessorStep,
XVLAImageNetNormalizeProcessorStep, XVLAImageNetNormalizeProcessorStep,
XVLAAddDomainIdProcessorStep, XVLAAddDomainIdProcessorStep,
+1 -1
View File
@@ -35,7 +35,7 @@ from pprint import pformat
import draccus import draccus
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.robots import ( # noqa: F401 from lerobot.robots import ( # noqa: F401
Robot, Robot,
RobotConfig, RobotConfig,
+2 -8
View File
@@ -31,17 +31,11 @@ from pprint import pprint
import torch import torch
from huggingface_hub import HfApi from huggingface_hub import HfApi
import lerobot from lerobot.datasets import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata
from lerobot.datasets.lerobot_dataset import LeRobotDataset
def main(): def main():
# We ported a number of existing datasets ourselves, use this to see the list: # Browse datasets created/ported by the community on the hub using the hub api:
print("List of available datasets:")
pprint(lerobot.available_datasets)
# You can also browse through the datasets created/ported by the community on the hub using the hub api:
hub_api = HfApi() hub_api = HfApi()
repo_ids = [info.id for info in hub_api.list_datasets(task_categories="robotics", tags=["LeRobot"])] repo_ids = [info.id for info in hub_api.list_datasets(task_categories="robotics", tags=["LeRobot"])]
pprint(repo_ids) pprint(repo_ids)
+1 -1
View File
@@ -231,7 +231,7 @@ class AggregateProgress(PipelineStep):
import pyarrow as pa import pyarrow as pa
import pyarrow.parquet as pq import pyarrow.parquet as pq
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.utils.utils import init_logging from lerobot.utils.utils import init_logging
init_logging() init_logging()
@@ -26,8 +26,8 @@ import torch
from torchvision.transforms import v2 from torchvision.transforms import v2
from torchvision.transforms.functional import to_pil_image from torchvision.transforms.functional import to_pil_image
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.datasets.transforms import ImageTransformConfig, ImageTransforms, ImageTransformsConfig from lerobot.transforms import ImageTransformConfig, ImageTransforms, ImageTransformsConfig
def save_image(tensor, filename): def save_image(tensor, filename):
+2 -2
View File
@@ -29,7 +29,8 @@ Usage:
import numpy as np import numpy as np
from lerobot.datasets.dataset_tools import ( from lerobot.datasets import (
LeRobotDataset,
add_features, add_features,
delete_episodes, delete_episodes,
merge_datasets, merge_datasets,
@@ -37,7 +38,6 @@ from lerobot.datasets.dataset_tools import (
remove_feature, remove_feature,
split_dataset, split_dataset,
) )
from lerobot.datasets.lerobot_dataset import LeRobotDataset
def main(): def main():
+20 -19
View File
@@ -112,17 +112,18 @@ from hil_utils import (
teleop_smooth_move_to, teleop_smooth_move_to,
) )
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig # noqa: F401 from lerobot.cameras.opencv import OpenCVCameraConfig # noqa: F401
from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig # noqa: F401 from lerobot.cameras.realsense import RealSenseCameraConfig # noqa: F401
from lerobot.configs import parser from lerobot.common.control_utils import is_headless, predict_action
from lerobot.configs.policies import PreTrainedConfig from lerobot.configs import PreTrainedConfig, parser
from lerobot.datasets.feature_utils import build_dataset_frame, combine_feature_dicts, hw_to_dataset_features from lerobot.datasets import (
from lerobot.datasets.image_writer import safe_stop_image_writer LeRobotDataset,
from lerobot.datasets.lerobot_dataset import LeRobotDataset VideoEncodingManager,
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features aggregate_pipeline_dataset_features,
from lerobot.datasets.video_utils import VideoEncodingManager create_initial_features,
from lerobot.policies.factory import get_policy_class, make_policy, make_pre_post_processors safe_stop_image_writer,
from lerobot.policies.pretrained import PreTrainedPolicy )
from lerobot.policies import PreTrainedPolicy, get_policy_class, make_policy, make_pre_post_processors
from lerobot.policies.rtc import ActionInterpolator, ActionQueue, LatencyTracker, RTCConfig from lerobot.policies.rtc import ActionInterpolator, ActionQueue, LatencyTracker, RTCConfig
from lerobot.policies.utils import make_robot_action from lerobot.policies.utils import make_robot_action
from lerobot.processor import ( from lerobot.processor import (
@@ -131,18 +132,18 @@ from lerobot.processor import (
RelativeActionsProcessorStep, RelativeActionsProcessorStep,
TransitionKey, TransitionKey,
create_transition, create_transition,
rename_stats,
to_relative_actions,
) )
from lerobot.processor.relative_action_processor import to_relative_actions
from lerobot.processor.rename_processor import rename_stats
from lerobot.robots import Robot, RobotConfig, make_robot_from_config from lerobot.robots import Robot, RobotConfig, make_robot_from_config
from lerobot.robots.bi_openarm_follower.config_bi_openarm_follower import BiOpenArmFollowerConfig from lerobot.robots.bi_openarm_follower import BiOpenArmFollowerConfig
from lerobot.robots.so_follower.config_so_follower import SOFollowerRobotConfig # noqa: F401 from lerobot.robots.so_follower import SOFollowerRobotConfig # noqa: F401
from lerobot.teleoperators import Teleoperator, TeleoperatorConfig, make_teleoperator_from_config from lerobot.teleoperators import Teleoperator, TeleoperatorConfig, make_teleoperator_from_config
from lerobot.teleoperators.openarm_mini.config_openarm_mini import OpenArmMiniConfig # noqa: F401 from lerobot.teleoperators.openarm_mini import OpenArmMiniConfig # noqa: F401
from lerobot.teleoperators.so_leader.config_so_leader import SOLeaderTeleopConfig # noqa: F401 from lerobot.teleoperators.so_leader import SOLeaderTeleopConfig # noqa: F401
from lerobot.utils import get_safe_torch_device
from lerobot.utils.constants import ACTION, OBS_STATE, OBS_STR from lerobot.utils.constants import ACTION, OBS_STATE, OBS_STR
from lerobot.utils.control_utils import is_headless, predict_action from lerobot.utils.feature_utils import build_dataset_frame, combine_feature_dicts, hw_to_dataset_features
from lerobot.utils.device_utils import get_safe_torch_device
from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.utils import init_logging, log_say from lerobot.utils.utils import init_logging, log_say
from lerobot.utils.visualization_utils import init_rerun, log_rerun_data from lerobot.utils.visualization_utils import init_rerun, log_rerun_data
+1 -3
View File
@@ -19,13 +19,12 @@ import time
from dataclasses import dataclass, field from dataclasses import dataclass, field
from pathlib import Path from pathlib import Path
from lerobot.common.control_utils import is_headless
from lerobot.processor import ( from lerobot.processor import (
IdentityProcessorStep, IdentityProcessorStep,
RobotAction, RobotAction,
RobotObservation, RobotObservation,
RobotProcessorPipeline, RobotProcessorPipeline,
)
from lerobot.processor.converters import (
observation_to_transition, observation_to_transition,
robot_action_observation_to_transition, robot_action_observation_to_transition,
transition_to_observation, transition_to_observation,
@@ -33,7 +32,6 @@ from lerobot.processor.converters import (
) )
from lerobot.robots import Robot from lerobot.robots import Robot
from lerobot.teleoperators import Teleoperator from lerobot.teleoperators import Teleoperator
from lerobot.utils.control_utils import is_headless
from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.robot_utils import precise_sleep
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
+5 -5
View File
@@ -14,15 +14,15 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from lerobot.datasets.feature_utils import hw_to_dataset_features from lerobot.common.control_utils import init_keyboard_listener
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.policies.act.modeling_act import ACTPolicy from lerobot.policies import make_pre_post_processors
from lerobot.policies.factory import make_pre_post_processors from lerobot.policies.act import ACTPolicy
from lerobot.processor import make_default_processors from lerobot.processor import make_default_processors
from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig
from lerobot.scripts.lerobot_record import record_loop from lerobot.scripts.lerobot_record import record_loop
from lerobot.utils.constants import ACTION, OBS_STR from lerobot.utils.constants import ACTION, OBS_STR
from lerobot.utils.control_utils import init_keyboard_listener from lerobot.utils.feature_utils import hw_to_dataset_features
from lerobot.utils.utils import log_say from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun from lerobot.utils.visualization_utils import init_rerun
+4 -5
View File
@@ -14,16 +14,15 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from lerobot.datasets.feature_utils import hw_to_dataset_features from lerobot.common.control_utils import init_keyboard_listener
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.processor import make_default_processors from lerobot.processor import make_default_processors
from lerobot.robots.lekiwi.config_lekiwi import LeKiwiClientConfig from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig
from lerobot.robots.lekiwi.lekiwi_client import LeKiwiClient
from lerobot.scripts.lerobot_record import record_loop from lerobot.scripts.lerobot_record import record_loop
from lerobot.teleoperators.keyboard import KeyboardTeleop, KeyboardTeleopConfig from lerobot.teleoperators.keyboard import KeyboardTeleop, KeyboardTeleopConfig
from lerobot.teleoperators.so_leader import SO100Leader, SO100LeaderConfig from lerobot.teleoperators.so_leader import SO100Leader, SO100LeaderConfig
from lerobot.utils.constants import ACTION, OBS_STR from lerobot.utils.constants import ACTION, OBS_STR
from lerobot.utils.control_utils import init_keyboard_listener from lerobot.utils.feature_utils import hw_to_dataset_features
from lerobot.utils.utils import log_say from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun from lerobot.utils.visualization_utils import init_rerun
+2 -3
View File
@@ -16,9 +16,8 @@
import time import time
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.robots.lekiwi.config_lekiwi import LeKiwiClientConfig from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig
from lerobot.robots.lekiwi.lekiwi_client import LeKiwiClient
from lerobot.utils.constants import ACTION from lerobot.utils.constants import ACTION
from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.utils import log_say from lerobot.utils.utils import log_say
+7 -10
View File
@@ -14,19 +14,16 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.configs.types import FeatureType, PolicyFeature from lerobot.common.control_utils import init_keyboard_listener
from lerobot.datasets.feature_utils import combine_feature_dicts from lerobot.configs import FeatureType, PolicyFeature
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset, aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.model.kinematics import RobotKinematics from lerobot.model.kinematics import RobotKinematics
from lerobot.policies.act.modeling_act import ACTPolicy from lerobot.policies import make_pre_post_processors
from lerobot.policies.factory import make_pre_post_processors from lerobot.policies.act import ACTPolicy
from lerobot.processor import ( from lerobot.processor import (
RobotProcessorPipeline, RobotProcessorPipeline,
make_default_teleop_action_processor, make_default_teleop_action_processor,
)
from lerobot.processor.converters import (
observation_to_transition, observation_to_transition,
robot_action_observation_to_transition, robot_action_observation_to_transition,
transition_to_observation, transition_to_observation,
@@ -39,7 +36,7 @@ from lerobot.robots.so_follower.robot_kinematic_processor import (
) )
from lerobot.scripts.lerobot_record import record_loop from lerobot.scripts.lerobot_record import record_loop
from lerobot.types import RobotAction, RobotObservation from lerobot.types import RobotAction, RobotObservation
from lerobot.utils.control_utils import init_keyboard_listener from lerobot.utils.feature_utils import combine_feature_dicts
from lerobot.utils.utils import log_say from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun from lerobot.utils.visualization_utils import init_rerun
+8 -9
View File
@@ -14,13 +14,12 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.datasets.feature_utils import combine_feature_dicts from lerobot.common.control_utils import init_keyboard_listener
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset, aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.model.kinematics import RobotKinematics from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotProcessorPipeline from lerobot.processor import (
from lerobot.processor.converters import ( RobotProcessorPipeline,
observation_to_transition, observation_to_transition,
robot_action_observation_to_transition, robot_action_observation_to_transition,
transition_to_observation, transition_to_observation,
@@ -35,11 +34,11 @@ from lerobot.robots.so_follower.robot_kinematic_processor import (
InverseKinematicsEEToJoints, InverseKinematicsEEToJoints,
) )
from lerobot.scripts.lerobot_record import record_loop from lerobot.scripts.lerobot_record import record_loop
from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS from lerobot.teleoperators.phone import Phone, PhoneConfig
from lerobot.teleoperators.phone.config_phone import PhoneOS
from lerobot.teleoperators.phone.phone_processor import MapPhoneActionToRobotAction from lerobot.teleoperators.phone.phone_processor import MapPhoneActionToRobotAction
from lerobot.teleoperators.phone.teleop_phone import Phone
from lerobot.types import RobotAction, RobotObservation from lerobot.types import RobotAction, RobotObservation
from lerobot.utils.control_utils import init_keyboard_listener from lerobot.utils.feature_utils import combine_feature_dicts
from lerobot.utils.utils import log_say from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun from lerobot.utils.visualization_utils import init_rerun
+3 -3
View File
@@ -16,10 +16,10 @@
import time import time
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.model.kinematics import RobotKinematics from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotProcessorPipeline from lerobot.processor import (
from lerobot.processor.converters import ( RobotProcessorPipeline,
robot_action_observation_to_transition, robot_action_observation_to_transition,
transition_to_robot_action, transition_to_robot_action,
) )
+4 -4
View File
@@ -16,8 +16,8 @@
import time import time
from lerobot.model.kinematics import RobotKinematics from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotProcessorPipeline from lerobot.processor import (
from lerobot.processor.converters import ( RobotProcessorPipeline,
robot_action_observation_to_transition, robot_action_observation_to_transition,
transition_to_robot_action, transition_to_robot_action,
) )
@@ -28,9 +28,9 @@ from lerobot.robots.so_follower.robot_kinematic_processor import (
GripperVelocityToJoint, GripperVelocityToJoint,
InverseKinematicsEEToJoints, InverseKinematicsEEToJoints,
) )
from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS from lerobot.teleoperators.phone import Phone, PhoneConfig
from lerobot.teleoperators.phone.config_phone import PhoneOS
from lerobot.teleoperators.phone.phone_processor import MapPhoneActionToRobotAction from lerobot.teleoperators.phone.phone_processor import MapPhoneActionToRobotAction
from lerobot.teleoperators.phone.teleop_phone import Phone
from lerobot.types import RobotAction, RobotObservation from lerobot.types import RobotAction, RobotObservation
from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.visualization_utils import init_rerun, log_rerun_data from lerobot.utils.visualization_utils import init_rerun, log_rerun_data
+1 -2
View File
@@ -22,8 +22,7 @@ from pathlib import Path
import numpy as np import numpy as np
import tensorflow_datasets as tfds import tensorflow_datasets as tfds
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from lerobot.datasets import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.utils.utils import get_elapsed_time_in_days_hours_minutes_seconds from lerobot.utils.utils import get_elapsed_time_in_days_hours_minutes_seconds
DROID_SHARDS = 2048 DROID_SHARDS = 2048
@@ -36,7 +36,7 @@ class AggregateDatasets(PipelineStep):
def run(self, data=None, rank: int = 0, world_size: int = 1): def run(self, data=None, rank: int = 0, world_size: int = 1):
import logging import logging
from lerobot.datasets.aggregate import aggregate_datasets from lerobot.datasets import aggregate_datasets
from lerobot.utils.utils import init_logging from lerobot.utils.utils import init_logging
init_logging() init_logging()
+2 -3
View File
@@ -26,8 +26,7 @@ from huggingface_hub import HfApi
from huggingface_hub.constants import REPOCARD_NAME from huggingface_hub.constants import REPOCARD_NAME
from port_droid import DROID_SHARDS from port_droid import DROID_SHARDS
from lerobot.datasets.dataset_metadata import CODEBASE_VERSION, LeRobotDatasetMetadata from lerobot.datasets import CODEBASE_VERSION, LeRobotDatasetMetadata, create_lerobot_dataset_card
from lerobot.datasets.utils import create_lerobot_dataset_card
from lerobot.utils.utils import init_logging from lerobot.utils.utils import init_logging
@@ -155,7 +154,7 @@ class UploadDataset(PipelineStep):
from datasets.utils.tqdm import disable_progress_bars from datasets.utils.tqdm import disable_progress_bars
from huggingface_hub import CommitOperationAdd, preupload_lfs_files from huggingface_hub import CommitOperationAdd, preupload_lfs_files
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from lerobot.datasets import LeRobotDatasetMetadata
from lerobot.utils.utils import init_logging from lerobot.utils.utils import init_logging
init_logging() init_logging()
+4 -9
View File
@@ -109,15 +109,10 @@ except ImportError:
MATPLOTLIB_AVAILABLE = False MATPLOTLIB_AVAILABLE = False
plt = None plt = None
from lerobot.configs import parser from lerobot.configs import DatasetConfig, PreTrainedConfig, RTCAttentionSchedule, parser
from lerobot.configs.default import DatasetConfig from lerobot.datasets import LeRobotDataset, LeRobotDatasetMetadata, resolve_delta_timestamps
from lerobot.configs.policies import PreTrainedConfig from lerobot.policies import get_policy_class, make_pre_post_processors
from lerobot.configs.types import RTCAttentionSchedule from lerobot.policies.rtc import RTCConfig
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata
from lerobot.datasets.factory import resolve_delta_timestamps
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.policies.factory import get_policy_class, make_pre_post_processors
from lerobot.policies.rtc.configuration_rtc import RTCConfig
from lerobot.policies.rtc.debug_visualizer import RTCDebugVisualizer from lerobot.policies.rtc.debug_visualizer import RTCDebugVisualizer
from lerobot.utils.hub import HubMixin from lerobot.utils.hub import HubMixin
from lerobot.utils.utils import init_logging from lerobot.utils.utils import init_logging
+7 -11
View File
@@ -101,26 +101,21 @@ from threading import Event, Lock, Thread
import torch import torch
from torch import Tensor from torch import Tensor
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig # noqa: F401 from lerobot.cameras.opencv import OpenCVCameraConfig # noqa: F401
from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig # noqa: F401 from lerobot.cameras.realsense import RealSenseCameraConfig # noqa: F401
from lerobot.cameras.zmq.configuration_zmq import ZMQCameraConfig # noqa: F401 from lerobot.cameras.zmq import ZMQCameraConfig # noqa: F401
from lerobot.configs import parser from lerobot.configs import PreTrainedConfig, RTCAttentionSchedule, parser
from lerobot.configs.policies import PreTrainedConfig from lerobot.policies import get_policy_class, make_pre_post_processors
from lerobot.configs.types import RTCAttentionSchedule
from lerobot.datasets.feature_utils import build_dataset_frame, hw_to_dataset_features
from lerobot.policies.factory import get_policy_class, make_pre_post_processors
from lerobot.policies.rtc import ActionInterpolator, ActionQueue, LatencyTracker, RTCConfig from lerobot.policies.rtc import ActionInterpolator, ActionQueue, LatencyTracker, RTCConfig
from lerobot.processor import ( from lerobot.processor import (
NormalizerProcessorStep, NormalizerProcessorStep,
RelativeActionsProcessorStep, RelativeActionsProcessorStep,
TransitionKey, TransitionKey,
create_transition, create_transition,
)
from lerobot.processor.factory import (
make_default_robot_action_processor, make_default_robot_action_processor,
make_default_robot_observation_processor, make_default_robot_observation_processor,
to_relative_actions,
) )
from lerobot.processor.relative_action_processor import to_relative_actions
from lerobot.rl.process import ProcessSignalHandler from lerobot.rl.process import ProcessSignalHandler
from lerobot.robots import ( # noqa: F401 from lerobot.robots import ( # noqa: F401
Robot, Robot,
@@ -133,6 +128,7 @@ from lerobot.robots import ( # noqa: F401
) )
from lerobot.robots.utils import make_robot_from_config from lerobot.robots.utils import make_robot_from_config
from lerobot.utils.constants import OBS_IMAGES, OBS_STATE from lerobot.utils.constants import OBS_IMAGES, OBS_STATE
from lerobot.utils.feature_utils import build_dataset_frame, hw_to_dataset_features
from lerobot.utils.hub import HubMixin from lerobot.utils.hub import HubMixin
from lerobot.utils.utils import init_logging from lerobot.utils.utils import init_logging
+7 -10
View File
@@ -14,19 +14,16 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.configs.types import FeatureType, PolicyFeature from lerobot.common.control_utils import init_keyboard_listener
from lerobot.datasets.feature_utils import combine_feature_dicts from lerobot.configs import FeatureType, PolicyFeature
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset, aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.model.kinematics import RobotKinematics from lerobot.model.kinematics import RobotKinematics
from lerobot.policies.act.modeling_act import ACTPolicy from lerobot.policies import make_pre_post_processors
from lerobot.policies.factory import make_pre_post_processors from lerobot.policies.act import ACTPolicy
from lerobot.processor import ( from lerobot.processor import (
RobotProcessorPipeline, RobotProcessorPipeline,
make_default_teleop_action_processor, make_default_teleop_action_processor,
)
from lerobot.processor.converters import (
observation_to_transition, observation_to_transition,
robot_action_observation_to_transition, robot_action_observation_to_transition,
transition_to_observation, transition_to_observation,
@@ -39,7 +36,7 @@ from lerobot.robots.so_follower.robot_kinematic_processor import (
) )
from lerobot.scripts.lerobot_record import record_loop from lerobot.scripts.lerobot_record import record_loop
from lerobot.types import RobotAction, RobotObservation from lerobot.types import RobotAction, RobotObservation
from lerobot.utils.control_utils import init_keyboard_listener from lerobot.utils.feature_utils import combine_feature_dicts
from lerobot.utils.utils import log_say from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun from lerobot.utils.visualization_utils import init_rerun
+6 -7
View File
@@ -15,13 +15,12 @@
# limitations under the License. # limitations under the License.
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.datasets.feature_utils import combine_feature_dicts from lerobot.common.control_utils import init_keyboard_listener
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset, aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.model.kinematics import RobotKinematics from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotProcessorPipeline from lerobot.processor import (
from lerobot.processor.converters import ( RobotProcessorPipeline,
observation_to_transition, observation_to_transition,
robot_action_observation_to_transition, robot_action_observation_to_transition,
transition_to_observation, transition_to_observation,
@@ -36,7 +35,7 @@ from lerobot.robots.so_follower.robot_kinematic_processor import (
from lerobot.scripts.lerobot_record import record_loop from lerobot.scripts.lerobot_record import record_loop
from lerobot.teleoperators.so_leader import SO100Leader, SO100LeaderConfig from lerobot.teleoperators.so_leader import SO100Leader, SO100LeaderConfig
from lerobot.types import RobotAction, RobotObservation from lerobot.types import RobotAction, RobotObservation
from lerobot.utils.control_utils import init_keyboard_listener from lerobot.utils.feature_utils import combine_feature_dicts
from lerobot.utils.utils import log_say from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun from lerobot.utils.visualization_utils import init_rerun
+3 -3
View File
@@ -17,10 +17,10 @@
import time import time
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.model.kinematics import RobotKinematics from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotProcessorPipeline from lerobot.processor import (
from lerobot.processor.converters import ( RobotProcessorPipeline,
robot_action_observation_to_transition, robot_action_observation_to_transition,
transition_to_robot_action, transition_to_robot_action,
) )
+2 -2
View File
@@ -17,8 +17,8 @@
import time import time
from lerobot.model.kinematics import RobotKinematics from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotProcessorPipeline from lerobot.processor import (
from lerobot.processor.converters import ( RobotProcessorPipeline,
robot_action_observation_to_transition, robot_action_observation_to_transition,
robot_action_to_transition, robot_action_to_transition,
transition_to_robot_action, transition_to_robot_action,
+5 -7
View File
@@ -18,13 +18,11 @@ from pathlib import Path
import torch import torch
from lerobot.configs.types import FeatureType from lerobot.configs import FeatureType
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from lerobot.datasets import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.datasets.feature_utils import dataset_to_policy_features from lerobot.policies import make_pre_post_processors
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.policies.diffusion import DiffusionConfig, DiffusionPolicy
from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.utils.feature_utils import dataset_to_policy_features
from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy
from lerobot.policies.factory import make_pre_post_processors
def main(): def main():
+5 -7
View File
@@ -19,14 +19,12 @@ from pathlib import Path
import torch import torch
from lerobot.configs.types import FeatureType from lerobot.configs import FeatureType
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from lerobot.datasets import LeRobotDatasetMetadata, StreamingLeRobotDataset
from lerobot.datasets.feature_utils import dataset_to_policy_features from lerobot.policies import make_pre_post_processors
from lerobot.datasets.streaming_dataset import StreamingLeRobotDataset from lerobot.policies.act import ACTConfig, ACTPolicy
from lerobot.policies.act.configuration_act import ACTConfig
from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.policies.factory import make_pre_post_processors
from lerobot.utils.constants import ACTION from lerobot.utils.constants import ACTION
from lerobot.utils.feature_utils import dataset_to_policy_features
def main(): def main():
@@ -4,13 +4,11 @@ from pathlib import Path
import torch import torch
from lerobot.configs.types import FeatureType from lerobot.configs import FeatureType
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from lerobot.datasets import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.datasets.feature_utils import dataset_to_policy_features from lerobot.policies import make_pre_post_processors
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.policies.act import ACTConfig, ACTPolicy
from lerobot.policies.act.configuration_act import ACTConfig from lerobot.utils.feature_utils import dataset_to_policy_features
from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.policies.factory import make_pre_post_processors
def make_delta_timestamps(delta_indices: list[int] | None, fps: int) -> list[float]: def make_delta_timestamps(delta_indices: list[int] | None, fps: int) -> list[float]:
+4 -4
View File
@@ -1,9 +1,9 @@
import torch import torch
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from lerobot.datasets import LeRobotDatasetMetadata
from lerobot.policies.act.modeling_act import ACTPolicy from lerobot.policies import make_pre_post_processors
from lerobot.policies.factory import make_pre_post_processors from lerobot.policies.act import ACTPolicy
from lerobot.policies.utils import build_inference_frame, make_robot_action from lerobot.policies.utils import build_inference_frame, make_robot_action
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
+1 -1
View File
@@ -3,7 +3,7 @@ import threading
from lerobot.async_inference.configs import RobotClientConfig from lerobot.async_inference.configs import RobotClientConfig
from lerobot.async_inference.helpers import visualize_action_queue_size from lerobot.async_inference.helpers import visualize_action_queue_size
from lerobot.async_inference.robot_client import RobotClient from lerobot.async_inference.robot_client import RobotClient
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.robots.so_follower import SO100FollowerConfig from lerobot.robots.so_follower import SO100FollowerConfig
@@ -4,13 +4,11 @@ from pathlib import Path
import torch import torch
from lerobot.configs.types import FeatureType from lerobot.configs import FeatureType
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from lerobot.datasets import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.datasets.feature_utils import dataset_to_policy_features from lerobot.policies import make_pre_post_processors
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.policies.diffusion import DiffusionConfig, DiffusionPolicy
from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.utils.feature_utils import dataset_to_policy_features
from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy
from lerobot.policies.factory import make_pre_post_processors
def make_delta_timestamps(delta_indices: list[int] | None, fps: int) -> list[float]: def make_delta_timestamps(delta_indices: list[int] | None, fps: int) -> list[float]:
@@ -1,9 +1,9 @@
import torch import torch
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from lerobot.datasets import LeRobotDatasetMetadata
from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy from lerobot.policies import make_pre_post_processors
from lerobot.policies.factory import make_pre_post_processors from lerobot.policies.diffusion import DiffusionPolicy
from lerobot.policies.utils import build_inference_frame, make_robot_action from lerobot.policies.utils import build_inference_frame, make_robot_action
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
+4 -4
View File
@@ -1,11 +1,11 @@
import torch import torch
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.datasets.feature_utils import hw_to_dataset_features from lerobot.policies import make_pre_post_processors
from lerobot.policies.factory import make_pre_post_processors from lerobot.policies.pi0 import PI0Policy
from lerobot.policies.pi0.modeling_pi0 import PI0Policy
from lerobot.policies.utils import build_inference_frame, make_robot_action from lerobot.policies.utils import build_inference_frame, make_robot_action
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.utils.feature_utils import hw_to_dataset_features
MAX_EPISODES = 5 MAX_EPISODES = 5
MAX_STEPS_PER_EPISODE = 20 MAX_STEPS_PER_EPISODE = 20
+4 -4
View File
@@ -6,17 +6,17 @@ from queue import Empty, Full
import torch import torch
import torch.optim as optim import torch.optim as optim
from lerobot.datasets.feature_utils import hw_to_dataset_features from lerobot.datasets import LeRobotDataset
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.envs.configs import HILSerlProcessorConfig, HILSerlRobotEnvConfig from lerobot.envs.configs import HILSerlProcessorConfig, HILSerlRobotEnvConfig
from lerobot.policies.sac.configuration_sac import SACConfig from lerobot.policies import SACConfig
from lerobot.policies.sac.modeling_sac import SACPolicy from lerobot.policies.sac.modeling_sac import SACPolicy
from lerobot.policies.sac.reward_model.modeling_classifier import Classifier from lerobot.policies.sac.reward_model.modeling_classifier import Classifier
from lerobot.rl.buffer import ReplayBuffer from lerobot.rl.buffer import ReplayBuffer
from lerobot.rl.gym_manipulator import make_robot_env from lerobot.rl.gym_manipulator import make_robot_env
from lerobot.robots.so_follower import SO100FollowerConfig from lerobot.robots.so_follower import SO100FollowerConfig
from lerobot.teleoperators import TeleopEvents
from lerobot.teleoperators.so_leader import SO100LeaderConfig from lerobot.teleoperators.so_leader import SO100LeaderConfig
from lerobot.teleoperators.utils import TeleopEvents from lerobot.utils.feature_utils import hw_to_dataset_features
LOG_EVERY = 10 LOG_EVERY = 10
SEND_EVERY = 10 SEND_EVERY = 10
@@ -1,8 +1,7 @@
import torch import torch
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
from lerobot.policies.factory import make_policy, make_pre_post_processors from lerobot.policies import RewardClassifierConfig, make_policy, make_pre_post_processors
from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig
def main(): def main():
@@ -1,11 +1,11 @@
import torch import torch
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.datasets.feature_utils import hw_to_dataset_features from lerobot.policies import make_pre_post_processors
from lerobot.policies.factory import make_pre_post_processors from lerobot.policies.smolvla import SmolVLAPolicy
from lerobot.policies.smolvla.modeling_smolvla import SmolVLAPolicy
from lerobot.policies.utils import build_inference_frame, make_robot_action from lerobot.policies.utils import build_inference_frame, make_robot_action
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.utils.feature_utils import hw_to_dataset_features
MAX_EPISODES = 5 MAX_EPISODES = 5
MAX_STEPS_PER_EPISODE = 20 MAX_STEPS_PER_EPISODE = 20
+82 -39
View File
@@ -58,45 +58,74 @@ classifiers = [
keywords = ["lerobot", "huggingface", "robotics", "machine learning", "artificial intelligence"] keywords = ["lerobot", "huggingface", "robotics", "machine learning", "artificial intelligence"]
dependencies = [ dependencies = [
# Core ML
# Hugging Face dependencies
"datasets>=4.0.0,<5.0.0",
"diffusers>=0.27.2,<0.36.0",
"huggingface-hub>=1.0.0,<2.0.0",
"accelerate>=1.10.0,<2.0.0",
# Core dependencies
"numpy>=2.0.0,<2.3.0", # NOTE: Explicitly listing numpy helps the resolver converge faster. Upper bound imposed by opencv-python-headless.
"setuptools>=71.0.0,<81.0.0",
"cmake>=3.29.0.1,<4.2.0",
"packaging>=24.2,<26.0",
"torch>=2.7,<2.11.0", "torch>=2.7,<2.11.0",
"torchcodec>=0.3.0,<0.11.0; sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')", # NOTE: Windows support starts at version 0.7 (needs torch==2.8), ffmpeg>=8 support starts at version 0.8.1 (needs torch==2.9), system-wide ffmpeg support starts at version 0.10 (needs torch==2.10).
"torchvision>=0.22.0,<0.26.0", "torchvision>=0.22.0,<0.26.0",
"numpy>=2.0.0,<2.3.0", # NOTE: Explicitly listing numpy helps the resolver converge faster. Upper bound imposed by opencv-python-headless.
"einops>=0.8.0,<0.9.0",
"opencv-python-headless>=4.9.0,<4.14.0", "opencv-python-headless>=4.9.0,<4.14.0",
"av>=15.0.0,<16.0.0", "Pillow>=10.0.0,<13.0.0",
"jsonlines>=4.0.0,<5.0.0", "einops>=0.8.0,<0.9.0",
"pynput>=1.7.8,<1.9.0",
"pyserial>=3.5,<4.0",
"wandb>=0.24.0,<0.25.0", # Config & Hub
"draccus==0.10.0", # TODO: Relax version constraint "draccus==0.10.0", # TODO: Relax version constraint
"gymnasium>=1.1.1,<2.0.0", "huggingface-hub>=1.0.0,<2.0.0",
"rerun-sdk>=0.24.0,<0.27.0", "requests>=2.32.0,<3.0.0",
# Support dependencies # Environments
"deepdiff>=7.0.1,<9.0.0", # NOTE: gymnasium is used in lerobot.envs (lerobot-train, lerobot-eval), policies/factory,
"imageio[ffmpeg]>=2.34.0,<3.0.0", # and robots/unitree. Moving it to an optional extra would require import guards across many
# tightly-coupled modules. Candidate for a future refactor to decouple envs from the core.
"gymnasium>=1.1.1,<2.0.0",
# Serialization & checkpointing
"safetensors>=0.4.3,<1.0.0",
# Lightweight utilities
"packaging>=24.2,<26.0",
"termcolor>=2.4.0,<4.0.0", "termcolor>=2.4.0,<4.0.0",
"tqdm>=4.66.0,<5.0.0",
# Build tools (required by opencv-python-headless on some platforms)
"cmake>=3.29.0.1,<4.2.0",
"setuptools>=71.0.0,<81.0.0",
] ]
# Optional dependencies # Optional dependencies
[project.optional-dependencies] [project.optional-dependencies]
# ── Feature-scoped extras ──────────────────────────────────
dataset = [
"datasets>=4.0.0,<5.0.0",
"pandas>=2.0.0,<3.0.0", # NOTE: Transitive dependency of datasets
"pyarrow>=21.0.0,<30.0.0", # NOTE: Transitive dependency of datasets
"lerobot[av-dep]",
"torchcodec>=0.3.0,<0.11.0; sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')", # NOTE: Windows support starts at version 0.7 (needs torch==2.8), ffmpeg>=8 support starts at version 0.8.1 (needs torch==2.9), system-wide ffmpeg support starts at version 0.10 (needs torch==2.10).
"jsonlines>=4.0.0,<5.0.0",
]
training = [
"lerobot[dataset]",
"accelerate>=1.10.0,<2.0.0",
"wandb>=0.24.0,<0.25.0",
]
hardware = [
"pynput>=1.7.8,<1.9.0",
"pyserial>=3.5,<4.0",
"deepdiff>=7.0.1,<9.0.0",
]
viz = [
"rerun-sdk>=0.24.0,<0.27.0",
]
# ── User-facing composite extras (map to CLI scripts) ─────
# lerobot-record, lerobot-replay, lerobot-calibrate, lerobot-teleoperate, etc.
core_scripts = ["lerobot[dataset]", "lerobot[hardware]", "lerobot[viz]"]
# lerobot-eval -- base evaluation framework. You also need the policy's extra (e.g., lerobot[pi])
# and the environment's extra (e.g., lerobot[pusht]) if evaluating in simulation.
evaluation = ["lerobot[av-dep]"]
# lerobot-dataset-viz, lerobot-imgtransform-viz
dataset_viz = ["lerobot[dataset]", "lerobot[viz]"]
# Common # Common
av-dep = ["av>=15.0.0,<16.0.0"]
pygame-dep = ["pygame>=2.5.1,<2.7.0"] pygame-dep = ["pygame>=2.5.1,<2.7.0"]
placo-dep = ["placo>=0.9.6,<0.9.17"] placo-dep = ["placo>=0.9.6,<0.9.17"]
transformers-dep = ["transformers==5.3.0"] # TODO(Steven): https://github.com/huggingface/lerobot/pull/3249 transformers-dep = ["transformers==5.3.0"] # TODO(Steven): https://github.com/huggingface/lerobot/pull/3249
@@ -104,6 +133,7 @@ grpcio-dep = ["grpcio==1.73.1", "protobuf>=6.31.1,<6.32.0"]
can-dep = ["python-can>=4.2.0,<5.0.0"] can-dep = ["python-can>=4.2.0,<5.0.0"]
peft-dep = ["peft>=0.18.0,<1.0.0"] peft-dep = ["peft>=0.18.0,<1.0.0"]
scipy-dep = ["scipy>=1.14.0,<2.0.0"] scipy-dep = ["scipy>=1.14.0,<2.0.0"]
diffusers-dep = ["diffusers>=0.27.2,<0.36.0"]
qwen-vl-utils-dep = ["qwen-vl-utils>=0.0.11,<0.1.0"] qwen-vl-utils-dep = ["qwen-vl-utils>=0.0.11,<0.1.0"]
matplotlib-dep = ["matplotlib>=3.10.3,<4.0.0", "contourpy>=1.3.0,<2.0.0"] # NOTE: Explicitly listing contourpy helps the resolver converge faster. matplotlib-dep = ["matplotlib>=3.10.3,<4.0.0", "contourpy>=1.3.0,<2.0.0"] # NOTE: Explicitly listing contourpy helps the resolver converge faster.
@@ -136,28 +166,28 @@ intelrealsense = [
phone = ["hebi-py>=2.8.0,<2.12.0", "teleop>=0.1.0,<0.2.0", "fastapi<1.0", "lerobot[scipy-dep]"] phone = ["hebi-py>=2.8.0,<2.12.0", "teleop>=0.1.0,<0.2.0", "fastapi<1.0", "lerobot[scipy-dep]"]
# Policies # Policies
diffusion = ["lerobot[diffusers-dep]"]
wallx = [ wallx = [
"lerobot[transformers-dep]", "lerobot[transformers-dep]",
"lerobot[peft]", "lerobot[peft-dep]",
"lerobot[scipy-dep]", "lerobot[scipy-dep]",
"torchdiffeq>=0.2.4,<0.3.0", "torchdiffeq>=0.2.4,<0.3.0",
"lerobot[qwen-vl-utils-dep]", "lerobot[qwen-vl-utils-dep]",
] ]
pi = ["lerobot[transformers-dep]", "lerobot[scipy-dep]"] pi = ["lerobot[transformers-dep]", "lerobot[scipy-dep]"]
smolvla = ["lerobot[transformers-dep]", "num2words>=0.5.14,<0.6.0", "accelerate>=1.7.0,<2.0.0", "safetensors>=0.4.3,<1.0.0"] smolvla = ["lerobot[transformers-dep]", "num2words>=0.5.14,<0.6.0", "accelerate>=1.7.0,<2.0.0"]
multi_task_dit = ["lerobot[transformers-dep]"] multi_task_dit = ["lerobot[transformers-dep]", "lerobot[diffusers-dep]"]
groot = [ groot = [
"lerobot[transformers-dep]", "lerobot[transformers-dep]",
"lerobot[peft]", "lerobot[peft-dep]",
"lerobot[diffusers-dep]",
"dm-tree>=0.1.8,<1.0.0", "dm-tree>=0.1.8,<1.0.0",
"timm>=1.0.0,<1.1.0", "timm>=1.0.0,<1.1.0",
"safetensors>=0.4.3,<1.0.0",
"Pillow>=10.0.0,<13.0.0",
"decord>=0.6.0,<1.0.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64')", "decord>=0.6.0,<1.0.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64')",
"ninja>=1.11.1,<2.0.0", "ninja>=1.11.1,<2.0.0",
"flash-attn>=2.5.9,<3.0.0 ; sys_platform != 'darwin'" "flash-attn>=2.5.9,<3.0.0 ; sys_platform != 'darwin'"
] ]
sarm = ["lerobot[transformers-dep]", "faker>=33.0.0,<35.0.0", "lerobot[matplotlib-dep]", "lerobot[qwen-vl-utils-dep]"] sarm = ["lerobot[transformers-dep]", "pydantic>=2.0.0,<3.0.0", "faker>=33.0.0,<35.0.0", "lerobot[matplotlib-dep]", "lerobot[qwen-vl-utils-dep]"]
xvla = ["lerobot[transformers-dep]"] xvla = ["lerobot[transformers-dep]"]
hilserl = ["lerobot[transformers-dep]", "gym-hil>=0.1.13,<0.2.0", "lerobot[grpcio-dep]", "lerobot[placo-dep]"] hilserl = ["lerobot[transformers-dep]", "gym-hil>=0.1.13,<0.2.0", "lerobot[grpcio-dep]", "lerobot[placo-dep]"]
@@ -166,31 +196,42 @@ async = ["lerobot[grpcio-dep]", "lerobot[matplotlib-dep]"]
peft = ["lerobot[transformers-dep]", "lerobot[peft-dep]"] peft = ["lerobot[transformers-dep]", "lerobot[peft-dep]"]
# Development # Development
dev = ["pre-commit>=3.7.0,<5.0.0", "debugpy>=1.8.1,<1.9.0", "lerobot[grpcio-dep]", "grpcio-tools==1.73.1", "mypy>=1.19.1"] dev = ["pre-commit>=3.7.0,<5.0.0", "debugpy>=1.8.1,<1.9.0", "lerobot[grpcio-dep]", "grpcio-tools==1.73.1", "mypy>=1.19.1", "ruff>=0.14.1"]
test = ["pytest>=8.1.0,<9.0.0", "pytest-timeout>=2.4.0,<3.0.0", "pytest-cov>=5.0.0,<8.0.0", "mock-serial>=0.0.1,<0.1.0 ; sys_platform != 'win32'"] test = ["pytest>=8.1.0,<9.0.0", "pytest-timeout>=2.4.0,<3.0.0", "pytest-cov>=5.0.0,<8.0.0", "mock-serial>=0.0.1,<0.1.0 ; sys_platform != 'win32'"]
video_benchmark = ["scikit-image>=0.23.2,<0.26.0", "pandas>=2.2.2,<2.4.0"] video_benchmark = ["scikit-image>=0.23.2,<0.26.0", "pandas>=2.2.2,<2.4.0"]
# Simulation # Simulation
# NOTE: Explicitly listing scipy helps flatten the dependecy tree. # NOTE: Explicitly listing scipy helps flatten the dependecy tree.
aloha = ["gym-aloha>=0.1.2,<0.2.0", "lerobot[scipy-dep]"] aloha = ["lerobot[dataset]", "gym-aloha>=0.1.2,<0.2.0", "lerobot[scipy-dep]"]
pusht = ["gym-pusht>=0.1.5,<0.2.0", "pymunk>=6.6.0,<7.0.0"] # TODO: Fix pymunk version in gym-pusht instead pusht = ["lerobot[dataset]", "gym-pusht>=0.1.5,<0.2.0", "pymunk>=6.6.0,<7.0.0"] # TODO: Fix pymunk version in gym-pusht instead
libero = ["lerobot[transformers-dep]", "hf-libero>=0.1.3,<0.2.0; sys_platform == 'linux'", "lerobot[scipy-dep]"] libero = ["lerobot[dataset]", "lerobot[transformers-dep]", "hf-libero>=0.1.3,<0.2.0; sys_platform == 'linux'", "lerobot[scipy-dep]"]
metaworld = ["metaworld==3.0.0", "lerobot[scipy-dep]"] metaworld = ["lerobot[dataset]", "metaworld==3.0.0", "lerobot[scipy-dep]"]
# All # All
all = [ all = [
# Feature-scoped extras
"lerobot[dataset]",
"lerobot[training]",
"lerobot[hardware]",
"lerobot[viz]",
# NOTE(resolver hint): scipy is pulled in transitively via lerobot[scipy-dep] through # NOTE(resolver hint): scipy is pulled in transitively via lerobot[scipy-dep] through
# multiple extras (aloha, metaworld, pi, wallx, phone). Listing it explicitly # multiple extras (aloha, metaworld, pi, wallx, phone). Listing it explicitly
# helps pip's resolver converge by constraining scipy early, before it encounters # helps pip's resolver converge by constraining scipy early, before it encounters
# the loose scipy requirements from transitive deps like dm-control and metaworld. # the loose scipy requirements from transitive deps like dm-control and metaworld.
"scipy>=1.14.0,<2.0.0", "scipy>=1.14.0,<2.0.0",
"lerobot[dynamixel]", "lerobot[dynamixel]",
"lerobot[feetech]",
"lerobot[damiao]",
"lerobot[robstride]",
"lerobot[gamepad]", "lerobot[gamepad]",
"lerobot[hopejr]", "lerobot[hopejr]",
"lerobot[lekiwi]", "lerobot[lekiwi]",
"lerobot[openarms]",
"lerobot[reachy2]", "lerobot[reachy2]",
"lerobot[kinematics]", "lerobot[kinematics]",
"lerobot[intelrealsense]", "lerobot[intelrealsense]",
"lerobot[diffusion]",
"lerobot[multi_task_dit]",
"lerobot[wallx]", "lerobot[wallx]",
"lerobot[pi]", "lerobot[pi]",
"lerobot[smolvla]", "lerobot[smolvla]",
@@ -267,7 +308,9 @@ ignore = [
] ]
[tool.ruff.lint.per-file-ignores] [tool.ruff.lint.per-file-ignores]
"__init__.py" = ["F401", "F403"] "__init__.py" = ["F401", "F403", "E402"]
# E402: conditional-import guards (TYPE_CHECKING / is_package_available) must precede the imports they protect
"src/lerobot/scripts/convert_dataset_v21_to_v30.py" = ["E402"]
"src/lerobot/policies/wall_x/**" = ["N801", "N812", "SIM102", "SIM108", "SIM210", "SIM211", "B006", "B007", "SIM118"] # Supprese these as they are coming from original Qwen2_5_vl code TODO(pepijn): refactor original "src/lerobot/policies/wall_x/**" = ["N801", "N812", "SIM102", "SIM108", "SIM210", "SIM211", "B006", "B007", "SIM118"] # Supprese these as they are coming from original Qwen2_5_vl code TODO(pepijn): refactor original
[tool.ruff.lint.isort] [tool.ruff.lint.isort]
+26 -175
View File
@@ -13,188 +13,39 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
""" """
This file contains lists of available environments, dataset and policies to reflect the current state of LeRobot library. LeRobot -- PyTorch library for real-world robotics.
We do not want to import all the dependencies, but instead we keep it lightweight to ensure fast access to these variables.
Example: Provides datasets, pretrained policies, and tools for training, evaluation,
```python data collection, and robot control. Integrates with Hugging Face Hub for
import lerobot model and dataset sharing.
print(lerobot.available_envs)
print(lerobot.available_tasks_per_env)
print(lerobot.available_datasets)
print(lerobot.available_datasets_per_env)
print(lerobot.available_real_world_datasets)
print(lerobot.available_policies)
print(lerobot.available_policies_per_env)
print(lerobot.available_robots)
print(lerobot.available_cameras)
print(lerobot.available_motors)
```
When implementing a new dataset loadable with LeRobotDataset follow these steps: The base install is intentionally lightweight. Feature-specific dependencies
- Update `available_datasets_per_env` in `lerobot/__init__.py` are gated behind optional extras::
When implementing a new environment (e.g. `gym_aloha`), follow these steps: pip install 'lerobot[dataset]' # dataset loading & creation
- Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py` pip install 'lerobot[training]' # training loop + wandb
pip install 'lerobot[hardware]' # real robot control
When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps: pip install 'lerobot[core_scripts]' # dataset + hardware + viz (record, replay, calibrate, etc.)
- Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py` pip install 'lerobot[all]' # everything
- Set the required `name` class attribute.
- Update variables in `tests/test_available.py` by importing your new Policy class
""" """
import itertools from lerobot.__version__ import __version__
from lerobot.__version__ import __version__ # noqa: F401 # Maps optional extras to the CLI entry-points they unlock.
available_extras: dict[str, list[str]] = {
# TODO(rcadene): Improve policies and envs. As of now, an item in `available_policies` "dataset": ["lerobot-dataset-viz", "lerobot-imgtransform-viz", "lerobot-edit-dataset"],
# refers to a yaml file AND a modeling name. Same for `available_envs` which refers to "training": ["lerobot-train"],
# a yaml file AND a environment name. The difference should be more obvious. "hardware": [
available_tasks_per_env = { "lerobot-calibrate",
"aloha": [ "lerobot-find-port",
"AlohaInsertion-v0", "lerobot-find-cameras",
"AlohaTransferCube-v0", "lerobot-find-joint-limits",
"lerobot-setup-motors",
], ],
"pusht": ["PushT-v0"], "core_scripts": ["lerobot-record", "lerobot-replay", "lerobot-teleoperate"],
} "evaluation": ["lerobot-eval"],
available_envs = list(available_tasks_per_env.keys())
available_datasets_per_env = {
"aloha": [
"lerobot/aloha_sim_insertion_human",
"lerobot/aloha_sim_insertion_scripted",
"lerobot/aloha_sim_transfer_cube_human",
"lerobot/aloha_sim_transfer_cube_scripted",
"lerobot/aloha_sim_insertion_human_image",
"lerobot/aloha_sim_insertion_scripted_image",
"lerobot/aloha_sim_transfer_cube_human_image",
"lerobot/aloha_sim_transfer_cube_scripted_image",
],
# TODO(alexander-soare): Add "lerobot/pusht_keypoints". Right now we can't because this is too tightly
# coupled with tests.
"pusht": ["lerobot/pusht", "lerobot/pusht_image"],
} }
available_real_world_datasets = [ __all__ = ["__version__", "available_extras"]
"lerobot/aloha_mobile_cabinet",
"lerobot/aloha_mobile_chair",
"lerobot/aloha_mobile_elevator",
"lerobot/aloha_mobile_shrimp",
"lerobot/aloha_mobile_wash_pan",
"lerobot/aloha_mobile_wipe_wine",
"lerobot/aloha_static_battery",
"lerobot/aloha_static_candy",
"lerobot/aloha_static_coffee",
"lerobot/aloha_static_coffee_new",
"lerobot/aloha_static_cups_open",
"lerobot/aloha_static_fork_pick_up",
"lerobot/aloha_static_pingpong_test",
"lerobot/aloha_static_pro_pencil",
"lerobot/aloha_static_screw_driver",
"lerobot/aloha_static_tape",
"lerobot/aloha_static_thread_velcro",
"lerobot/aloha_static_towel",
"lerobot/aloha_static_vinh_cup",
"lerobot/aloha_static_vinh_cup_left",
"lerobot/aloha_static_ziploc_slide",
"lerobot/umi_cup_in_the_wild",
"lerobot/unitreeh1_fold_clothes",
"lerobot/unitreeh1_rearrange_objects",
"lerobot/unitreeh1_two_robot_greeting",
"lerobot/unitreeh1_warehouse",
"lerobot/nyu_rot_dataset",
"lerobot/utokyo_saytap",
"lerobot/imperialcollege_sawyer_wrist_cam",
"lerobot/utokyo_xarm_bimanual",
"lerobot/tokyo_u_lsmo",
"lerobot/utokyo_pr2_opening_fridge",
"lerobot/cmu_franka_exploration_dataset",
"lerobot/cmu_stretch",
"lerobot/asu_table_top",
"lerobot/utokyo_pr2_tabletop_manipulation",
"lerobot/utokyo_xarm_pick_and_place",
"lerobot/ucsd_kitchen_dataset",
"lerobot/austin_buds_dataset",
"lerobot/dlr_sara_grid_clamp",
"lerobot/conq_hose_manipulation",
"lerobot/columbia_cairlab_pusht_real",
"lerobot/dlr_sara_pour",
"lerobot/dlr_edan_shared_control",
"lerobot/ucsd_pick_and_place_dataset",
"lerobot/berkeley_cable_routing",
"lerobot/nyu_franka_play_dataset",
"lerobot/austin_sirius_dataset",
"lerobot/cmu_play_fusion",
"lerobot/berkeley_gnm_sac_son",
"lerobot/nyu_door_opening_surprising_effectiveness",
"lerobot/berkeley_fanuc_manipulation",
"lerobot/jaco_play",
"lerobot/viola",
"lerobot/kaist_nonprehensile",
"lerobot/berkeley_mvp",
"lerobot/uiuc_d3field",
"lerobot/berkeley_gnm_recon",
"lerobot/austin_sailor_dataset",
"lerobot/utaustin_mutex",
"lerobot/roboturk",
"lerobot/stanford_hydra_dataset",
"lerobot/berkeley_autolab_ur5",
"lerobot/stanford_robocook",
"lerobot/toto",
"lerobot/fmb",
"lerobot/droid_100",
"lerobot/berkeley_rpt",
"lerobot/stanford_kuka_multimodal_dataset",
"lerobot/iamlab_cmu_pickup_insert",
"lerobot/taco_play",
"lerobot/berkeley_gnm_cory_hall",
"lerobot/usc_cloth_sim",
]
available_datasets = sorted(
set(itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets))
)
# lists all available policies from `lerobot/policies`
available_policies = ["act", "diffusion", "tdmpc", "vqbet"]
# lists all available robots from `lerobot/robots`
available_robots = [
"koch",
"koch_bimanual",
"aloha",
"so100",
"so101",
]
# lists all available cameras from `lerobot/cameras`
available_cameras = [
"opencv",
"intelrealsense",
]
# lists all available motors from `lerobot/motors`
available_motors = [
"dynamixel",
"feetech",
]
# keys and values refer to yaml files
available_policies_per_env = {
"aloha": ["act"],
"pusht": ["diffusion", "vqbet"],
"koch_real": ["act_koch_real"],
"aloha_real": ["act_aloha_real"],
}
env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks]
env_dataset_pairs = [
(env, dataset) for env, datasets in available_datasets_per_env.items() for dataset in datasets
]
env_dataset_policy_triplets = [
(env, dataset, policy)
for env, datasets in available_datasets_per_env.items()
for dataset in datasets
for policy in available_policies_per_env[env]
]
+30
View File
@@ -0,0 +1,30 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Async inference server/client.
Requires: ``pip install 'lerobot[async]'``
Available modules (import directly)::
from lerobot.async_inference.policy_server import ...
from lerobot.async_inference.robot_client import ...
"""
from lerobot.utils.import_utils import require_package
require_package("grpcio", extra="async", import_name="grpc")
__all__: list[str] = []
+2 -2
View File
@@ -22,8 +22,7 @@ from typing import Any
import torch import torch
from lerobot.configs.types import PolicyFeature from lerobot.configs import PolicyFeature
from lerobot.datasets.feature_utils import build_dataset_frame, hw_to_dataset_features
# NOTE: Configs need to be loaded for the client to be able to instantiate the policy config # NOTE: Configs need to be loaded for the client to be able to instantiate the policy config
from lerobot.policies import ( # noqa: F401 from lerobot.policies import ( # noqa: F401
@@ -36,6 +35,7 @@ from lerobot.policies import ( # noqa: F401
) )
from lerobot.robots.robot import Robot from lerobot.robots.robot import Robot
from lerobot.utils.constants import OBS_IMAGES, OBS_STATE, OBS_STR from lerobot.utils.constants import OBS_IMAGES, OBS_STATE, OBS_STR
from lerobot.utils.feature_utils import build_dataset_frame, hw_to_dataset_features
from lerobot.utils.utils import init_logging from lerobot.utils.utils import init_logging
Action = torch.Tensor Action = torch.Tensor
+1 -1
View File
@@ -38,7 +38,7 @@ import draccus
import grpc import grpc
import torch import torch
from lerobot.policies.factory import get_policy_class, make_pre_post_processors from lerobot.policies import get_policy_class, make_pre_post_processors
from lerobot.processor import PolicyProcessorPipeline from lerobot.processor import PolicyProcessorPipeline
from lerobot.transport import ( from lerobot.transport import (
services_pb2, # type: ignore services_pb2, # type: ignore
+2 -2
View File
@@ -47,8 +47,8 @@ import draccus
import grpc import grpc
import torch import torch
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig # noqa: F401 from lerobot.cameras.opencv import OpenCVCameraConfig # noqa: F401
from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig # noqa: F401 from lerobot.cameras.realsense import RealSenseCameraConfig # noqa: F401
from lerobot.robots import ( # noqa: F401 from lerobot.robots import ( # noqa: F401
Robot, Robot,
RobotConfig, RobotConfig,
+6
View File
@@ -15,3 +15,9 @@
from .camera import Camera from .camera import Camera
from .configs import CameraConfig, ColorMode, Cv2Backends, Cv2Rotation from .configs import CameraConfig, ColorMode, Cv2Backends, Cv2Rotation
from .utils import make_cameras_from_configs from .utils import make_cameras_from_configs
# NOTE: Camera submodule configs and implementations (OpenCVCameraConfig, RealSenseCamera, etc.)
# are intentionally NOT re-exported here to avoid pulling backend-specific dependencies.
# Import from submodules: ``from lerobot.cameras.opencv import OpenCVCameraConfig``
__all__ = ["Camera", "CameraConfig", "ColorMode", "Cv2Backends", "Cv2Rotation", "make_cameras_from_configs"]
@@ -14,3 +14,5 @@
from .configuration_reachy2_camera import Reachy2CameraConfig from .configuration_reachy2_camera import Reachy2CameraConfig
from .reachy2_camera import Reachy2Camera from .reachy2_camera import Reachy2Camera
__all__ = ["Reachy2Camera", "Reachy2CameraConfig"]
@@ -14,3 +14,5 @@
from .camera_realsense import RealSenseCamera from .camera_realsense import RealSenseCamera
from .configuration_realsense import RealSenseCameraConfig from .configuration_realsense import RealSenseCameraConfig
__all__ = ["RealSenseCamera", "RealSenseCameraConfig"]
+2 -2
View File
@@ -31,8 +31,8 @@ import cv2
import numpy as np import numpy as np
import zmq import zmq
from lerobot.cameras.configs import ColorMode from ..configs import ColorMode
from lerobot.cameras.opencv import OpenCVCamera, OpenCVCameraConfig from ..opencv import OpenCVCamera, OpenCVCameraConfig
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
+30
View File
@@ -0,0 +1,30 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cross-cutting modules that bridge multiple lerobot packages.
Unlike ``lerobot.utils`` (which must remain dependency-free), modules here
are allowed to import from ``lerobot.policies``, ``lerobot.processor``,
``lerobot.configs``, etc. They are deliberately NOT re-exported from the
top-level ``lerobot`` package.
Available modules (import directly)::
from lerobot.common.control_utils import predict_action, ...
from lerobot.common.train_utils import save_checkpoint, ...
from lerobot.common.wandb_utils import WandBLogger, ...
"""
__all__: list[str] = []
@@ -12,26 +12,25 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import annotations
######################################################################################## ########################################################################################
# Utilities # Utilities
######################################################################################## ########################################################################################
import logging import logging
import traceback import traceback
from contextlib import nullcontext from contextlib import nullcontext
from copy import copy from copy import copy
from functools import cache from functools import cache
from typing import Any from typing import TYPE_CHECKING, Any
import numpy as np import numpy as np
import torch import torch
from deepdiff import DeepDiff
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.policies import PreTrainedPolicy, prepare_observation_for_inference
from lerobot.datasets.utils import DEFAULT_FEATURES
from lerobot.policies.pretrained import PreTrainedPolicy if TYPE_CHECKING:
from lerobot.policies.utils import prepare_observation_for_inference from lerobot.datasets import LeRobotDataset
from lerobot.processor import PolicyProcessorPipeline from lerobot.processor import PolicyProcessorPipeline
from lerobot.robots import Robot from lerobot.robots import Robot
from lerobot.types import PolicyAction from lerobot.types import PolicyAction
@@ -218,6 +217,13 @@ def sanity_check_dataset_robot_compatibility(
Raises: Raises:
ValueError: If any of the checked metadata fields do not match. ValueError: If any of the checked metadata fields do not match.
""" """
from lerobot.utils.import_utils import require_package
require_package("deepdiff", extra="hardware")
from deepdiff import DeepDiff
from lerobot.utils.constants import DEFAULT_FEATURES
fields = [ fields = [
("robot_type", dataset.meta.robot_type, robot.robot_type), ("robot_type", dataset.meta.robot_type, robot.robot_type),
("fps", dataset.fps, fps), ("fps", dataset.fps, fps),
@@ -19,10 +19,13 @@ from torch.optim import Optimizer
from torch.optim.lr_scheduler import LRScheduler from torch.optim.lr_scheduler import LRScheduler
from lerobot.configs.train import TrainPipelineConfig from lerobot.configs.train import TrainPipelineConfig
from lerobot.datasets.io_utils import load_json, write_json from lerobot.optim import (
from lerobot.optim.optimizers import load_optimizer_state, save_optimizer_state load_optimizer_state,
from lerobot.optim.schedulers import load_scheduler_state, save_scheduler_state load_scheduler_state,
from lerobot.policies.pretrained import PreTrainedPolicy save_optimizer_state,
save_scheduler_state,
)
from lerobot.policies import PreTrainedPolicy
from lerobot.processor import PolicyProcessorPipeline from lerobot.processor import PolicyProcessorPipeline
from lerobot.utils.constants import ( from lerobot.utils.constants import (
CHECKPOINTS_DIR, CHECKPOINTS_DIR,
@@ -31,6 +34,7 @@ from lerobot.utils.constants import (
TRAINING_STATE_DIR, TRAINING_STATE_DIR,
TRAINING_STEP, TRAINING_STEP,
) )
from lerobot.utils.io_utils import load_json, write_json
from lerobot.utils.random_utils import load_rng_state, save_rng_state from lerobot.utils.random_utils import load_rng_state, save_rng_state
+47
View File
@@ -0,0 +1,47 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Public API for lerobot configuration types and base config classes.
NOTE: TrainPipelineConfig, EvalPipelineConfig, and TrainRLServerPipelineConfig
are intentionally NOT re-exported here to avoid circular dependencies
(they import lerobot.envs and lerobot.policies at module level).
Import them directly: ``from lerobot.configs.train import TrainPipelineConfig``
"""
from .default import DatasetConfig, EvalConfig, PeftConfig, WandBConfig
from .policies import PreTrainedConfig
from .types import (
FeatureType,
NormalizationMode,
PipelineFeatureType,
PolicyFeature,
RTCAttentionSchedule,
)
__all__ = [
# Types
"FeatureType",
"NormalizationMode",
"PipelineFeatureType",
"PolicyFeature",
"RTCAttentionSchedule",
# Config classes
"DatasetConfig",
"EvalConfig",
"PeftConfig",
"PreTrainedConfig",
"WandBConfig",
]
+2 -2
View File
@@ -16,8 +16,8 @@
from dataclasses import dataclass, field from dataclasses import dataclass, field
from lerobot.datasets.transforms import ImageTransformsConfig from lerobot.transforms import ImageTransformsConfig
from lerobot.datasets.video_utils import get_safe_default_codec from lerobot.utils.import_utils import get_safe_default_codec
@dataclass @dataclass
+3 -2
View File
@@ -19,8 +19,9 @@ from pathlib import Path
from lerobot import envs, policies # noqa: F401 from lerobot import envs, policies # noqa: F401
from lerobot.configs import parser from lerobot.configs import parser
from lerobot.configs.default import EvalConfig
from lerobot.configs.policies import PreTrainedConfig from .default import EvalConfig
from .policies import PreTrainedConfig
logger = getLogger(__name__) logger = getLogger(__name__)
+3 -3
View File
@@ -26,13 +26,13 @@ from huggingface_hub import hf_hub_download
from huggingface_hub.constants import CONFIG_NAME from huggingface_hub.constants import CONFIG_NAME
from huggingface_hub.errors import HfHubHTTPError from huggingface_hub.errors import HfHubHTTPError
from lerobot.configs.types import FeatureType, PolicyFeature from lerobot.optim import LRSchedulerConfig, OptimizerConfig
from lerobot.optim.optimizers import OptimizerConfig
from lerobot.optim.schedulers import LRSchedulerConfig
from lerobot.utils.constants import ACTION, OBS_STATE from lerobot.utils.constants import ACTION, OBS_STATE
from lerobot.utils.device_utils import auto_select_torch_device, is_amp_available, is_torch_device_available from lerobot.utils.device_utils import auto_select_torch_device, is_amp_available, is_torch_device_available
from lerobot.utils.hub import HubMixin from lerobot.utils.hub import HubMixin
from .types import FeatureType, PolicyFeature
T = TypeVar("T", bound="PreTrainedConfig") T = TypeVar("T", bound="PreTrainedConfig")
logger = getLogger(__name__) logger = getLogger(__name__)
+4 -4
View File
@@ -24,12 +24,12 @@ from huggingface_hub.errors import HfHubHTTPError
from lerobot import envs from lerobot import envs
from lerobot.configs import parser from lerobot.configs import parser
from lerobot.configs.default import DatasetConfig, EvalConfig, PeftConfig, WandBConfig from lerobot.optim import LRSchedulerConfig, OptimizerConfig
from lerobot.configs.policies import PreTrainedConfig
from lerobot.optim import OptimizerConfig
from lerobot.optim.schedulers import LRSchedulerConfig
from lerobot.utils.hub import HubMixin from lerobot.utils.hub import HubMixin
from .default import DatasetConfig, EvalConfig, PeftConfig, WandBConfig
from .policies import PreTrainedConfig
TRAIN_CONFIG_NAME = "train_config.json" TRAIN_CONFIG_NAME = "train_config.json"
+10
View File
@@ -11,3 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""
Data processing utilities (annotation tools, dataset transformations).
Available sub-modules (import directly)::
from lerobot.data_processing.sarm_annotations import ...
"""
__all__: list[str] = []
@@ -11,3 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""
SARM subtask annotation tools.
Available modules (import directly)::
from lerobot.data_processing.sarm_annotations.subtask_annotation import ...
"""
__all__: list[str] = []
@@ -76,7 +76,7 @@ import torch
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from transformers import AutoProcessor, Qwen3VLMoeForConditionalGeneration from transformers import AutoProcessor, Qwen3VLMoeForConditionalGeneration
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets import LeRobotDataset
# Pydantic Models for SARM Subtask Annotation # Pydantic Models for SARM Subtask Annotation
@@ -746,8 +746,7 @@ def save_annotations_to_dataset(
dataset_path: Path, annotations: dict[int, SubtaskAnnotation], fps: int, prefix: str = "sparse" dataset_path: Path, annotations: dict[int, SubtaskAnnotation], fps: int, prefix: str = "sparse"
): ):
"""Save annotations to LeRobot dataset parquet format.""" """Save annotations to LeRobot dataset parquet format."""
from lerobot.datasets.io_utils import load_episodes from lerobot.datasets import DEFAULT_EPISODES_PATH, load_episodes
from lerobot.datasets.utils import DEFAULT_EPISODES_PATH
episodes_dataset = load_episodes(dataset_path) episodes_dataset = load_episodes(dataset_path)
if not episodes_dataset or len(episodes_dataset) == 0: if not episodes_dataset or len(episodes_dataset) == 0:
@@ -841,7 +840,7 @@ def generate_auto_sparse_annotations(
def load_annotations_from_dataset(dataset_path: Path, prefix: str = "sparse") -> dict[int, SubtaskAnnotation]: def load_annotations_from_dataset(dataset_path: Path, prefix: str = "sparse") -> dict[int, SubtaskAnnotation]:
"""Load annotations from LeRobot dataset parquet files.""" """Load annotations from LeRobot dataset parquet files."""
from lerobot.datasets.io_utils import load_episodes from lerobot.datasets import load_episodes
episodes_dataset = load_episodes(dataset_path) episodes_dataset = load_episodes(dataset_path)
if not episodes_dataset or len(episodes_dataset) == 0: if not episodes_dataset or len(episodes_dataset) == 0:
+57 -8
View File
@@ -15,19 +15,68 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from lerobot.utils.import_utils import require_package
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.multi_dataset import MultiLeRobotDataset require_package("datasets", extra="dataset")
from lerobot.datasets.sampler import EpisodeAwareSampler require_package("av", extra="dataset")
from lerobot.datasets.streaming_dataset import StreamingLeRobotDataset
from lerobot.datasets.transforms import ImageTransforms, ImageTransformsConfig from .aggregate import aggregate_datasets
from .compute_stats import DEFAULT_QUANTILES, aggregate_stats, get_feature_stats
from .dataset_metadata import CODEBASE_VERSION, LeRobotDatasetMetadata
from .dataset_tools import (
add_features,
convert_image_to_video_dataset,
delete_episodes,
merge_datasets,
modify_features,
modify_tasks,
recompute_stats,
remove_feature,
split_dataset,
)
from .factory import make_dataset, resolve_delta_timestamps
from .image_writer import safe_stop_image_writer
from .io_utils import load_episodes, write_stats
from .lerobot_dataset import LeRobotDataset
from .multi_dataset import MultiLeRobotDataset
from .pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from .sampler import EpisodeAwareSampler
from .streaming_dataset import StreamingLeRobotDataset
from .utils import DEFAULT_EPISODES_PATH, create_lerobot_dataset_card
from .video_utils import VideoEncodingManager
# NOTE: Low-level I/O functions (cast_stats_to_numpy, get_parquet_file_size_in_mb, etc.)
# and legacy migration constants are intentionally NOT re-exported here.
# Import directly: ``from lerobot.datasets.io_utils import ...``
__all__ = [ __all__ = [
"CODEBASE_VERSION",
"DEFAULT_EPISODES_PATH",
"DEFAULT_QUANTILES",
"EpisodeAwareSampler", "EpisodeAwareSampler",
"ImageTransforms",
"ImageTransformsConfig",
"LeRobotDataset", "LeRobotDataset",
"LeRobotDatasetMetadata", "LeRobotDatasetMetadata",
"MultiLeRobotDataset", "MultiLeRobotDataset",
"StreamingLeRobotDataset", "StreamingLeRobotDataset",
"VideoEncodingManager",
"add_features",
"aggregate_datasets",
"aggregate_pipeline_dataset_features",
"aggregate_stats",
"convert_image_to_video_dataset",
"create_initial_features",
"create_lerobot_dataset_card",
"delete_episodes",
"get_feature_stats",
"load_episodes",
"make_dataset",
"merge_datasets",
"modify_features",
"modify_tasks",
"recompute_stats",
"remove_feature",
"resolve_delta_timestamps",
"safe_stop_image_writer",
"split_dataset",
"write_stats",
] ]
+6 -6
View File
@@ -23,10 +23,10 @@ import datasets
import pandas as pd import pandas as pd
import tqdm import tqdm
from lerobot.datasets.compute_stats import aggregate_stats from .compute_stats import aggregate_stats
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from .dataset_metadata import LeRobotDatasetMetadata
from lerobot.datasets.feature_utils import get_hf_features_from_features from .feature_utils import get_hf_features_from_features
from lerobot.datasets.io_utils import ( from .io_utils import (
get_file_size_in_mb, get_file_size_in_mb,
get_parquet_file_size_in_mb, get_parquet_file_size_in_mb,
to_parquet_with_hf_images, to_parquet_with_hf_images,
@@ -34,7 +34,7 @@ from lerobot.datasets.io_utils import (
write_stats, write_stats,
write_tasks, write_tasks,
) )
from lerobot.datasets.utils import ( from .utils import (
DEFAULT_CHUNK_SIZE, DEFAULT_CHUNK_SIZE,
DEFAULT_DATA_FILE_SIZE_IN_MB, DEFAULT_DATA_FILE_SIZE_IN_MB,
DEFAULT_DATA_PATH, DEFAULT_DATA_PATH,
@@ -43,7 +43,7 @@ from lerobot.datasets.utils import (
DEFAULT_VIDEO_PATH, DEFAULT_VIDEO_PATH,
update_chunk_file_indices, update_chunk_file_indices,
) )
from lerobot.datasets.video_utils import concatenate_video_files, get_video_duration_in_s from .video_utils import concatenate_video_files, get_video_duration_in_s
def validate_all_metadata(all_metadata: list[LeRobotDatasetMetadata]): def validate_all_metadata(all_metadata: list[LeRobotDatasetMetadata]):
+3 -3
View File
@@ -19,9 +19,11 @@ import logging
import numpy as np import numpy as np
from lerobot.datasets.io_utils import load_image_as_numpy from lerobot.processor import RelativeActionsProcessorStep
from lerobot.utils.constants import ACTION, OBS_STATE from lerobot.utils.constants import ACTION, OBS_STATE
from .io_utils import load_image_as_numpy
DEFAULT_QUANTILES = [0.01, 0.10, 0.50, 0.90, 0.99] DEFAULT_QUANTILES = [0.01, 0.10, 0.50, 0.90, 0.99]
@@ -696,8 +698,6 @@ def compute_relative_action_stats(
ValueError: If the dataset has fewer frames than ``chunk_size``. ValueError: If the dataset has fewer frames than ``chunk_size``.
RuntimeError: If no valid (single-episode) chunks are found. RuntimeError: If no valid (single-episode) chunks are found.
""" """
from lerobot.processor.relative_action_processor import RelativeActionsProcessorStep
if exclude_joints is None: if exclude_joints is None:
exclude_joints = [] exclude_joints = []
+9 -8
View File
@@ -23,9 +23,13 @@ import pyarrow as pa
import pyarrow.parquet as pq import pyarrow.parquet as pq
from huggingface_hub import snapshot_download from huggingface_hub import snapshot_download
from lerobot.datasets.compute_stats import aggregate_stats from lerobot.utils.constants import DEFAULT_FEATURES, HF_LEROBOT_HOME, HF_LEROBOT_HUB_CACHE
from lerobot.datasets.feature_utils import _validate_feature_names, create_empty_dataset_info from lerobot.utils.feature_utils import _validate_feature_names
from lerobot.datasets.io_utils import ( from lerobot.utils.utils import flatten_dict
from .compute_stats import aggregate_stats
from .feature_utils import create_empty_dataset_info
from .io_utils import (
get_file_size_in_mb, get_file_size_in_mb,
load_episodes, load_episodes,
load_info, load_info,
@@ -37,19 +41,16 @@ from lerobot.datasets.io_utils import (
write_stats, write_stats,
write_tasks, write_tasks,
) )
from lerobot.datasets.utils import ( from .utils import (
DEFAULT_EPISODES_PATH, DEFAULT_EPISODES_PATH,
DEFAULT_FEATURES,
INFO_PATH, INFO_PATH,
check_version_compatibility, check_version_compatibility,
flatten_dict,
get_safe_version, get_safe_version,
has_legacy_hub_download_metadata, has_legacy_hub_download_metadata,
is_valid_version, is_valid_version,
update_chunk_file_indices, update_chunk_file_indices,
) )
from lerobot.datasets.video_utils import get_video_info from .video_utils import get_video_info
from lerobot.utils.constants import HF_LEROBOT_HOME, HF_LEROBOT_HUB_CACHE
CODEBASE_VERSION = "v3.0" CODEBASE_VERSION = "v3.0"
+4 -4
View File
@@ -21,17 +21,17 @@ from pathlib import Path
import datasets import datasets
import torch import torch
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from .dataset_metadata import LeRobotDatasetMetadata
from lerobot.datasets.feature_utils import ( from .feature_utils import (
check_delta_timestamps, check_delta_timestamps,
get_delta_indices, get_delta_indices,
get_hf_features_from_features, get_hf_features_from_features,
) )
from lerobot.datasets.io_utils import ( from .io_utils import (
hf_transform_to_torch, hf_transform_to_torch,
load_nested_dataset, load_nested_dataset,
) )
from lerobot.datasets.video_utils import decode_video_frames from .video_utils import decode_video_frames
class DatasetReader: class DatasetReader:
+13 -13
View File
@@ -36,22 +36,25 @@ import pyarrow.parquet as pq
import torch import torch
from tqdm import tqdm from tqdm import tqdm
from lerobot.datasets.aggregate import aggregate_datasets from lerobot.utils.constants import ACTION, HF_LEROBOT_HOME, OBS_IMAGE, OBS_STATE
from lerobot.datasets.compute_stats import ( from lerobot.utils.utils import flatten_dict
from .aggregate import aggregate_datasets
from .compute_stats import (
aggregate_stats, aggregate_stats,
compute_episode_stats, compute_episode_stats,
compute_relative_action_stats, compute_relative_action_stats,
) )
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from .dataset_metadata import LeRobotDatasetMetadata
from lerobot.datasets.io_utils import ( from .io_utils import (
get_parquet_file_size_in_mb, get_parquet_file_size_in_mb,
load_episodes, load_episodes,
write_info, write_info,
write_stats, write_stats,
write_tasks, write_tasks,
) )
from lerobot.datasets.lerobot_dataset import LeRobotDataset from .lerobot_dataset import LeRobotDataset
from lerobot.datasets.utils import ( from .utils import (
DATA_DIR, DATA_DIR,
DEFAULT_CHUNK_SIZE, DEFAULT_CHUNK_SIZE,
DEFAULT_DATA_FILE_SIZE_IN_MB, DEFAULT_DATA_FILE_SIZE_IN_MB,
@@ -59,8 +62,7 @@ from lerobot.datasets.utils import (
DEFAULT_EPISODES_PATH, DEFAULT_EPISODES_PATH,
update_chunk_file_indices, update_chunk_file_indices,
) )
from lerobot.datasets.video_utils import encode_video_frames, get_video_info from .video_utils import encode_video_frames, get_video_info
from lerobot.utils.constants import ACTION, HF_LEROBOT_HOME, OBS_IMAGE, OBS_STATE
def _load_episode_with_stats(src_dataset: LeRobotDataset, episode_idx: int) -> dict: def _load_episode_with_stats(src_dataset: LeRobotDataset, episode_idx: int) -> dict:
@@ -829,8 +831,6 @@ def _copy_and_reindex_episodes_metadata(
data_metadata: Dict mapping new episode index to its data file metadata data_metadata: Dict mapping new episode index to its data file metadata
video_metadata: Optional dict mapping new episode index to its video metadata video_metadata: Optional dict mapping new episode index to its video metadata
""" """
from lerobot.datasets.utils import flatten_dict
if src_dataset.meta.episodes is None: if src_dataset.meta.episodes is None:
src_dataset.meta.episodes = load_episodes(src_dataset.meta.root) src_dataset.meta.episodes = load_episodes(src_dataset.meta.root)
@@ -922,8 +922,8 @@ def _write_parquet(df: pd.DataFrame, path: Path, meta: LeRobotDatasetMetadata) -
This ensures images are properly embedded and the file can be loaded correctly by HF datasets. This ensures images are properly embedded and the file can be loaded correctly by HF datasets.
""" """
from lerobot.datasets.feature_utils import get_hf_features_from_features from .feature_utils import get_hf_features_from_features
from lerobot.datasets.io_utils import embed_images from .io_utils import embed_images
hf_features = get_hf_features_from_features(meta.features) hf_features = get_hf_features_from_features(meta.features)
ep_dataset = datasets.Dataset.from_dict(df.to_dict(orient="list"), features=hf_features, split="train") ep_dataset = datasets.Dataset.from_dict(df.to_dict(orient="list"), features=hf_features, split="train")
@@ -1367,7 +1367,7 @@ def _copy_data_without_images(
episode_indices: Episodes to include episode_indices: Episodes to include
img_keys: Image keys to remove img_keys: Image keys to remove
""" """
from lerobot.datasets.utils import DATA_DIR from .utils import DATA_DIR
data_dir = src_dataset.root / DATA_DIR data_dir = src_dataset.root / DATA_DIR
parquet_files = sorted(data_dir.glob("*/*.parquet")) parquet_files = sorted(data_dir.glob("*/*.parquet"))
+7 -7
View File
@@ -31,26 +31,26 @@ import PIL.Image
import pyarrow.parquet as pq import pyarrow.parquet as pq
import torch import torch
from lerobot.datasets.compute_stats import compute_episode_stats from .compute_stats import compute_episode_stats
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from .dataset_metadata import LeRobotDatasetMetadata
from lerobot.datasets.feature_utils import ( from .feature_utils import (
get_hf_features_from_features, get_hf_features_from_features,
validate_episode_buffer, validate_episode_buffer,
validate_frame, validate_frame,
) )
from lerobot.datasets.image_writer import AsyncImageWriter, write_image from .image_writer import AsyncImageWriter, write_image
from lerobot.datasets.io_utils import ( from .io_utils import (
embed_images, embed_images,
get_file_size_in_mb, get_file_size_in_mb,
load_episodes, load_episodes,
write_info, write_info,
) )
from lerobot.datasets.utils import ( from .utils import (
DEFAULT_EPISODES_PATH, DEFAULT_EPISODES_PATH,
DEFAULT_IMAGE_PATH, DEFAULT_IMAGE_PATH,
update_chunk_file_indices, update_chunk_file_indices,
) )
from lerobot.datasets.video_utils import ( from .video_utils import (
StreamingVideoEncoder, StreamingVideoEncoder,
concatenate_video_files, concatenate_video_files,
encode_video_frames, encode_video_frames,
+7 -11
View File
@@ -18,19 +18,15 @@ from pprint import pformat
import torch import torch
from lerobot.configs.policies import PreTrainedConfig from lerobot.configs import PreTrainedConfig
from lerobot.configs.train import TrainPipelineConfig from lerobot.configs.train import TrainPipelineConfig
from lerobot.datasets.dataset_metadata import LeRobotDatasetMetadata from lerobot.transforms import ImageTransforms
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.utils.constants import ACTION, IMAGENET_STATS, OBS_PREFIX, REWARD
from lerobot.datasets.multi_dataset import MultiLeRobotDataset
from lerobot.datasets.streaming_dataset import StreamingLeRobotDataset
from lerobot.datasets.transforms import ImageTransforms
from lerobot.utils.constants import ACTION, OBS_PREFIX, REWARD
IMAGENET_STATS = { from .dataset_metadata import LeRobotDatasetMetadata
"mean": [[[0.485]], [[0.456]], [[0.406]]], # (c,1,1) from .lerobot_dataset import LeRobotDataset
"std": [[[0.229]], [[0.224]], [[0.225]]], # (c,1,1) from .multi_dataset import MultiLeRobotDataset
} from .streaming_dataset import StreamingLeRobotDataset
def resolve_delta_timestamps( def resolve_delta_timestamps(
+4 -199
View File
@@ -14,23 +14,21 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from pprint import pformat from pprint import pformat
from typing import Any
import datasets import datasets
import numpy as np import numpy as np
from PIL import Image as PILImage from PIL import Image as PILImage
from lerobot.configs.types import FeatureType, PolicyFeature from lerobot.utils.constants import DEFAULT_FEATURES
from lerobot.datasets.utils import ( from lerobot.utils.utils import is_valid_numpy_dtype_string
from .utils import (
DEFAULT_CHUNK_SIZE, DEFAULT_CHUNK_SIZE,
DEFAULT_DATA_FILE_SIZE_IN_MB, DEFAULT_DATA_FILE_SIZE_IN_MB,
DEFAULT_DATA_PATH, DEFAULT_DATA_PATH,
DEFAULT_FEATURES,
DEFAULT_VIDEO_FILE_SIZE_IN_MB, DEFAULT_VIDEO_FILE_SIZE_IN_MB,
DEFAULT_VIDEO_PATH, DEFAULT_VIDEO_PATH,
) )
from lerobot.utils.constants import ACTION, OBS_ENV_STATE, OBS_STR
from lerobot.utils.utils import is_valid_numpy_dtype_string
def get_hf_features_from_features(features: dict) -> datasets.Features: def get_hf_features_from_features(features: dict) -> datasets.Features:
@@ -71,199 +69,6 @@ def get_hf_features_from_features(features: dict) -> datasets.Features:
return datasets.Features(hf_features) return datasets.Features(hf_features)
def _validate_feature_names(features: dict[str, dict]) -> None:
"""Validate that feature names do not contain invalid characters.
Args:
features (dict): The LeRobot features dictionary.
Raises:
ValueError: If any feature name contains '/'.
"""
invalid_features = {name: ft for name, ft in features.items() if "/" in name}
if invalid_features:
raise ValueError(f"Feature names should not contain '/'. Found '/' in '{invalid_features}'.")
def hw_to_dataset_features(
hw_features: dict[str, type | tuple], prefix: str, use_video: bool = True
) -> dict[str, dict]:
"""Convert hardware-specific features to a LeRobot dataset feature dictionary.
This function takes a dictionary describing hardware outputs (like joint states
or camera image shapes) and formats it into the standard LeRobot feature
specification.
Args:
hw_features (dict): Dictionary mapping feature names to their type (float for
joints) or shape (tuple for images).
prefix (str): The prefix to add to the feature keys (e.g., "observation"
or "action").
use_video (bool): If True, image features are marked as "video", otherwise "image".
Returns:
dict: A LeRobot features dictionary.
"""
features = {}
joint_fts = {
key: ftype
for key, ftype in hw_features.items()
if ftype is float or (isinstance(ftype, PolicyFeature) and ftype.type != FeatureType.VISUAL)
}
cam_fts = {key: shape for key, shape in hw_features.items() if isinstance(shape, tuple)}
if joint_fts and prefix == ACTION:
features[prefix] = {
"dtype": "float32",
"shape": (len(joint_fts),),
"names": list(joint_fts),
}
if joint_fts and prefix == OBS_STR:
features[f"{prefix}.state"] = {
"dtype": "float32",
"shape": (len(joint_fts),),
"names": list(joint_fts),
}
for key, shape in cam_fts.items():
features[f"{prefix}.images.{key}"] = {
"dtype": "video" if use_video else "image",
"shape": shape,
"names": ["height", "width", "channels"],
}
_validate_feature_names(features)
return features
def build_dataset_frame(
ds_features: dict[str, dict], values: dict[str, Any], prefix: str
) -> dict[str, np.ndarray]:
"""Construct a single data frame from raw values based on dataset features.
A "frame" is a dictionary containing all the data for a single timestep,
formatted as numpy arrays according to the feature specification.
Args:
ds_features (dict): The LeRobot dataset features dictionary.
values (dict): A dictionary of raw values from the hardware/environment.
prefix (str): The prefix to filter features by (e.g., "observation"
or "action").
Returns:
dict: A dictionary representing a single frame of data.
"""
frame = {}
for key, ft in ds_features.items():
if key in DEFAULT_FEATURES or not key.startswith(prefix):
continue
elif ft["dtype"] == "float32" and len(ft["shape"]) == 1:
frame[key] = np.array([values[name] for name in ft["names"]], dtype=np.float32)
elif ft["dtype"] in ["image", "video"]:
frame[key] = values[key.removeprefix(f"{prefix}.images.")]
return frame
def dataset_to_policy_features(features: dict[str, dict]) -> dict[str, PolicyFeature]:
    """Convert dataset features to policy features.

    This function transforms the dataset's feature specification into a format
    that a policy can use, classifying features by type (e.g., visual, state,
    action) and ensuring correct shapes (e.g., channel-first for images).
    Features that match none of the known categories are silently skipped.

    Args:
        features (dict): The LeRobot dataset features dictionary.

    Returns:
        dict: A dictionary mapping feature keys to `PolicyFeature` objects.

    Raises:
        ValueError: If an image feature does not have a 3D shape.
    """
    # TODO(aliberts): Implement "type" in dataset features and simplify this
    policy_features = {}
    for key, ft in features.items():
        shape = ft["shape"]
        if ft["dtype"] in ["image", "video"]:
            # NOTE: renamed from `type` to avoid shadowing the `type` builtin.
            feature_type = FeatureType.VISUAL
            if len(shape) != 3:
                raise ValueError(f"Number of dimensions of {key} != 3 (shape={shape})")

            names = ft["names"]
            # Backward compatibility for "channel" which is an error introduced in LeRobotDataset v2.0 for ported datasets.
            if names[2] in ["channel", "channels"]:  # (h, w, c) -> (c, h, w)
                shape = (shape[2], shape[0], shape[1])
        elif key == OBS_ENV_STATE:
            feature_type = FeatureType.ENV
        elif key.startswith(OBS_STR):
            feature_type = FeatureType.STATE
        elif key.startswith(ACTION):
            feature_type = FeatureType.ACTION
        else:
            continue

        policy_features[key] = PolicyFeature(
            type=feature_type,
            shape=shape,
        )

    return policy_features
def combine_feature_dicts(*dicts: dict) -> dict:
    """Merge LeRobot grouped feature dicts.

    1D numeric specs (dtype not image/video/string) that carry "names" have
    their name lists unioned (order-preserving) and their shape recomputed.
    Every other entry (images/videos, strings, non-1D specs, plain values) is
    simply overwritten by the last dict that defines it.

    Args:
        *dicts: A variable number of LeRobot feature dictionaries to merge.

    Returns:
        dict: A single merged feature dictionary.

    Raises:
        ValueError: If there's a dtype mismatch for a feature being merged.
    """
    merged: dict = {}
    for feature_dict in dicts:
        for key, spec in feature_dict.items():
            # Plain (non-dict) values are copied through, last one wins.
            if not isinstance(spec, dict):
                merged[key] = spec
                continue

            dtype = spec.get("dtype")
            shape = spec.get("shape")
            mergeable_vector = (
                isinstance(shape, tuple)
                and len(shape) == 1
                and "names" in spec
                and dtype not in ("image", "video", "string")
            )
            if not mergeable_vector:
                # Images/videos and non-1D entries: override with the latest definition.
                merged[key] = spec
                continue

            # Initialize or retrieve the accumulating dict for this feature key.
            accumulated = merged.setdefault(key, {"dtype": dtype, "names": [], "shape": (0,)})
            # Ensure consistent data types across merged entries.
            if "dtype" in accumulated and dtype != accumulated["dtype"]:
                raise ValueError(f"dtype mismatch for '{key}': {accumulated['dtype']} vs {dtype}")
            # Append only names not seen yet, preserving first-seen order.
            known = set(accumulated["names"])
            for name in spec["names"]:
                if name not in known:
                    accumulated["names"].append(name)
                    known.add(name)
            # Recompute the shape to reflect the updated number of features.
            accumulated["shape"] = (len(accumulated["names"]),)
    return merged
def create_empty_dataset_info( def create_empty_dataset_info(
codebase_version: str, codebase_version: str,
fps: int, fps: int,
+4 -32
View File
@@ -13,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import json
from pathlib import Path from pathlib import Path
from typing import Any from typing import Any
@@ -29,7 +28,10 @@ from datasets.table import embed_table_storage
from PIL import Image as PILImage from PIL import Image as PILImage
from torchvision import transforms from torchvision import transforms
from lerobot.datasets.utils import ( from lerobot.utils.io_utils import load_json, write_json
from lerobot.utils.utils import SuppressProgressBars, flatten_dict, unflatten_dict
from .utils import (
DEFAULT_DATA_FILE_SIZE_IN_MB, DEFAULT_DATA_FILE_SIZE_IN_MB,
DEFAULT_EPISODES_PATH, DEFAULT_EPISODES_PATH,
DEFAULT_SUBTASKS_PATH, DEFAULT_SUBTASKS_PATH,
@@ -37,11 +39,8 @@ from lerobot.datasets.utils import (
EPISODES_DIR, EPISODES_DIR,
INFO_PATH, INFO_PATH,
STATS_PATH, STATS_PATH,
flatten_dict,
serialize_dict, serialize_dict,
unflatten_dict,
) )
from lerobot.utils.utils import SuppressProgressBars
def get_parquet_file_size_in_mb(parquet_path: str | Path) -> float: def get_parquet_file_size_in_mb(parquet_path: str | Path) -> float:
@@ -116,33 +115,6 @@ def embed_images(dataset: datasets.Dataset) -> datasets.Dataset:
return dataset return dataset
def load_json(fpath: Path) -> Any:
"""Load data from a JSON file.
Args:
fpath (Path): Path to the JSON file.
Returns:
Any: The data loaded from the JSON file.
"""
with open(fpath) as f:
return json.load(f)
def write_json(data: dict, fpath: Path) -> None:
"""Write data to a JSON file.
Creates parent directories if they don't exist.
Args:
data (dict): The dictionary to write.
fpath (Path): The path to the output JSON file.
"""
fpath.parent.mkdir(exist_ok=True, parents=True)
with open(fpath, "w") as f:
json.dump(data, f, indent=4, ensure_ascii=False)
def write_info(info: dict, local_dir: Path) -> None: def write_info(info: dict, local_dir: Path) -> None:
write_json(info, local_dir / INFO_PATH) write_json(info, local_dir / INFO_PATH)
+7 -6
View File
@@ -24,20 +24,21 @@ import torch.utils
from huggingface_hub import HfApi, snapshot_download from huggingface_hub import HfApi, snapshot_download
from huggingface_hub.errors import RevisionNotFoundError from huggingface_hub.errors import RevisionNotFoundError
from lerobot.datasets.dataset_metadata import CODEBASE_VERSION, LeRobotDatasetMetadata from lerobot.utils.constants import HF_LEROBOT_HUB_CACHE
from lerobot.datasets.dataset_reader import DatasetReader
from lerobot.datasets.dataset_writer import DatasetWriter from .dataset_metadata import CODEBASE_VERSION, LeRobotDatasetMetadata
from lerobot.datasets.utils import ( from .dataset_reader import DatasetReader
from .dataset_writer import DatasetWriter
from .utils import (
create_lerobot_dataset_card, create_lerobot_dataset_card,
get_safe_version, get_safe_version,
is_valid_version, is_valid_version,
) )
from lerobot.datasets.video_utils import ( from .video_utils import (
StreamingVideoEncoder, StreamingVideoEncoder,
get_safe_default_codec, get_safe_default_codec,
resolve_vcodec, resolve_vcodec,
) )
from lerobot.utils.constants import HF_LEROBOT_HUB_CACHE
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
+5 -4
View File
@@ -21,12 +21,13 @@ import datasets
import torch import torch
import torch.utils import torch.utils
from lerobot.datasets.compute_stats import aggregate_stats
from lerobot.datasets.feature_utils import get_hf_features_from_features
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.video_utils import VideoFrame
from lerobot.utils.constants import HF_LEROBOT_HOME from lerobot.utils.constants import HF_LEROBOT_HOME
from .compute_stats import aggregate_stats
from .feature_utils import get_hf_features_from_features
from .lerobot_dataset import LeRobotDataset
from .video_utils import VideoFrame
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
+2 -2
View File
@@ -16,11 +16,11 @@ import re
from collections.abc import Sequence from collections.abc import Sequence
from typing import Any from typing import Any
from lerobot.configs.types import PipelineFeatureType from lerobot.configs import PipelineFeatureType
from lerobot.datasets.feature_utils import hw_to_dataset_features
from lerobot.processor import DataProcessorPipeline from lerobot.processor import DataProcessorPipeline
from lerobot.types import RobotAction, RobotObservation from lerobot.types import RobotAction, RobotObservation
from lerobot.utils.constants import ACTION, OBS_IMAGES, OBS_STATE, OBS_STR from lerobot.utils.constants import ACTION, OBS_IMAGES, OBS_STATE, OBS_STR
from lerobot.utils.feature_utils import hw_to_dataset_features
def create_initial_features( def create_initial_features(
+7 -6
View File
@@ -22,20 +22,21 @@ import numpy as np
import torch import torch
from datasets import load_dataset from datasets import load_dataset
from lerobot.datasets.dataset_metadata import CODEBASE_VERSION, LeRobotDatasetMetadata from lerobot.utils.constants import HF_LEROBOT_HOME, LOOKAHEAD_BACKTRACKTABLE, LOOKBACK_BACKTRACKTABLE
from lerobot.datasets.feature_utils import get_delta_indices
from lerobot.datasets.io_utils import item_to_torch from .dataset_metadata import CODEBASE_VERSION, LeRobotDatasetMetadata
from lerobot.datasets.utils import ( from .feature_utils import get_delta_indices
from .io_utils import item_to_torch
from .utils import (
check_version_compatibility, check_version_compatibility,
find_float_index, find_float_index,
is_float_in_list, is_float_in_list,
safe_shard, safe_shard,
) )
from lerobot.datasets.video_utils import ( from .video_utils import (
VideoDecoderCache, VideoDecoderCache,
decode_video_frames_torchcodec, decode_video_frames_torchcodec,
) )
from lerobot.utils.constants import HF_LEROBOT_HOME, LOOKAHEAD_BACKTRACKTABLE, LOOKBACK_BACKTRACKTABLE
class LookBackError(Exception): class LookBackError(Exception):
+2 -84
View File
@@ -17,9 +17,7 @@ import contextlib
import importlib.resources import importlib.resources
import json import json
import logging import logging
from collections.abc import Iterator
from pathlib import Path from pathlib import Path
from typing import Any
import datasets import datasets
import numpy as np import numpy as np
@@ -28,6 +26,8 @@ import torch
from huggingface_hub import DatasetCard, DatasetCardData, HfApi from huggingface_hub import DatasetCard, DatasetCardData, HfApi
from huggingface_hub.errors import RevisionNotFoundError from huggingface_hub.errors import RevisionNotFoundError
from lerobot.utils.utils import flatten_dict, unflatten_dict
V30_MESSAGE = """ V30_MESSAGE = """
The dataset you requested ({repo_id}) is in {version} format. The dataset you requested ({repo_id}) is in {version} format.
@@ -93,14 +93,6 @@ LEGACY_EPISODES_PATH = "meta/episodes.jsonl"
LEGACY_EPISODES_STATS_PATH = "meta/episodes_stats.jsonl" LEGACY_EPISODES_STATS_PATH = "meta/episodes_stats.jsonl"
LEGACY_TASKS_PATH = "meta/tasks.jsonl" LEGACY_TASKS_PATH = "meta/tasks.jsonl"
DEFAULT_FEATURES = {
"timestamp": {"dtype": "float32", "shape": (1,), "names": None},
"frame_index": {"dtype": "int64", "shape": (1,), "names": None},
"episode_index": {"dtype": "int64", "shape": (1,), "names": None},
"index": {"dtype": "int64", "shape": (1,), "names": None},
"task_index": {"dtype": "int64", "shape": (1,), "names": None},
}
def has_legacy_hub_download_metadata(root: Path) -> bool: def has_legacy_hub_download_metadata(root: Path) -> bool:
"""Return ``True`` when *root* looks like a legacy Hub ``local_dir`` mirror. """Return ``True`` when *root* looks like a legacy Hub ``local_dir`` mirror.
@@ -123,59 +115,6 @@ def update_chunk_file_indices(chunk_idx: int, file_idx: int, chunks_size: int) -
return chunk_idx, file_idx return chunk_idx, file_idx
def flatten_dict(d: dict, parent_key: str = "", sep: str = "/") -> dict:
"""Flatten a nested dictionary by joining keys with a separator.
Example:
>>> dct = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}
>>> print(flatten_dict(dct))
{'a/b': 1, 'a/c/d': 2, 'e': 3}
Args:
d (dict): The dictionary to flatten.
parent_key (str): The base key to prepend to the keys in this level.
sep (str): The separator to use between keys.
Returns:
dict: A flattened dictionary.
"""
items = []
for k, v in d.items():
new_key = f"{parent_key}{sep}{k}" if parent_key else k
if isinstance(v, dict):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def unflatten_dict(d: dict, sep: str = "/") -> dict:
"""Unflatten a dictionary with delimited keys into a nested dictionary.
Example:
>>> flat_dct = {"a/b": 1, "a/c/d": 2, "e": 3}
>>> print(unflatten_dict(flat_dct))
{'a': {'b': 1, 'c': {'d': 2}}, 'e': 3}
Args:
d (dict): A dictionary with flattened keys.
sep (str): The separator used in the keys.
Returns:
dict: A nested dictionary.
"""
outdict = {}
for key, value in d.items():
parts = key.split(sep)
d = outdict
for part in parts[:-1]:
if part not in d:
d[part] = {}
d = d[part]
d[parts[-1]] = value
return outdict
def serialize_dict(stats: dict[str, torch.Tensor | np.ndarray | dict]) -> dict: def serialize_dict(stats: dict[str, torch.Tensor | np.ndarray | dict]) -> dict:
"""Serialize a dictionary containing tensors or numpy arrays to be JSON-compatible. """Serialize a dictionary containing tensors or numpy arrays to be JSON-compatible.
@@ -332,27 +271,6 @@ def get_safe_version(repo_id: str, version: str | packaging.version.Version) ->
raise ForwardCompatibilityError(repo_id, min(upper_versions)) raise ForwardCompatibilityError(repo_id, min(upper_versions))
def cycle(iterable: Any) -> Iterator[Any]:
"""Create a dataloader-safe cyclical iterator.
This is an equivalent of `itertools.cycle` but is safe for use with
PyTorch DataLoaders with multiple workers.
See https://github.com/pytorch/pytorch/issues/23900 for details.
Args:
iterable: The iterable to cycle over.
Yields:
Items from the iterable, restarting from the beginning when exhausted.
"""
iterator = iter(iterable)
while True:
try:
yield next(iterator)
except StopIteration:
iterator = iter(iterable)
def create_branch(repo_id: str, *, branch: str, repo_type: str | None = None) -> None: def create_branch(repo_id: str, *, branch: str, repo_type: str | None = None) -> None:
"""Create a branch on an existing Hugging Face repo. """Create a branch on an existing Hugging Face repo.
+7 -12
View File
@@ -37,6 +37,8 @@ import torchvision
from datasets.features.features import register_feature from datasets.features.features import register_feature
from PIL import Image from PIL import Image
from lerobot.utils.import_utils import get_safe_default_codec
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# List of hardware encoders to probe for auto-selection. Availability depends on the platform and FFmpeg build. # List of hardware encoders to probe for auto-selection. Availability depends on the platform and FFmpeg build.
@@ -116,16 +118,6 @@ def resolve_vcodec(vcodec: str) -> str:
return "libsvtav1" return "libsvtav1"
def get_safe_default_codec():
if importlib.util.find_spec("torchcodec"):
return "torchcodec"
else:
logger.warning(
"'torchcodec' is not available in your platform, falling back to 'pyav' as a default decoder"
)
return "pyav"
def decode_video_frames( def decode_video_frames(
video_path: Path | str, video_path: Path | str,
timestamps: list[float], timestamps: list[float],
@@ -271,7 +263,10 @@ class VideoDecoderCache:
if importlib.util.find_spec("torchcodec"): if importlib.util.find_spec("torchcodec"):
from torchcodec.decoders import VideoDecoder from torchcodec.decoders import VideoDecoder
else: else:
raise ImportError("torchcodec is required but not available.") raise ImportError(
"'torchcodec' is required but not installed. "
"Install it with: pip install 'lerobot[dataset]' (or uv pip install 'lerobot[dataset]')"
)
video_path = str(video_path) video_path = str(video_path)
@@ -606,7 +601,7 @@ class _CameraEncoderThread(threading.Thread):
self.encoder_threads = encoder_threads self.encoder_threads = encoder_threads
def run(self) -> None: def run(self) -> None:
from lerobot.datasets.compute_stats import RunningQuantileStats, auto_downsample_height_width from .compute_stats import RunningQuantileStats, auto_downsample_height_width
container = None container = None
output_stream = None output_stream = None
+24 -1
View File
@@ -12,4 +12,27 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from .configs import AlohaEnv, EnvConfig, HubEnvConfig, PushtEnv # noqa: F401 # NOTE: gymnasium is currently a core dependency but is a candidate for moving to an
# optional extra in the future. When that transition happens, uncomment the guard below
# and update the extra name to the one that will contain gymnasium.
# from lerobot.utils.import_utils import require_package
# require_package("gymnasium", extra="<update_extra>", import_name="gymnasium")
from .configs import AlohaEnv, EnvConfig, HILSerlRobotEnvConfig, HubEnvConfig, PushtEnv
from .factory import make_env, make_env_config, make_env_pre_post_processors
from .utils import check_env_attributes_and_types, close_envs, env_to_policy_features, preprocess_observation
__all__ = [
"AlohaEnv",
"EnvConfig",
"HILSerlRobotEnvConfig",
"HubEnvConfig",
"PushtEnv",
"check_env_attributes_and_types",
"close_envs",
"env_to_policy_features",
"make_env",
"make_env_config",
"make_env_pre_post_processors",
"preprocess_observation",
]
+4 -11
View File
@@ -23,7 +23,8 @@ import draccus
import gymnasium as gym import gymnasium as gym
from gymnasium.envs.registration import registry as gym_registry from gymnasium.envs.registration import registry as gym_registry
from lerobot.configs.types import FeatureType, PolicyFeature from lerobot.configs import FeatureType, PolicyFeature
from lerobot.processor import IsaaclabArenaProcessorStep, LiberoProcessorStep, PolicyProcessorPipeline
from lerobot.robots import RobotConfig from lerobot.robots import RobotConfig
from lerobot.teleoperators.config import TeleoperatorConfig from lerobot.teleoperators.config import TeleoperatorConfig
from lerobot.utils.constants import ( from lerobot.utils.constants import (
@@ -124,8 +125,6 @@ class EnvConfig(draccus.ChoiceRegistry, abc.ABC):
def get_env_processors(self): def get_env_processors(self):
"""Return (preprocessor, postprocessor) for this env. Default: identity.""" """Return (preprocessor, postprocessor) for this env. Default: identity."""
from lerobot.processor.pipeline import PolicyProcessorPipeline
return PolicyProcessorPipeline(steps=[]), PolicyProcessorPipeline(steps=[]) return PolicyProcessorPipeline(steps=[]), PolicyProcessorPipeline(steps=[])
@@ -418,7 +417,7 @@ class LiberoEnv(EnvConfig):
return kwargs return kwargs
def create_envs(self, n_envs: int, use_async_envs: bool = False): def create_envs(self, n_envs: int, use_async_envs: bool = False):
from lerobot.envs.libero import create_libero_envs from .libero import create_libero_envs
if self.task is None: if self.task is None:
raise ValueError("LiberoEnv requires a task to be specified") raise ValueError("LiberoEnv requires a task to be specified")
@@ -436,9 +435,6 @@ class LiberoEnv(EnvConfig):
) )
def get_env_processors(self): def get_env_processors(self):
from lerobot.processor.env_processor import LiberoProcessorStep
from lerobot.processor.pipeline import PolicyProcessorPipeline
return ( return (
PolicyProcessorPipeline(steps=[LiberoProcessorStep()]), PolicyProcessorPipeline(steps=[LiberoProcessorStep()]),
PolicyProcessorPipeline(steps=[]), PolicyProcessorPipeline(steps=[]),
@@ -487,7 +483,7 @@ class MetaworldEnv(EnvConfig):
} }
def create_envs(self, n_envs: int, use_async_envs: bool = False): def create_envs(self, n_envs: int, use_async_envs: bool = False):
from lerobot.envs.metaworld import create_metaworld_envs from .metaworld import create_metaworld_envs
if self.task is None: if self.task is None:
raise ValueError("MetaWorld requires a task to be specified") raise ValueError("MetaWorld requires a task to be specified")
@@ -568,9 +564,6 @@ class IsaaclabArenaEnv(HubEnvConfig):
return {} return {}
def get_env_processors(self): def get_env_processors(self):
from lerobot.processor.env_processor import IsaaclabArenaProcessorStep
from lerobot.processor.pipeline import PolicyProcessorPipeline
state_keys = tuple(k.strip() for k in (self.state_keys or "").split(",") if k.strip()) state_keys = tuple(k.strip() for k in (self.state_keys or "").split(",") if k.strip())
camera_keys = tuple(k.strip() for k in (self.camera_keys or "").split(",") if k.strip()) camera_keys = tuple(k.strip() for k in (self.camera_keys or "").split(",") if k.strip())
if not state_keys and not camera_keys: if not state_keys and not camera_keys:
+2 -2
View File
@@ -19,8 +19,8 @@ from typing import Any
import gymnasium as gym import gymnasium as gym
from lerobot.envs.configs import EnvConfig, HubEnvConfig from .configs import EnvConfig, HubEnvConfig
from lerobot.envs.utils import _call_make_env, _download_hub_file, _import_hub_module, _normalize_hub_result from .utils import _call_make_env, _download_hub_file, _import_hub_module, _normalize_hub_result
def make_env_config(env_type: str, **kwargs) -> EnvConfig: def make_env_config(env_type: str, **kwargs) -> EnvConfig:
+2 -1
View File
@@ -29,9 +29,10 @@ from gymnasium import spaces
from libero.libero import benchmark, get_libero_path from libero.libero import benchmark, get_libero_path
from libero.libero.envs import OffScreenRenderEnv from libero.libero.envs import OffScreenRenderEnv
from lerobot.envs.utils import _LazyAsyncVectorEnv
from lerobot.types import RobotObservation from lerobot.types import RobotObservation
from .utils import _LazyAsyncVectorEnv
def _parse_camera_names(camera_name: str | Sequence[str]) -> list[str]: def _parse_camera_names(camera_name: str | Sequence[str]) -> list[str]:
"""Normalize camera_name into a non-empty list of strings.""" """Normalize camera_name into a non-empty list of strings."""
+2 -1
View File
@@ -25,9 +25,10 @@ import metaworld.policies as policies
import numpy as np import numpy as np
from gymnasium import spaces from gymnasium import spaces
from lerobot.envs.utils import _LazyAsyncVectorEnv
from lerobot.types import RobotObservation from lerobot.types import RobotObservation
from .utils import _LazyAsyncVectorEnv
# ---- Load configuration data from the external JSON file ---- # ---- Load configuration data from the external JSON file ----
CONFIG_PATH = Path(__file__).parent / "metaworld_config.json" CONFIG_PATH = Path(__file__).parent / "metaworld_config.json"
try: try:

Some files were not shown because too many files have changed in this diff Show More