diff --git a/docs/source/adding_benchmarks.mdx b/docs/source/adding_benchmarks.mdx
index 3a024f026..6e9d23bdf 100644
--- a/docs/source/adding_benchmarks.mdx
+++ b/docs/source/adding_benchmarks.mdx
@@ -216,7 +216,7 @@ class MyBenchmarkEnvConfig(EnvConfig):
def get_env_processors(self):
"""Override if your benchmark needs observation/action transforms."""
- from lerobot.processor.pipeline import PolicyProcessorPipeline
+ from lerobot.processor import PolicyProcessorPipeline
from lerobot.processor.env_processor import MyBenchmarkProcessorStep
return (
PolicyProcessorPipeline(steps=[MyBenchmarkProcessorStep()]),
diff --git a/docs/source/async.mdx b/docs/source/async.mdx
index a46408a0d..7b1efae97 100644
--- a/docs/source/async.mdx
+++ b/docs/source/async.mdx
@@ -170,7 +170,7 @@ python -m lerobot.async_inference.robot_client \
```python
import threading
from lerobot.robots.so_follower import SO100FollowerConfig
-from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+from lerobot.cameras.opencv import OpenCVCameraConfig
from lerobot.async_inference.configs import RobotClientConfig
from lerobot.async_inference.robot_client import RobotClient
from lerobot.async_inference.helpers import visualize_action_queue_size
diff --git a/docs/source/backwardcomp.mdx b/docs/source/backwardcomp.mdx
index 3366c8ab9..a83ee2e2e 100644
--- a/docs/source/backwardcomp.mdx
+++ b/docs/source/backwardcomp.mdx
@@ -41,7 +41,7 @@ The script:
```python
# New usage pattern (after migration)
-from lerobot.policies.factory import make_policy, make_pre_post_processors
+from lerobot.policies import make_policy, make_pre_post_processors
# Load model and processors separately
policy = make_policy(config, ds_meta=dataset.meta)
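# A minimal sketch of the separate processor step; the exact keyword names
# here are assumptions, see lerobot.policies.factory for the real signature.
preprocessor, postprocessor = make_pre_post_processors(policy.config, dataset_stats=dataset.meta.stats)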
diff --git a/docs/source/bring_your_own_policies.mdx b/docs/source/bring_your_own_policies.mdx
index 38c32aa71..57ecc2fb2 100644
--- a/docs/source/bring_your_own_policies.mdx
+++ b/docs/source/bring_your_own_policies.mdx
@@ -47,9 +47,9 @@ Here is a template to get you started, customize the parameters and methods as n
```python
# configuration_my_custom_policy.py
from dataclasses import dataclass, field
-from lerobot.configs.policies import PreTrainedConfig
-from lerobot.optim.optimizers import AdamWConfig
-from lerobot.optim.schedulers import CosineDecayWithWarmupSchedulerConfig
+from lerobot.configs import PreTrainedConfig
+from lerobot.optim import AdamWConfig, CosineDecayWithWarmupSchedulerConfig
@PreTrainedConfig.register_subclass("my_custom_policy")
@dataclass
@@ -120,7 +120,7 @@ import torch
import torch.nn as nn
from typing import Any
-from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies import PreTrainedPolicy
from lerobot.utils.constants import ACTION
from .configuration_my_custom_policy import MyCustomPolicyConfig
diff --git a/docs/source/cameras.mdx b/docs/source/cameras.mdx
index 8af0f5ae5..2dc2859dd 100644
--- a/docs/source/cameras.mdx
+++ b/docs/source/cameras.mdx
@@ -79,9 +79,8 @@ The following examples show how to use the camera API to configure and capture f
```python
-from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
-from lerobot.cameras.opencv.camera_opencv import OpenCVCamera
-from lerobot.cameras.configs import ColorMode, Cv2Rotation
+from lerobot.cameras.opencv import OpenCVCamera, OpenCVCameraConfig
+from lerobot.cameras import ColorMode, Cv2Rotation
# Construct an `OpenCVCameraConfig` with your desired FPS, resolution, color mode, and rotation.
config = OpenCVCameraConfig(
@@ -126,9 +125,8 @@ with OpenCVCamera(config) as camera:
```python
-from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig
-from lerobot.cameras.realsense.camera_realsense import RealSenseCamera
-from lerobot.cameras.configs import ColorMode, Cv2Rotation
+from lerobot.cameras.realsense import RealSenseCamera, RealSenseCameraConfig
+from lerobot.cameras import ColorMode, Cv2Rotation
# Create a `RealSenseCameraConfig` specifying your camera’s serial number and enabling depth.
config = RealSenseCameraConfig(
diff --git a/docs/source/dataset_subtask.mdx b/docs/source/dataset_subtask.mdx
index beb5d80bd..6264aca22 100644
--- a/docs/source/dataset_subtask.mdx
+++ b/docs/source/dataset_subtask.mdx
@@ -95,7 +95,7 @@ After completing your annotation:
When you load a dataset with subtask annotations, the subtask information is automatically available:
```python
-from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets import LeRobotDataset
# Load a dataset with subtask annotations
dataset = LeRobotDataset("jadechoghari/collect-fruit-annotated")
@@ -133,11 +133,10 @@ if has_subtasks:
-The `TokenizerProcessor` automatically handles subtask tokenization for Vision-Language Action (VLA) models:
+The `TokenizerProcessorStep` automatically handles subtask tokenization for Vision-Language Action (VLA) models:
```python
-from lerobot.processor.tokenizer_processor import TokenizerProcessor
-from lerobot.processor.pipeline import ProcessorPipeline
+from lerobot.processor import TokenizerProcessorStep
-# Create a tokenizer processor
-tokenizer_processor = TokenizerProcessor(
+# Create a tokenizer processor step
+tokenizer_processor = TokenizerProcessorStep(
tokenizer_name_or_path="google/paligemma-3b-pt-224",
padding="max_length",
max_length=64,
@@ -158,7 +157,7 @@ When subtasks are available in the batch, the tokenizer processor adds:
```python
import torch
-from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets import LeRobotDataset
dataset = LeRobotDataset("jadechoghari/collect-fruit-annotated")
@@ -182,7 +181,7 @@ for batch in dataloader:
Try loading a dataset with subtask annotations:
```python
-from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets import LeRobotDataset
# Example dataset with subtask annotations
dataset = LeRobotDataset("jadechoghari/collect-fruit-annotated")
diff --git a/docs/source/earthrover_mini_plus.mdx b/docs/source/earthrover_mini_plus.mdx
index 884e84d8c..a87bd325b 100644
--- a/docs/source/earthrover_mini_plus.mdx
+++ b/docs/source/earthrover_mini_plus.mdx
@@ -66,10 +66,10 @@ The SDK gives you:
Follow our [Installation Guide](./installation) to install LeRobot.
-In addition to the base installation, install the EarthRover Mini dependencies:
+In addition to the base installation, install the `hardware` extra for the EarthRover Mini dependencies:
```bash
-pip install -e .
+pip install -e ".[hardware]"
```
## How It Works
diff --git a/docs/source/env_processor.mdx b/docs/source/env_processor.mdx
index 290af3b34..8bfafdfb9 100644
--- a/docs/source/env_processor.mdx
+++ b/docs/source/env_processor.mdx
@@ -173,8 +173,8 @@ observation = {
The `make_env_pre_post_processors` function follows the same pattern as `make_pre_post_processors` for policies:
```python
-from lerobot.envs.factory import make_env_pre_post_processors
-from lerobot.envs.configs import LiberoEnv, PushtEnv
+from lerobot.envs import make_env_pre_post_processors, PushtEnv
+from lerobot.envs.configs import LiberoEnv
# For LIBERO: Returns LiberoProcessorStep in preprocessor
libero_cfg = LiberoEnv(task="libero_spatial", camera_name=["agentview"])
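# Sketch: build the env pre/postprocessor pair from the config (positional
# argument assumed by analogy with make_pre_post_processors for policies).
env_preprocessor, env_postprocessor = make_env_pre_post_processors(libero_cfg)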
@@ -257,7 +257,7 @@ def eval_main(cfg: EvalPipelineConfig):
The `LiberoProcessorStep` demonstrates a real-world environment processor:
```python
-from lerobot.processor.pipeline import ObservationProcessorStep
+from lerobot.processor import ObservationProcessorStep
@dataclass
@ProcessorStepRegistry.register(name="libero_processor")
diff --git a/docs/source/envhub.mdx b/docs/source/envhub.mdx
index 36c08a8b3..47f5567a8 100644
--- a/docs/source/envhub.mdx
+++ b/docs/source/envhub.mdx
@@ -34,7 +34,7 @@ Finally, your environment must implement the standard `gym.vector.VectorEnv` int
Loading an environment from the Hub is as simple as:
```python
-from lerobot.envs.factory import make_env
+from lerobot.envs import make_env
# Load a hub environment (requires explicit consent to run remote code)
env = make_env("lerobot/cartpole-env", trust_remote_code=True)
@@ -191,7 +191,7 @@ api.upload_folder(
### Basic Usage
```python
-from lerobot.envs.factory import make_env
+from lerobot.envs import make_env
# Load from the hub
envs_dict = make_env(
@@ -314,7 +314,7 @@ env = make_env("trusted-org/verified-env@a1b2c3d4", trust_remote_code=True)
Here's a complete example using the reference CartPole environment:
```python
-from lerobot.envs.factory import make_env
+from lerobot.envs import make_env
import numpy as np
# Load the environment
diff --git a/docs/source/envhub_isaaclab_arena.mdx b/docs/source/envhub_isaaclab_arena.mdx
index 828d51bad..b934240d6 100644
--- a/docs/source/envhub_isaaclab_arena.mdx
+++ b/docs/source/envhub_isaaclab_arena.mdx
@@ -58,10 +58,10 @@ pip install -e .
cd ..
-# 5. Install LeRobot
+# 5. Install LeRobot (evaluation extra for env/policy evaluation)
git clone https://github.com/huggingface/lerobot.git
cd lerobot
-pip install -e .
+pip install -e ".[evaluation]"
cd ..
@@ -262,7 +262,7 @@ def main(cfg: EvalPipelineConfig):
"""Run random action rollout for IsaacLab Arena environment."""
logging.info(pformat(asdict(cfg)))
- from lerobot.envs.factory import make_env
+ from lerobot.envs import make_env
env_dict = make_env(
cfg.env,
diff --git a/docs/source/envhub_leisaac.mdx b/docs/source/envhub_leisaac.mdx
index 2537700a5..91bb6a871 100644
--- a/docs/source/envhub_leisaac.mdx
+++ b/docs/source/envhub_leisaac.mdx
@@ -74,7 +74,7 @@ EnvHub exposes every LeIsaac-supported task in a uniform interface. The examples
# envhub_random_action.py
import torch
-from lerobot.envs.factory import make_env
+from lerobot.envs import make_env
# Load from the hub
envs_dict = make_env("LightwheelAI/leisaac_env:envs/so101_pick_orange.py", n_envs=1, trust_remote_code=True)
@@ -142,7 +142,7 @@ from lerobot.teleoperators import ( # noqa: F401
)
from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.utils import init_logging
-from lerobot.envs.factory import make_env
+from lerobot.envs import make_env
@dataclass
@@ -282,7 +282,7 @@ Note: when working with `bi_so101_fold_cloth`, call `initialize()` immediately a
```python
import torch
-from lerobot.envs.factory import make_env
+from lerobot.envs import make_env
# Load from the hub
envs_dict = make_env("LightwheelAI/leisaac_env:envs/bi_so101_fold_cloth.py", n_envs=1, trust_remote_code=True)
diff --git a/docs/source/il_robots.mdx b/docs/source/il_robots.mdx
index c80f2047a..d03e35d8d 100644
--- a/docs/source/il_robots.mdx
+++ b/docs/source/il_robots.mdx
@@ -58,8 +58,8 @@ lerobot-teleoperate \
```python
-from lerobot.teleoperators.so_leader import SO101LeaderConfig, SO101Leader
-from lerobot.robots.so_follower import SO101FollowerConfig, SO101Follower
+from lerobot.teleoperators.so_leader import SO101Leader, SO101LeaderConfig
+from lerobot.robots.so_follower import SO101Follower, SO101FollowerConfig
robot_config = SO101FollowerConfig(
port="/dev/tty.usbmodem58760431541",
@@ -116,9 +116,9 @@ lerobot-teleoperate \
```python
-from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
-from lerobot.teleoperators.koch_leader import KochLeaderConfig, KochLeader
-from lerobot.robots.koch_follower import KochFollowerConfig, KochFollower
+from lerobot.cameras.opencv import OpenCVCameraConfig
+from lerobot.teleoperators.koch_leader import KochLeader, KochLeaderConfig
+from lerobot.robots.koch_follower import KochFollower, KochFollowerConfig
camera_config = {
"front": OpenCVCameraConfig(index_or_path=0, width=1920, height=1080, fps=30)
@@ -195,12 +195,11 @@ lerobot-record \
```python
-from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
-from lerobot.datasets.lerobot_dataset import LeRobotDataset
-from lerobot.datasets.utils import hw_to_dataset_features
+from lerobot.cameras.opencv import OpenCVCameraConfig
+from lerobot.datasets import LeRobotDataset
+from lerobot.utils.feature_utils import hw_to_dataset_features
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
-from lerobot.teleoperators.so_leader.config_so100_leader import SO100LeaderConfig
-from lerobot.teleoperators.so_leader.so100_leader import SO100Leader
+from lerobot.teleoperators.so_leader import SO100Leader, SO100LeaderConfig
from lerobot.common.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun
@@ -410,9 +409,8 @@ lerobot-replay \
```python
import time
-from lerobot.datasets.lerobot_dataset import LeRobotDataset
-from lerobot.robots.so_follower.config_so100_follower import SO100FollowerConfig
-from lerobot.robots.so_follower.so100_follower import SO100Follower
+from lerobot.datasets import LeRobotDataset
+from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.utils import log_say
@@ -532,13 +530,12 @@ lerobot-record \
```python
-from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
-from lerobot.datasets.lerobot_dataset import LeRobotDataset
-from lerobot.datasets.utils import hw_to_dataset_features
-from lerobot.policies.act.modeling_act import ACTPolicy
-from lerobot.policies.factory import make_pre_post_processors
-from lerobot.robots.so_follower.config_so100_follower import SO100FollowerConfig
-from lerobot.robots.so_follower.so100_follower import SO100Follower
+from lerobot.cameras.opencv import OpenCVCameraConfig
+from lerobot.datasets import LeRobotDataset
+from lerobot.utils.feature_utils import hw_to_dataset_features
+from lerobot.policies.act import ACTPolicy
+from lerobot.policies import make_pre_post_processors
+from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.scripts.lerobot_record import record_loop
from lerobot.common.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
diff --git a/docs/source/installation.mdx b/docs/source/installation.mdx
index a988523b5..246e6415b 100644
--- a/docs/source/installation.mdx
+++ b/docs/source/installation.mdx
@@ -116,6 +116,8 @@ brew install ffmpeg
## Step 3: Install LeRobot 🤗
+The base `lerobot` install is intentionally **lightweight** — it includes only core ML dependencies (PyTorch, torchvision, numpy, opencv, einops, draccus, huggingface-hub, gymnasium, safetensors). Heavier dependencies are gated behind optional extras so you only install what you need.
+
### From Source
First, clone the repository and navigate into the directory:
@@ -131,12 +133,16 @@ Then, install the library in editable mode. This is useful if you plan to contri
```bash
-pip install -e .
+pip install -e ".[robot]" # For real robot workflows (recording, replaying)
+pip install -e ".[training]" # For training policies
+pip install -e ".[all]" # Everything (all policies, envs, hardware, dev tools)
```
```bash
-uv pip install -e .
+uv pip install -e ".[robot]" # For real robot workflows (recording, replaying)
+uv pip install -e ".[training]" # For training policies
+uv pip install -e ".[all]" # Everything (all policies, envs, hardware, dev tools)
```
@@ -162,26 +168,49 @@ uv pip install lerobot
-_This installs only the default dependencies._
+_This installs only the core ML dependencies. You will need to add extras for most workflows._
-**Extra Features:**
-To install additional functionality, use one of the following (If you are using `uv`, replace `pip install` with `uv pip install` in the commands below.):
+**Feature Extras:**
+LeRobot provides **feature-scoped extras** that map to common workflows. If you are using `uv`, replace `pip install` with `uv pip install` in the commands below.
+
+| Extra | What it adds | Typical use case |
+| ---------- | ------------------------------------------- | ----------------------------------- |
+| `dataset` | `datasets`, `av`, `torchcodec`, `jsonlines` | Loading & creating datasets |
+| `training` | `dataset` + `accelerate`, `wandb` | Training policies |
+| `hardware` | `pynput`, `pyserial`, `deepdiff` | Connecting to real robots |
+| `viz` | `rerun-sdk` | Visualization during recording/eval |
+| `build` | `cmake`, `setuptools` | Building native extensions |
+
+**Composite Extras** combine feature extras for common CLI scripts:
+
+| Extra | Includes | Typical use case |
+| ------------- | ------------------------------ | ------------------------------------------------------- |
+| `robot` | `dataset` + `hardware` + `viz` | `lerobot-record`, `lerobot-replay`, `lerobot-calibrate` |
+| `evaluation` | `av` | `lerobot-eval` (add policy + env extras as needed) |
+| `dataset_viz` | `dataset` + `viz` | `lerobot-dataset-viz`, `lerobot-imgtransform-viz` |
```bash
-pip install 'lerobot[all]' # All available features
-pip install 'lerobot[aloha,pusht]' # Specific features (Aloha & Pusht)
-pip install 'lerobot[feetech]' # Feetech motor support
+pip install 'lerobot[robot]' # Record, replay, calibrate
+pip install 'lerobot[training]' # Train policies
+pip install 'lerobot[robot,training]' # Record + train
+pip install 'lerobot[all]' # Everything
```
-_Replace `[...]` with your desired features._
+**Policy, environment, and hardware extras** are still available for specific dependencies:
-**Available Tags:**
-For a full list of optional dependencies, see:
-https://pypi.org/project/lerobot/
+```bash
+pip install 'lerobot[pi]' # Pi0/Pi0.5/Pi0-FAST policy deps
+pip install 'lerobot[smolvla]' # SmolVLA policy deps
+pip install 'lerobot[diffusion]' # Diffusion policy deps (diffusers)
+pip install 'lerobot[aloha,pusht]' # Simulation environments
+pip install 'lerobot[feetech]' # Feetech motor support
+```
+
+_Multiple extras can be combined (e.g., `.[robot,pi,pusht]`). For a full list of available extras, refer to `pyproject.toml`._
### Troubleshooting
-If you encounter build errors, you may need to install additional dependencies: `cmake`, `build-essential`, and `ffmpeg libs`.
+If you encounter build errors, you may need to install additional system dependencies: `cmake`, `build-essential`, and the FFmpeg libraries.
To install these for Linux run:
```bash
@@ -196,8 +225,8 @@ LeRobot provides optional extras for specific functionalities. Multiple extras c
### Simulations
-Install environment packages: `aloha` ([gym-aloha](https://github.com/huggingface/gym-aloha)), or `pusht` ([gym-pusht](https://github.com/huggingface/gym-pusht))
-Example:
+Install environment packages: `aloha` ([gym-aloha](https://github.com/huggingface/gym-aloha)) or `pusht` ([gym-pusht](https://github.com/huggingface/gym-pusht)).
+These automatically include the `dataset` extra.
```bash
pip install -e ".[aloha]" # or "[pusht]" for example
@@ -213,7 +242,7 @@ pip install -e ".[feetech]" # or "[dynamixel]" for example
### Experiment Tracking
-To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with
+[Weights and Biases](https://docs.wandb.ai/quickstart) is included in the `training` extra. To use it for experiment tracking, log in with:
```bash
wandb login
diff --git a/docs/source/introduction_processors.mdx b/docs/source/introduction_processors.mdx
index 6f3768615..4395e889b 100644
--- a/docs/source/introduction_processors.mdx
+++ b/docs/source/introduction_processors.mdx
@@ -19,10 +19,10 @@ This means that your favorite policy can be used like this:
```python
import torch
-from lerobot.datasets.lerobot_dataset import LeRobotDataset
-from lerobot.policies.factory import make_pre_post_processors
+from lerobot.datasets import LeRobotDataset
+from lerobot.policies import make_pre_post_processors
from lerobot.policies.your_policy import YourPolicy
-from lerobot.processor.pipeline import RobotProcessorPipeline, PolicyProcessorPipeline
+from lerobot.processor import RobotProcessorPipeline, PolicyProcessorPipeline
dataset = LeRobotDataset("hf_user/dataset", episodes=[0])
sample = dataset[10]
@@ -260,7 +260,7 @@ Since processor pipelines can add new features (like velocity fields), change te
These functions work together by starting with robot hardware specifications (`create_initial_features()`) then simulating the entire pipeline transformation (`aggregate_pipeline_dataset_features()`) to compute the final feature dictionary that gets passed to `LeRobotDataset.create()`, ensuring perfect alignment between what processors output and what datasets expect to store.
```python
-from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features
+from lerobot.datasets import aggregate_pipeline_dataset_features
# Start with robot's raw features
initial_features = create_initial_features(
diff --git a/docs/source/lerobot-dataset-v3.mdx b/docs/source/lerobot-dataset-v3.mdx
index aadc6f4ab..8ab4a5d40 100644
--- a/docs/source/lerobot-dataset-v3.mdx
+++ b/docs/source/lerobot-dataset-v3.mdx
@@ -89,7 +89,7 @@ A core v3 principle is **decoupling storage from the user API**: data is stored
```python
import torch
-from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets import LeRobotDataset
repo_id = "yaak-ai/L2D-v3"
@@ -135,7 +135,7 @@ for batch in data_loader:
-Use `StreamingLeRobotDataset` to iterate directly from the Hub without local copies. This allows to stream large datasets without the need to downloading them onto disk or loading them onto memory, and is a key feature of the new dataset format.
+Use `StreamingLeRobotDataset` to iterate directly from the Hub without local copies. This lets you stream large datasets without downloading them to disk or loading them into memory, and is a key feature of the new dataset format.
```python
-from lerobot.datasets.streaming_dataset import StreamingLeRobotDataset
+from lerobot.datasets import StreamingLeRobotDataset
repo_id = "yaak-ai/L2D-v3"
dataset = StreamingLeRobotDataset(repo_id) # streams directly from the Hub
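# Sketch: streaming datasets are iterated rather than indexed; pull one sample.
sample = next(iter(dataset))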
@@ -167,7 +167,7 @@ Currently, transforms are applied during **training time only**, not during reco
Use the `image_transforms` parameter when loading a dataset for training:
```python
-from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets import LeRobotDataset
from lerobot.transforms import ImageTransforms, ImageTransformsConfig, ImageTransformConfig
# Option 1: Use default transform configuration (disabled by default)
@@ -290,7 +290,7 @@ python -m lerobot.datasets.v30.convert_dataset_v21_to_v30 --repo-id=