Mirror of https://github.com/huggingface/lerobot.git
Synced 2026-05-11 14:49:43 +00:00
Compare commits
205 Commits
| SHA1 |
|---|
| 4e671ef080 |
| cf9796b2f7 |
| 88116b11e1 |
| cf0c3f0a9a |
| ee48a80e4d |
| cb0fb8ad15 |
| f79fdf7205 |
| a305f5f46a |
| 45348d7b69 |
| d4c1c123c6 |
| da861139a3 |
| 4f51f7153c |
| 9027c7866f |
| c2bf226082 |
| f84c20d403 |
| 4c4462edea |
| 0b710932e2 |
| 9a19f8f6f4 |
| 3504d17fef |
| d35ed3fd83 |
| ce5b27d255 |
| 9dcb407ba7 |
| 5eb5bf7164 |
| 65fb5d3b1a |
| d6a24e2882 |
| d51bbe9492 |
| d8c875e069 |
| eff5b90542 |
| a1a3fa435d |
| 79c3466f0f |
| e1d433cbfc |
| 16e82fd29f |
| ae57fe2d33 |
| e3306951c0 |
| 10e36f2453 |
| 9204a8bccd |
| 43eedf62e4 |
| c51d40ad56 |
| 5c1d930a34 |
| 8d20ca1625 |
| e4df9ccb63 |
| 086815edb7 |
| c9243c29b0 |
| e7617076ca |
| 221e5862ea |
| 1e1b010257 |
| def71cc439 |
| 4557655ab1 |
| 28298fbe78 |
| f84affec23 |
| dad0babbf5 |
| fc5cd05fb0 |
| d01b060d24 |
| 7da15ba069 |
| b0a5b88c21 |
| 42fbcc89c5 |
| 9767120eb4 |
| 852713dc84 |
| 1f38712c95 |
| 0ffc5b4741 |
| a1b1643ff6 |
| 7739fe12e4 |
| be9bdc242f |
| 195cc79c49 |
| f8d42cc038 |
| 1797dea3d5 |
| 825c0666a9 |
| 47bc670ad2 |
| aa505d4192 |
| e380653c62 |
| bf5c037959 |
| 1234e71cfb |
| b1ff7132c1 |
| b357a8c4d8 |
| 0be53ef3e1 |
| aed90c8042 |
| 0b5da92a58 |
| 599218fe9a |
| 2507341a32 |
| bde397e891 |
| 76e260c401 |
| 5179515d81 |
| 8ad00d1ee7 |
| 7440d772ff |
| a4fc02a636 |
| f5c39d6292 |
| 3f616f0ebe |
| 9698e74e88 |
| 04d55e4670 |
| 7dce022a05 |
| cc05067a76 |
| bead25a58a |
| c877e98658 |
| a4c88d6340 |
| 34ca077d78 |
| 2a901f8134 |
| 450be9d7d1 |
| 681be962ae |
| b16e18f978 |
| 652e3cb859 |
| 2a5c757d58 |
| 6d4e983197 |
| ecda7482c7 |
| 7124d471c1 |
| a14af62ee3 |
| ac80f1f081 |
| feb3fed5e8 |
| 8d5f519fcb |
| b9d3c34ae4 |
| 5f759b1637 |
| 6a75b4761a |
| e5ade5565d |
| 0524551f52 |
| 862bc7ef85 |
| d38792d6e5 |
| db3cf0158c |
| 0535f2a59a |
| 2805ae347c |
| 28ef6fcd14 |
| 7fc7ec75bb |
| 87890cbf38 |
| 5326ffe77e |
| a1734cf575 |
| 82f300e880 |
| 3e7c9d7afc |
| e9cb779eab |
| 8ff95be04c |
| f02ce69df0 |
| 1feb7b5d88 |
| fbe9009db2 |
| c0013b130b |
| c4763f61a1 |
| b95c219d96 |
| 9b1138171e |
| 023b8f3466 |
| 1cad87ebd2 |
| 99de7567e6 |
| 21baa8fa02 |
| 8b4a5368b3 |
| f5c6b03b61 |
| e7be2fd113 |
| b632490b4b |
| 9a9c7208d2 |
| 427b97d198 |
| 2c2bb1e8bf |
| 4b24f94225 |
| 670a278cbc |
| fc74001202 |
| f14ac5d486 |
| 7bd0d62ce5 |
| 7eccefe235 |
| b72274066e |
| 20f2910b63 |
| fd4ae3466b |
| 7beb040e8e |
| 05bd18f453 |
| 8077456c00 |
| 5595887fd0 |
| 41959389b6 |
| 2c4e888c7f |
| 5ced72e6b8 |
| 907023f9f7 |
| 4ba23ea029 |
| 409ac0baca |
| 699363f9fc |
| ae7a54de57 |
| fb9139b882 |
| 9fe3a3fb17 |
| 26cb9a24c3 |
| 77106697c3 |
| 75bc44c166 |
| f2b79656eb |
| 14c2ece004 |
| 35612c61e1 |
| f7bb3e2d90 |
| 1e0d667a22 |
| 33969a0337 |
| fa26290e8c |
| e9f7f5127b |
| 097842c70f |
| 3b8a3a32a0 |
| 1c56779dd9 |
| 83a4338f8b |
| 730c7b2f35 |
| 116059a43e |
| b08149a113 |
| c227107f60 |
| 01dc289f3d |
| 6830ca7645 |
| ed42c71fc3 |
| e0139065bd |
| e509f255af |
| e2fcd140b0 |
| 2a7a0e6129 |
| 9f33791b19 |
| 453e0a995f |
| 8ebf79c494 |
| 8774aec304 |
| ac742c9f0d |
| cd13f1ecfd |
| 9aa632968f |
| 62caaf07b0 |
| 3355f04ca6 |
| 769f531603 |
| f6c7287ae7 |
@@ -30,7 +30,7 @@ pytest -sx tests/test_stuff.py::test_something
```

```bash
lerobot-train --some.option=true
python -m lerobot.scripts.train --some.option=true
```

## SECTION TO REMOVE BEFORE SUBMITTING YOUR PR

@@ -29,8 +29,8 @@ on:
env:
  UV_VERSION: "0.8.0"
  PYTHON_VERSION: "3.10"
  DOCKER_IMAGE_NAME_CPU: huggingface/lerobot-cpu:latest
  DOCKER_IMAGE_NAME_GPU: huggingface/lerobot-gpu:latest
  DOCKER_IMAGE_NAME_CPU: huggingface/lerobot-gpu:latest
  DOCKER_IMAGE_NAME_GPU: huggingface/lerobot-cpu:latest

# Ensures that only the latest commit is built, canceling older runs.
concurrency:

@@ -44,7 +44,7 @@ test-end-to-end:
	${MAKE} DEVICE=$(DEVICE) test-smolvla-ete-eval

test-act-ete-train:
	lerobot-train \
	python -m lerobot.scripts.train \
		--policy.type=act \
		--policy.dim_model=64 \
		--policy.n_action_steps=20 \
@@ -68,12 +68,12 @@ test-act-ete-train:
		--output_dir=tests/outputs/act/

test-act-ete-train-resume:
	lerobot-train \
	python -m lerobot.scripts.train \
		--config_path=tests/outputs/act/checkpoints/000002/pretrained_model/train_config.json \
		--resume=true

test-act-ete-eval:
	lerobot-eval \
	python -m lerobot.scripts.eval \
		--policy.path=tests/outputs/act/checkpoints/000004/pretrained_model \
		--policy.device=$(DEVICE) \
		--env.type=aloha \
@@ -82,7 +82,7 @@ test-act-ete-eval:
		--eval.batch_size=1

test-diffusion-ete-train:
	lerobot-train \
	python -m lerobot.scripts.train \
		--policy.type=diffusion \
		--policy.down_dims='[64,128,256]' \
		--policy.diffusion_step_embed_dim=32 \
@@ -106,7 +106,7 @@ test-diffusion-ete-train:
		--output_dir=tests/outputs/diffusion/

test-diffusion-ete-eval:
	lerobot-eval \
	python -m lerobot.scripts.eval \
		--policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \
		--policy.device=$(DEVICE) \
		--env.type=pusht \
@@ -115,7 +115,7 @@ test-diffusion-ete-eval:
		--eval.batch_size=1

test-tdmpc-ete-train:
	lerobot-train \
	python -m lerobot.scripts.train \
		--policy.type=tdmpc \
		--policy.device=$(DEVICE) \
		--policy.push_to_hub=false \
@@ -137,7 +137,7 @@ test-tdmpc-ete-train:
		--output_dir=tests/outputs/tdmpc/

test-tdmpc-ete-eval:
	lerobot-eval \
	python -m lerobot.scripts.eval \
		--policy.path=tests/outputs/tdmpc/checkpoints/000002/pretrained_model \
		--policy.device=$(DEVICE) \
		--env.type=xarm \
@@ -148,7 +148,7 @@ test-tdmpc-ete-eval:


test-smolvla-ete-train:
	lerobot-train \
	python -m lerobot.scripts.train \
		--policy.type=smolvla \
		--policy.n_action_steps=20 \
		--policy.chunk_size=20 \
@@ -171,7 +171,7 @@ test-smolvla-ete-train:
		--output_dir=tests/outputs/smolvla/

test-smolvla-ete-eval:
	lerobot-eval \
	python -m lerobot.scripts.eval \
		--policy.path=tests/outputs/smolvla/checkpoints/000004/pretrained_model \
		--policy.device=$(DEVICE) \
		--env.type=aloha \

@@ -6,7 +6,7 @@

<div align="center">

[![Nightly](https://img.shields.io/github/actions/workflow/status/huggingface/lerobot/nightly.yml?branch=main&label=Nightly&style=flat-square)](https://github.com/huggingface/lerobot/actions/workflows/nightly.yml?query=branch%3Amain)
[![Nightly](https://img.shields.io/github/actions/workflow/status/huggingface/lerobot/nightly.yml?branch=main&label=Nightly&style=flat-square)](https://github.com/huggingface/lerobot/actions/workflows/nighty.yml?query=branch%3Amain)
[![Python versions](https://img.shields.io/pypi/pyversions/lerobot?style=flat-square)](https://www.python.org/downloads/)
[![License](https://img.shields.io/badge/license-apache%202.0-blue?style=flat-square)](https://github.com/huggingface/lerobot/blob/main/LICENSE)
[![PyPI version](https://img.shields.io/pypi/v/lerobot?style=flat-square)](https://pypi.org/project/lerobot/)
@@ -276,7 +276,7 @@ Check out [example 2](https://github.com/huggingface/lerobot/blob/main/examples/
We also provide a more capable script to parallelize the evaluation over multiple environments during the same rollout. Here is an example with a pretrained model hosted on [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht):

```bash
lerobot-eval \
python -m lerobot.scripts.eval \
    --policy.path=lerobot/diffusion_pusht \
    --env.type=pusht \
    --eval.batch_size=10 \
@@ -288,10 +288,10 @@ lerobot-eval \
Note: After training your own policy, you can re-evaluate the checkpoints with:

```bash
lerobot-eval --policy.path={OUTPUT_DIR}/checkpoints/last/pretrained_model
python -m lerobot.scripts.eval --policy.path={OUTPUT_DIR}/checkpoints/last/pretrained_model
```

See `lerobot-eval --help` for more instructions.
See `python -m lerobot.scripts.eval --help` for more instructions.

### Train your own policy

@@ -303,7 +303,7 @@ A link to the wandb logs for the run will also show up in yellow in your terminal

<img src="https://raw.githubusercontent.com/huggingface/lerobot/main/media/wandb.png" alt="WandB logs example">

Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `--eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `lerobot-eval --help` for more instructions.
Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `--eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python -m lerobot.scripts.eval --help` for more instructions.

#### Reproduce state-of-the-art (SOTA)

@@ -311,7 +311,7 @@ We provide some pretrained policies on our [hub page](https://huggingface.co/ler
You can reproduce their training by loading the config from their run. Simply running:

```bash
lerobot-train --config_path=lerobot/diffusion_pusht
python -m lerobot.scripts.train --config_path=lerobot/diffusion_pusht
```

reproduces SOTA results for Diffusion Policy on the PushT task.

@@ -1,11 +0,0 @@
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: NO
downcast_bf16: 'no'
enable_cpu_affinity: false
machine_rank: 0
main_training_function: main
mixed_precision: 'no'
num_machines: 1
num_processes: 1
use_cpu: false
@@ -1,18 +0,0 @@
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: MULTI_GPU
downcast_bf16: 'no'
enable_cpu_affinity: false
gpu_ids: all
machine_rank: 0
main_training_function: main
mixed_precision: 'no'
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
dynamo_backend: "no"
@@ -29,7 +29,7 @@ ENV DEBIAN_FRONTEND=noninteractive \

# Install system dependencies and uv (as root)
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential git curl libglib2.0-0 libegl1-mesa-dev ffmpeg \
    build-essential git curl libglib2.0-0 libegl1-mesa ffmpeg \
    libusb-1.0-0-dev speech-dispatcher libgeos-dev portaudio19-dev \
    && curl -LsSf https://astral.sh/uv/install.sh | sh \
    && mv /root/.local/bin/uv /usr/local/bin/uv \

@@ -24,9 +24,16 @@
    - local: smolvla
      title: Finetune SmolVLA
  title: "Policies"

- sections:
    - local: introduction_processors
      title: Introduction to Robot Processors
    - local: implement_your_own_processor
      title: Implement your own processor
    - local: processors_robots_teleop
      title: Processors for Robots and Teleoperators
  title: "Robot Processors"
- sections:
    - local: hope_jr
      title: Hope Jr
    - local: so101
      title: SO-101
    - local: so100
@@ -35,12 +42,16 @@
      title: Koch v1.1
    - local: lekiwi
      title: LeKiwi
    - local: hope_jr
      title: Hope Jr
  title: "Robots"
- sections:
    - local: phone_teleop
      title: Phone
  title: "Teleoperators"
- sections:
    - local: notebooks
      title: Notebooks
    - local: feetech
      title: Updating Feetech Firmware
  title: "Resources"
- sections:
    - local: contributing

@@ -9,7 +9,7 @@ To instantiate a camera, you need a camera identifier. This identifier might change
To find the camera indices of the cameras plugged into your system, run the following script:

```bash
lerobot-find-cameras opencv # or realsense for Intel Realsense cameras
python -m lerobot.find_cameras opencv # or realsense for Intel Realsense cameras
```
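
If you prefer to check which indices respond without going through LeRobot, here is a minimal sketch that probes the first few indices with OpenCV directly. It assumes `opencv-python` is installed and is not the implementation behind the command above:

```python
import cv2

# Probe the first few OpenCV camera indices and report which ones respond.
for index in range(4):
    cap = cv2.VideoCapture(index)
    if cap.isOpened():
        ok, frame = cap.read()
        if ok:
            print(f"Camera {index}: {frame.shape[1]}x{frame.shape[0]} pixels")
    cap.release()
```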

The output of `lerobot-find-cameras` will look something like this if you have two cameras connected:

@@ -1,71 +0,0 @@
# Feetech Motor Firmware Update

This tutorial guides you through updating the firmware of Feetech motors using the official Feetech software.

## Prerequisites

- Windows computer (Feetech software is only available for Windows)
- Feetech motor control board
- USB cable to connect the control board to your computer
- Feetech motors connected to the control board

## Step 1: Download Feetech Software

1. Visit the official Feetech software download page: [https://www.feetechrc.com/software.html](https://www.feetechrc.com/software.html)
2. Download the latest version of the Feetech debugging software (FD)
3. Install the software on your Windows computer

## Step 2: Hardware Setup

1. Connect your Feetech motors to the motor control board
2. Connect the motor control board to your Windows computer via USB cable
3. Ensure power is supplied to the motors

## Step 3: Configure Connection

1. Launch the Feetech debugging software
2. Select the correct COM port from the port dropdown menu
   - If unsure which port to use, check Windows Device Manager under "Ports (COM & LPT)", or list the ports programmatically as in the sketch after this list
3. Set the appropriate baud rate (typically 1000000 for most Feetech motors)
4. Click "Open" to establish communication with the control board
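
A minimal sketch for listing serial ports, assuming the `pyserial` package is installed. This only helps you locate the control board's COM port; the firmware update itself still requires the Feetech software:

```python
# List serial ports so you can spot the Feetech control board's COM port.
from serial.tools import list_ports

for port in list_ports.comports():
    print(port.device, "-", port.description)
```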

## Step 4: Scan for Motors

1. Once connected, click the "Search" button to detect all connected motors
2. The software will automatically discover and list all motors on the bus
3. Each motor will appear with its ID number

## Step 5: Update Firmware

For each motor you want to update:

1. **Select the motor** from the list by clicking on it
2. **Click on the Upgrade tab**
3. **Click on the Online button**:
   - If a potential firmware update is found, it will be displayed in the box
4. **Click on the Upgrade button**:
   - The update progress will be displayed

## Step 6: Verify Update

1. After the update completes, the software should automatically refresh the motor information
2. Verify that the firmware version has been updated to the expected version

## Important Notes

⚠️ **Warning**: Do not disconnect power or USB during a firmware update; doing so can brick the motor.

## Bonus: Motor Debugging on Linux/macOS

For debugging purposes only, you can use the open-source Feetech Debug Tool:

- **Repository**: [FT_SCServo_Debug_Qt](https://github.com/CarolinePascal/FT_SCServo_Debug_Qt/tree/fix/port-search-timer)

### Installation Instructions

Follow the instructions in the repository to install the tool; on Ubuntu you can install it directly, while on macOS you need to build it from source.

**Limitations:**

- This tool is for debugging and parameter adjustment only
- Firmware updates must still be done on Windows with the official Feetech software
@@ -412,7 +412,7 @@ Example configuration for training the [reward classifier](https://huggingface.c
To train the classifier, use the `train.py` script with your configuration:

```bash
lerobot-train --config_path path/to/reward_classifier_train_config.json
python -m lerobot.scripts.train --config_path path/to/reward_classifier_train_config.json
```

**Deploying and Testing the Model**
@@ -458,7 +458,7 @@ The reward classifier will automatically provide rewards based on the visual inp
3. **Train the classifier**:

```bash
lerobot-train --config_path src/lerobot/configs/reward_classifier_train_config.json
python -m lerobot.scripts.train --config_path src/lerobot/configs/reward_classifier_train_config.json
```

4. **Test the classifier**:

@@ -19,7 +19,7 @@ pip install -e ".[hopejr]"
Before starting calibration and operation, you need to identify the USB ports for each HopeJR component. Run this script to find the USB ports for the arm, hand, glove, and exoskeleton:

```bash
lerobot-find-port
python -m lerobot.find_port
```

This will display the available USB ports and their associated devices. Make note of the port paths (e.g., `/dev/tty.usbmodem58760433331`, `/dev/tty.usbmodem11301`) as you'll need to specify them in the `--robot.port` and `--teleop.port` parameters when recording data, replaying episodes, or running teleoperation scripts.
@@ -31,7 +31,7 @@ Before performing teleoperation, HopeJR's limbs need to be calibrated. Calibrati
### 1.1 Calibrate Robot Hand

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
    --robot.type=hope_jr_hand \
    --robot.port=/dev/tty.usbmodem58760432281 \
    --robot.id=blue \
@@ -81,7 +81,7 @@ Once you have set the appropriate boundaries for all joints, click "Save" to sav
### 1.2 Calibrate Teleoperator Glove

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
    --teleop.type=homunculus_glove \
    --teleop.port=/dev/tty.usbmodem11201 \
    --teleop.id=red \
@@ -120,7 +120,7 @@ Once calibration is complete, the system will save the calibration to `/Users/yo
### 1.3 Calibrate Robot Arm

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
    --robot.type=hope_jr_arm \
    --robot.port=/dev/tty.usbserial-1110 \
    --robot.id=white
@@ -146,7 +146,7 @@ Use the calibration interface to set the range boundaries for each joint. Move e
### 1.4 Calibrate Teleoperator Exoskeleton

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
    --teleop.type=homunculus_arm \
    --teleop.port=/dev/tty.usbmodem11201 \
    --teleop.id=black
@@ -178,7 +178,7 @@ Due to global variable conflicts in the Feetech middleware, teleoperation for ar
### Hand

```bash
lerobot-teleoperate \
python -m lerobot.teleoperate \
    --robot.type=hope_jr_hand \
    --robot.port=/dev/tty.usbmodem58760432281 \
    --robot.id=blue \
@@ -194,7 +194,7 @@ lerobot-teleoperate \
### Arm

```bash
lerobot-teleoperate \
python -m lerobot.teleoperate \
    --robot.type=hope_jr_arm \
    --robot.port=/dev/tty.usbserial-1110 \
    --robot.id=white \
@@ -214,7 +214,7 @@ Record, Replay and Train with Hope-JR is still experimental.
This step records the dataset; an example can be seen [here](https://huggingface.co/datasets/nepyope/hand_record_test_with_video_data/settings).

```bash
lerobot-record \
python -m lerobot.record \
    --robot.type=hope_jr_hand \
    --robot.port=/dev/tty.usbmodem58760432281 \
    --robot.id=right \
@@ -236,7 +236,7 @@ lerobot-record \
### Replay

```bash
lerobot-replay \
python -m lerobot.replay \
    --robot.type=hope_jr_hand \
    --robot.port=/dev/tty.usbmodem58760432281 \
    --robot.id=right \
@@ -248,7 +248,7 @@ lerobot-replay \
### Train

```bash
lerobot-train \
python -m lerobot.scripts.train \
    --dataset.repo_id=nepyope/hand_record_test_with_video_data \
    --policy.type=act \
    --output_dir=outputs/train/hopejr_hand \
@@ -263,7 +263,7 @@ lerobot-train \
This training run can be viewed as an example [here](https://wandb.ai/tino/lerobot/runs/rp0k8zvw?nw=nwusertino).

```bash
lerobot-record \
python -m lerobot.record \
    --robot.type=hope_jr_hand \
    --robot.port=/dev/tty.usbmodem58760432281 \
    --robot.id=right \

@@ -45,7 +45,7 @@ Note that the `id` associated with a robot is used to store the calibration file
<hfoptions id="teleoperate_so101">
<hfoption id="Command">
```bash
lerobot-teleoperate \
python -m lerobot.teleoperate \
    --robot.type=so101_follower \
    --robot.port=/dev/tty.usbmodem58760431541 \
    --robot.id=my_awesome_follower_arm \
@@ -101,7 +101,7 @@ With `rerun`, you can teleoperate again while simultaneously visualizing the cam
<hfoptions id="teleoperate_koch_camera">
<hfoption id="Command">
```bash
lerobot-teleoperate \
python -m lerobot.teleoperate \
    --robot.type=koch_follower \
    --robot.port=/dev/tty.usbmodem58760431541 \
    --robot.id=my_awesome_follower_arm \
@@ -174,7 +174,7 @@ Now you can record a dataset. To record 5 episodes and upload your dataset to th
<hfoptions id="record">
<hfoption id="Command">
```bash
lerobot-record \
python -m lerobot.record \
    --robot.type=so101_follower \
    --robot.port=/dev/tty.usbmodem585A0076841 \
    --robot.id=my_awesome_follower_arm \
@@ -376,7 +376,7 @@ You can replay the first episode on your robot with either the command below or
<hfoptions id="replay">
<hfoption id="Command">
```bash
lerobot-replay \
python -m lerobot.replay \
    --robot.type=so101_follower \
    --robot.port=/dev/tty.usbmodem58760431541 \
    --robot.id=my_awesome_follower_arm \
@@ -428,10 +428,10 @@ Your robot should replicate movements similar to those you recorded. For example

## Train a policy

To train a policy to control your robot, use the [`lerobot-train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
To train a policy to control your robot, use the [`python -m lerobot.scripts.train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:

```bash
lerobot-train \
python -m lerobot.scripts.train \
    --dataset.repo_id=${HF_USER}/so101_test \
    --policy.type=act \
    --output_dir=outputs/train/act_so101_test \
@@ -453,7 +453,7 @@ Training should take several hours. You will find checkpoints in `outputs/train/
To resume training from a checkpoint, below is an example command to resume from the `last` checkpoint of the `act_so101_test` policy:

```bash
lerobot-train \
python -m lerobot.scripts.train \
    --config_path=outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json \
    --resume=true
```
@@ -490,7 +490,7 @@ You can use the `record` script from [`lerobot/record.py`](https://github.com/hu
<hfoptions id="eval">
<hfoption id="Command">
```bash
lerobot-record \
python -m lerobot.record \
    --robot.type=so100_follower \
    --robot.port=/dev/ttyACM1 \
    --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \
@@ -519,11 +519,14 @@ from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import _init_rerun
from lerobot.record import record_loop
from lerobot.policies.factory import make_processor

NUM_EPISODES = 5
FPS = 30
EPISODE_TIME_SEC = 60
TASK_DESCRIPTION = "My task description"
HF_MODEL_ID = "<hf_username>/<model_repo_id>"
HF_DATASET_ID = "<hf_username>/<eval_dataset_repo_id>"

# Create the robot configuration
camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
@@ -535,7 +538,7 @@ robot_config = SO100FollowerConfig(
robot = SO100Follower(robot_config)

# Initialize the policy
policy = ACTPolicy.from_pretrained("<hf_username>/<my_policy_repo_id>")
policy = ACTPolicy.from_pretrained(HF_MODEL_ID)

# Configure the dataset features
action_features = hw_to_dataset_features(robot.action_features, "action")
@@ -544,7 +547,7 @@ dataset_features = {**action_features, **obs_features}

# Create the dataset
dataset = LeRobotDataset.create(
    repo_id="<hf_username>/eval_<dataset_repo_id>",
    repo_id=HF_DATASET_ID,
    fps=FPS,
    features=dataset_features,
    robot_type=robot.name,
@@ -559,6 +562,12 @@ _init_rerun(session_name="recording")
# Connect the robot
robot.connect()

preprocessor, postprocessor = make_processor(
    policy_cfg=policy,
    pretrained_path=HF_MODEL_ID,
    dataset_stats=dataset.meta.stats,
)

for episode_idx in range(NUM_EPISODES):
    log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}")

@@ -568,6 +577,8 @@ for episode_idx in range(NUM_EPISODES):
        events=events,
        fps=FPS,
        policy=policy,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        dataset=dataset,
        control_time_s=EPISODE_TIME_SEC,
        single_task=TASK_DESCRIPTION,

@@ -96,10 +96,10 @@ If you uploaded your dataset to the hub you can [visualize your dataset online](

## Train a policy

To train a policy to control your robot, use the [`lerobot-train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
To train a policy to control your robot, use the [`python -m lerobot.scripts.train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:

```bash
lerobot-train \
python -m lerobot.scripts.train \
    --dataset.repo_id=${HF_USER}/il_gym \
    --policy.type=act \
    --output_dir=outputs/train/il_sim_test \

@@ -0,0 +1,323 @@
# Implement your own Robot Processor

In this tutorial, you'll learn how to implement your own Robot Processor.
It begins by exploring the need for a custom processor, then uses the Normalization processors as the running example to explain how to implement, configure, and serialize a processor. Finally, it lists all helper processors that ship with LeRobot.

## Why would you need a custom processor?

In most cases, when reading raw data from a sensor like the camera and robot motor encoders,
you will need to process this data to transform it into a format that is compatible with the policies in LeRobot.
For example, raw images are encoded with `uint8` and the values are in the range `[0, 255]`.
To use these images with the policies, you will need to cast them to `float32` and normalize them to the range `[0, 1]`.

For example, in LeRobot's `VanillaObservationProcessor`, raw images come from the environment as numpy arrays with `uint8` values in range `[0, 255]` and in channel-last format `(H, W, C)`. The processor transforms them into PyTorch tensors with `float32` values in range `[0, 1]` and channel-first format `(C, H, W)`:

```python
# Input: numpy array with shape (480, 640, 3) and dtype uint8
raw_image = env_observation["pixels"]  # Values in [0, 255]

# After processing: torch tensor with shape (1, 3, 480, 640) and dtype float32
processed_image = processor(transition)["observation"]["observation.image"]  # Values in [0, 1]
```

On the other hand, when a model returns an action to be executed on the robot, one often has to post-process this action to make it compatible with the robot.
For example, the model might return joint position values in the range `[-1, 1]`, which need to be scaled to the robot's minimum and maximum joint angle positions.

In LeRobot, this normalization workflow is handled by the `NormalizerProcessor` (for inputs) and the `UnnormalizerProcessor` (for outputs). These processors are heavily used by policies (e.g., Pi0, SmolVLA) and integrate tightly with the `RobotProcessor`'s `get_config`, `state_dict`, and `load_state_dict` APIs.

For instance, `UnnormalizerProcessor` converts model outputs in `[-1, 1]` back to actual robot joint ranges:

```python
# Input: model action with normalized values in [-1, 1]
normalized_action = torch.tensor([-0.5, 0.8, -1.0, 0.2])  # Model output

# After post-processing: real joint positions in robot's native ranges
# Example: joints range from [-180.0, 180.0]
real_action = unnormalizer(transition)["action"]
# real action after post-processing: [ -90., 144., -180., 36.]
```

The unnormalizer uses the dataset statistics to convert back:

```python
# For MIN_MAX normalization: action = (normalized + 1) * (max - min) / 2 + min
real_action = (normalized_action + 1) * (max_val - min_val) / 2 + min_val
```
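
As a quick sanity check of this formula, the illustrative values above can be reproduced directly (a standalone sketch, not LeRobot code):

```python
import torch

# Verify the MIN_MAX formula with the example values used above.
normalized_action = torch.tensor([-0.5, 0.8, -1.0, 0.2])
min_val, max_val = -180.0, 180.0
real_action = (normalized_action + 1) * (max_val - min_val) / 2 + min_val
print(real_action)  # tensor([ -90.,  144., -180.,   36.])
```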

All these situations point us toward the need for a mechanism to preprocess the data before it is passed to the policies, and then post-process the actions that are returned before they are executed on the robot.

To that end, LeRobot provides a pipeline mechanism to implement a sequence of processing steps for the input data and the output action.

## How to implement your own processor?

We'll use the `NormalizerProcessor` as a concrete running example because it is central to most policies and demonstrates configuration and state serialization cleanly.

Prepare the sequence of processing steps necessary for your problem. A processor step is a class that implements the following methods:

- `__call__`: implements the processing step for the input transition.
- `get_config`: gets the configuration of the processor step.
- `state_dict`: gets the state of the processor step.
- `load_state_dict`: loads the state of the processor step.
- `reset`: resets the state of the processor step.
- `feature_contract`: declares how the processor step modifies the feature space.

### Implement the `__call__` method

The `__call__` method is the core of your processor step. It takes an `EnvTransition` and returns a modified `EnvTransition`. Here's how the `NormalizerProcessor` conceptually works (simplified):

```python
from dataclasses import dataclass

import torch

from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.processor.pipeline import EnvTransition, TransitionKey


@dataclass
class NormalizerProcessor:
    features: dict[str, PolicyFeature]
    norm_map: dict[FeatureType, NormalizationMode]
    stats: dict[str, dict[str, torch.Tensor]]
    eps: float = 1e-8

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        normalized_info = {}

        obs = transition.get(TransitionKey.OBSERVATION)
        act = transition.get(TransitionKey.ACTION)

        new_obs = self._normalize_observation(obs, normalized_info)
        new_act = self._normalize_action(act, normalized_info)

        new_transition = transition.copy()
        new_transition[TransitionKey.OBSERVATION] = new_obs
        new_transition[TransitionKey.ACTION] = new_act

        # Record what was normalized into complementary_data
        if normalized_info:
            comp = new_transition.get(TransitionKey.COMPLEMENTARY_DATA) or {}
            comp = dict(comp)
            comp["normalized_keys"] = normalized_info
            new_transition[TransitionKey.COMPLEMENTARY_DATA] = comp

        return new_transition
```

See the full implementation in `src/lerobot/processor/normalize_processor.py` for details on mean/std and min/max modes and key selection.

**Key principles:**

- Always check if required data exists before processing
- Return the unchanged transition if no processing is needed
- Use `transition.copy()` to avoid side effects
- Only modify the specific keys your processor handles

**Tip**: For observation-only processors, you can inherit from `ObservationProcessor` to avoid writing `__call__` boilerplate. The normalizer is mixed (observations and actions), so it implements `__call__` directly.

### Configuration and State Management

Processors support serialization through three methods that separate configuration from tensor state. This is especially important for normalization processors, which carry dataset statistics (tensors) in their state and hyperparameters in their config:

```python
from dataclasses import dataclass, field
from typing import Any

import torch

from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature


@dataclass
class NormalizerProcessor:
    features: dict[str, PolicyFeature]
    norm_map: dict[FeatureType, NormalizationMode]
    eps: float = 1e-8
    _tensor_stats: dict[str, dict[str, torch.Tensor]] = field(default_factory=dict, init=False, repr=False)

    def get_config(self) -> dict[str, Any]:
        """JSON-serializable configuration (no tensors)."""
        return {
            "eps": self.eps,
            "features": {k: {"type": v.type.value, "shape": v.shape} for k, v in self.features.items()},
            "norm_map": {ft.value: nm.value for ft, nm in self.norm_map.items()},
        }

    def state_dict(self) -> dict[str, torch.Tensor]:
        """Tensor state only (e.g., dataset statistics)."""
        flat: dict[str, torch.Tensor] = {}
        for key, sub in self._tensor_stats.items():
            for stat_name, tensor in sub.items():
                flat[f"{key}.{stat_name}"] = tensor
        return flat

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """Restore tensor state at runtime."""
        self._tensor_stats.clear()
        for flat_key, tensor in state.items():
            key, stat_name = flat_key.rsplit(".", 1)
            self._tensor_stats.setdefault(key, {})[stat_name] = tensor
```

**Usage:**

```python
# Save (e.g., inside a policy)
config = processor.get_config()
tensors = processor.state_dict()

# Restore (e.g., loading a pretrained policy)
new_processor = NormalizerProcessor(**config)
new_processor.load_state_dict(tensors)
```

### Transform features

The `transform_features` method defines how your processor transforms feature names and shapes. This is crucial for policy configuration and debugging.

Normalization typically preserves the feature keys and shapes, so `NormalizerProcessor.transform_features` returns the input features unchanged. When your processor renames or reshapes, implement this method to reflect the mapping for downstream components. For example, a simple rename processor:

```python
def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
    # Simple renaming
    if "pixels" in features:
        features["observation.image"] = features.pop("pixels")

    # Pattern-based renaming
    for key in list(features.keys()):
        if key.startswith("env_state."):
            suffix = key[len("env_state."):]
            features[f"observation.{suffix}"] = features.pop(key)

    return features
```

**Key principles:**

- Use `features.pop(old_key)` to remove and get the old feature
- Use `features[new_key] = old_feature` to add the renamed feature
- Always return the modified features dictionary
- Document transformations clearly in the docstring

### Example of usage from the codebase

`transform_features` is used by `RobotProcessor` to derive the dataset/policy feature contract from an initial feature set by applying each step's transformation. You can see concrete examples in the codebase:

- Phone teleoperation record pipeline (`examples/phone_so100_record.py`): processors like `ForwardKinematicsJointsToEE`, `GripperVelocityToJoint`, and `EEBoundsAndSafety` implement `transform_features` to declare which action/observation keys should be materialized in the dataset.
- SO100 follower kinematics (`src/lerobot/robots/so100_follower/robot_kinematic_processor.py`): each processor's `transform_features` method adds or refines feature keys such as `observation.state.ee.{x,y,z,wx,wy,wz}` or `action.gripper.pos`.
- Rename and tokenizer processors (`src/lerobot/processor/rename_processor.py`, `src/lerobot/processor/tokenizer_processor.py`): demonstrate key renaming and adding language token features to the contract.

In practice, you will often aggregate features by running `RobotProcessor.transform_features(...)` with your initial features to compute the final contract before recording or training.
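
A minimal sketch of that aggregation, assuming `processor` is a `RobotProcessor` and `initial_features` is a `dict[str, PolicyFeature]` as in the earlier examples:

```python
# Derive the final feature contract by applying every step's transformation.
final_features = processor.transform_features(initial_features)
for name, feature in final_features.items():
    print(name, feature.type, feature.shape)
```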

## Helper Classes

LeRobot provides pre-built processor classes for common transformations. Below is a comprehensive list of registered processors in the codebase.

### Core processors (observations, actions, normalization)

- **`VanillaObservationProcessor`** (`observation_processor`): Images and state processing to LeRobot format.
- **`NormalizerProcessor`** (`normalizer_processor`): Normalize observations/actions (mean/std or min/max to [-1, 1]).
- **`UnnormalizerProcessor`** (`unnormalizer_processor`): Inverse of the normalizer for model outputs.
- **`DeviceProcessor`** (`device_processor`): Move tensors to a specific device (CPU/GPU) and optional float dtype.
- **`ToBatchProcessor`** (`to_batch_processor`): Add batch dimension to observations/actions when missing.
- **`RenameProcessor`** (`rename_processor`): Rename observation keys using a mapping dictionary.
- **`TokenizerProcessor`** (`tokenizer_processor`): Tokenize language tasks into `observation.language.*` tensors.

### Teleoperation mapping processors

- **`MapDeltaActionToRobotAction`** (`map_delta_action_to_robot_action`): Map teleop deltas (e.g., gamepad) to `action.target_*` fields.
- **`MapPhoneActionToRobotAction`** (`map_phone_action_to_robot_action`): Map calibrated phone pose/buttons to `action.target_*` and gripper.

### Robot kinematics processors (SO100 follower example)

- **`EEReferenceAndDelta`** (`ee_reference_and_delta`): Compute desired EE pose from target deltas and current pose.
- **`EEBoundsAndSafety`** (`ee_bounds_and_safety`): Clip EE pose to bounds and check for jumps.
- **`InverseKinematicsEEToJoints`** (`inverse_kinematics_ee_to_joints`): Convert EE pose to joint targets via IK.
- **`GripperVelocityToJoint`** (`gripper_velocity_to_joint`): Convert gripper velocity input to joint position command.
- **`ForwardKinematicsJointsToEE`** (`forward_kinematics_joints_to_ee`): Compute EE pose features from joint positions via FK.
- **`AddRobotObservationAsComplimentaryData`** (`add_robot_observation`): Read robot observation and insert `raw_joint_positions` into complementary data.

### Policy-specific utility processors

- **`Pi0NewLineProcessor`** (`pi0_new_line_processor`): Ensure text tasks end with a newline (Pi0 tokenizer compatibility).
- **`SmolVLANewLineProcessor`** (`smolvla_new_line_processor`): Ensure text tasks end with a newline (SmolVLA tokenizer compatibility).

### Usage Example

```python
from lerobot.processor import NormalizerProcessor, DeviceProcessor, RobotProcessor, ToBatchProcessor

# Create a processing pipeline (typical policy preprocessor)
steps = [
    NormalizerProcessor(features=features, norm_map=norm_map, stats=stats),
    ToBatchProcessor(),
    DeviceProcessor(device="cuda"),
]

# Use in RobotProcessor
processor = RobotProcessor(steps=steps)
processed_transition = processor(raw_transition)
```

### Using overrides

You can override step parameters at load-time using `overrides`. This is handy for non-serializable objects or site-specific settings. It works both in policy factories and with `RobotProcessor.from_pretrained(...)`.

Example: during policy evaluation on the robot, override the device and rename map.
Use this to run a policy trained on CUDA on a CPU-only robot, or to remap camera keys when the robot uses different names than the dataset.

```python
# src/lerobot/record.py, lines 437-445
preprocessor, postprocessor = make_processor(
    policy_cfg=cfg.policy,
    pretrained_path=cfg.policy.pretrained_path,
    dataset_stats=rename_stats(dataset.meta.stats, cfg.dataset.rename_map),
    preprocessor_overrides={
        "device_processor": {"device": cfg.policy.device},
        "rename_processor": {"rename_map": cfg.dataset.rename_map},
    },
)
```

Direct usage with `from_pretrained`:

```python
from lerobot.processor import RobotProcessor

processor = RobotProcessor.from_pretrained(
    "username/my-processor",
    overrides={
        "device_processor": {"device": "cuda:0"},  # registry name for registered steps
        "CustomStep": {"param": 42},  # class name for non-registered steps
    },
)
```

## Best Practices

- **Keep processors atomic** - One transformation per processor for reusability and debugging
- **Use dataclasses** - Clean initialization with `@dataclass`
- **Always register processors** - Use `@ProcessorStepRegistry.register("name")` for discoverability
- **Check for None** - Always validate that required data exists before processing
- **Use copy() for safety** - Avoid side effects with `transition.copy()`
- **Separate config and state** - JSON-serializable config vs tensor state_dict
- **Use base classes** - Inherit from `ObservationProcessor` for observation-only processing

```python
@ProcessorStepRegistry.register("my_processor")
@dataclass
class MyProcessor(ObservationProcessor):
    threshold: float = 0.5

    def observation(self, observation):
        if observation is None:
            return observation
        # Your processing logic here
        return processed_observation
```

## Conclusion

You now have all the tools to implement custom processors in LeRobot! The key steps are:

1. **Define your processor** as a dataclass with the required methods (`__call__`, `get_config`, `state_dict`, `load_state_dict`, `reset`, `feature_contract`)
2. **Register it** using `@ProcessorStepRegistry.register("name")` for discoverability
3. **Integrate it** into a `RobotProcessor` pipeline with other processing steps
4. **Use base classes** like `ObservationProcessor` when possible to reduce boilerplate

The processor system is designed to be modular and composable, allowing you to build complex data processing pipelines from simple, focused components. Whether you're preprocessing sensor data for training or post-processing model outputs for robot execution, custom processors give you the flexibility to handle any data transformation your robotics application requires. Policies like Pi0 and SmolVLA use the same normalization processors described above, so your understanding here will transfer directly when wiring policy preprocessors and postprocessors.

Start simple, test thoroughly, and leverage the existing helper classes to build robust data processing pipelines for your robot learning workflows.
@@ -0,0 +1,991 @@
# Introduction to Processors

In robotics, there's a fundamental mismatch between the data that robots and humans produce and what machine learning models expect. This creates several translation challenges:

**Raw Robot Data → Model Input:**

- Robots output raw sensor data (camera images, joint positions, force readings) that need normalization, batching, and device placement before models can process them
- Language instructions from humans ("pick up the red cube") must be tokenized into numerical representations
- Different robots use different coordinate systems and units that need standardization

**Model Output → Robot Commands:**

- Models might output end-effector positions, but robots need joint-space commands
- Teleoperators (like gamepads) produce relative movements (delta positions), but robots expect absolute commands
- Model predictions are often normalized and need to be converted back to real-world scales

**Cross-Domain Translation:**

- Training data from one robot setup needs adaptation for deployment on different hardware
- Models trained with specific camera configurations must work with new camera arrangements
- Datasets with different naming conventions need harmonization

**That's where processors come in.** They serve as the universal translators that bridge these gaps, ensuring seamless data flow from sensors to models to actuators.

Processors are the data transformation backbone of LeRobot. They handle all the preprocessing and postprocessing steps needed to convert raw environment data into model-ready inputs and vice versa. This guide will walk you through everything you need to know about processors - from basic concepts to advanced usage patterns.

## What are Processors?

In robotics, data comes in many forms - images from cameras, joint positions from sensors, text instructions from users, and more. Each type of data requires specific transformations before a model can use it effectively. Models need this data to be:

- **Normalized**: Scaled to appropriate ranges for neural network processing
- **Batched**: Organized with proper dimensions for batch processing
- **Tokenized**: Text converted to numerical representations
- **Device-placed**: Moved to the right hardware (CPU/GPU)
- **Type-converted**: Cast to appropriate data types
Processors handle these transformations through composable, reusable steps that can be chained together into pipelines. Think of them as a modular assembly line where each station performs a specific transformation on your data.
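
To make four of the five bullets concrete (all but tokenization), here is what they amount to for a single camera image, written out by hand. This is illustrative only; in practice the processor steps described below do this for you:

```python
import torch

img = torch.randint(0, 256, (480, 640, 3), dtype=torch.uint8)  # raw frame (H, W, C)
img = img.float() / 255.0   # normalized: values scaled to [0, 1]
img = img.permute(2, 0, 1)  # type/layout converted: channel-first (C, H, W)
img = img.unsqueeze(0)      # batched: (1, C, H, W)
img = img.to("cpu")         # device-placed (swap in "cuda" on a GPU machine)
```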

## Core Concepts

### EnvTransition: The Universal Data Container

The `EnvTransition` is the fundamental data structure that flows through all processors. It's a typed dictionary that represents a complete robot-environment interaction:

```python
from lerobot.processor.pipeline import TransitionKey, EnvTransition

# Example transition from a robot collecting data
transition: EnvTransition = {
    TransitionKey.OBSERVATION: {
        "observation.images.camera0": camera0_image_tensor,  # Shape: (H, W, C)
        "observation.images.camera1": camera1_image_tensor,  # Shape: (H, W, C)
        "observation.state": joint_positions_tensor,  # Shape: (7,) for 7-DOF arm
        "observation.environment_state": env_state_tensor  # Shape: (3,) for object position
    },
    TransitionKey.ACTION: action_tensor,  # Shape: (7,) for joint velocities
    TransitionKey.REWARD: 0.0,  # Scalar reward signal
    TransitionKey.DONE: False,  # Episode termination flag
    TransitionKey.TRUNCATED: False,  # Episode truncation flag
    TransitionKey.INFO: {"success": False},  # Additional metadata
    TransitionKey.COMPLEMENTARY_DATA: {
        "task": "pick up the red cube",  # Language instruction
        "task_index": 0,  # Task identifier
        "index": 42  # Frame index
    }
}
```

Each key in the transition has a specific purpose:

- **OBSERVATION**: All sensor data (images, states, proprioception)
- **ACTION**: The action to execute or that was executed
- **REWARD**: Reinforcement learning signal
- **DONE/TRUNCATED**: Episode boundary indicators
- **INFO**: Arbitrary metadata
- **COMPLEMENTARY_DATA**: Task descriptions, indices, padding flags, and inter-step data (e.g., velocities computed by one step and reused by a later step to clip the action); a sketch of this pattern follows the list
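
A minimal sketch of that inter-step pattern, mirroring the `complementary_data` handling shown in the companion tutorial (the `joint_velocities` key is hypothetical, not a LeRobot convention):

```python
from lerobot.processor.pipeline import EnvTransition, TransitionKey

def stash_velocities(transition: EnvTransition, velocities) -> EnvTransition:
    """Store velocities in COMPLEMENTARY_DATA so a later step can clip the action."""
    new_transition = transition.copy()
    comp = dict(new_transition.get(TransitionKey.COMPLEMENTARY_DATA) or {})
    comp["joint_velocities"] = velocities  # hypothetical key, read by a later step
    new_transition[TransitionKey.COMPLEMENTARY_DATA] = comp
    return new_transition
```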

### ProcessorStep: The Building Block Interface

A `ProcessorStep` is a single transformation unit that processes transitions. It's a protocol (interface) that any processor step must implement:

```python
from lerobot.processor.pipeline import ProcessorStep, EnvTransition
from lerobot.configs.types import PolicyFeature
from typing import Any
import torch


class MyProcessorStep:
    """Example processor step interface - all methods must be implemented."""

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Transform the transition - this is the main processing logic."""
        raise NotImplementedError

    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        """Declare how this step transforms feature shapes/types."""
        raise NotImplementedError

    def get_config(self) -> dict[str, Any]:
        """Return JSON-serializable configuration for saving/loading."""
        raise NotImplementedError

    def state_dict(self) -> dict[str, torch.Tensor]:
        """Return any learnable parameters (tensors only)."""
        raise NotImplementedError

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """Load learnable parameters from saved state."""
        raise NotImplementedError

    def reset(self) -> None:
        """Reset any internal state between episodes."""
        raise NotImplementedError
```
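
As a complement to the bare protocol, here is a toy step that satisfies it end to end. This is a sketch; `ScaleActionStep` is illustrative, not a LeRobot class:

```python
from dataclasses import dataclass

from lerobot.processor.pipeline import EnvTransition, TransitionKey


@dataclass
class ScaleActionStep:
    """Toy ProcessorStep: multiply the action by a constant factor."""

    factor: float = 0.5

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        new_transition = transition.copy()
        action = new_transition.get(TransitionKey.ACTION)
        if action is not None:
            new_transition[TransitionKey.ACTION] = action * self.factor
        return new_transition

    def feature_contract(self, features):
        return features  # names and shapes are unchanged

    def get_config(self):
        return {"factor": self.factor}

    def state_dict(self):
        return {}  # no tensor state

    def load_state_dict(self, state):
        pass

    def reset(self):
        pass  # no internal state to reset
```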
|
||||
|
||||
### RobotProcessor: The Pipeline Orchestrator
|
||||
|
||||
The `RobotProcessor` chains multiple `ProcessorStep` instances together, executing them sequentially. It provides automatic format conversion to handle both batch dictionaries (from datasets) and EnvTransition dictionaries:
|
||||
|
||||
```python
|
||||
from lerobot.processor.pipeline import RobotProcessor, _default_batch_to_transition, _default_transition_to_batch
|
||||
|
||||
# Create a processing pipeline
|
||||
processor = RobotProcessor(
|
||||
steps=[
|
||||
step1, # First transformation
|
||||
step2, # Second transformation
|
||||
step3 # Third transformation
|
||||
],
|
||||
name="my_preprocessing_pipeline",
|
||||
|
||||
# Optional: Custom converters for input/output formats
|
||||
to_transition=_default_batch_to_transition, # How to convert batch dict → EnvTransition
|
||||
to_output=_default_transition_to_batch # How to convert EnvTransition → output format
|
||||
)
|
||||
|
||||
# The processor automatically handles different input formats:
# 1. If input is a batch dict (from dataset), converts to EnvTransition
# 2. Passes through each step sequentially
# 3. Converts back to original format (or custom output format)

# Example with batch dict input (common in training)
batch_dict = {"observation.state": tensor, "action": tensor}
output = processor(batch_dict)  # Automatically converted to/from EnvTransition

# Example with EnvTransition input (common in inference)
transition = {TransitionKey.OBSERVATION: {...}, TransitionKey.ACTION: ...}
output = processor(transition)  # Stays as EnvTransition throughout
```

The `to_transition` and `to_output` converters enable seamless integration with existing codebases.
By default, they handle the standard LeRobot batch format, but you can customize them for different data structures.

### Additional Converter Functions

LeRobot provides several specialized converter functions for common robotics scenarios:

```python
from lerobot.processor.converters import (
    to_transition_teleop_action,
    to_transition_robot_observation,
    to_output_robot_action,
    to_dataset_frame,
)
```

**`to_transition_teleop_action`** - Converts teleoperation device actions to EnvTransitions:

```python
# Use case: Phone, gamepad, or other teleop device control
phone_action = {"x": 0.1, "y": -0.2, "gripper": 0.8}
transition = to_transition_teleop_action(phone_action)
# Creates: {ACTION: {"action.x": 0.1, "action.y": -0.2, "action.gripper": 0.8}, ...}
```

**`to_transition_robot_observation`** - Converts robot sensor data to EnvTransitions:

```python
# Use case: Live robot observation during inference
robot_obs = {
    "joint_1": 0.5, "joint_2": -0.3,  # joint positions
    "camera_0": image_array,  # camera images
}
transition = to_transition_robot_observation(robot_obs)
# Creates: {OBSERVATION: {"observation.state.joint_1": 0.5, "observation.images.camera_0": image, ...}}
```

**`to_output_robot_action`** - Extracts robot-executable actions from EnvTransitions:

```python
# Use case: Converting model outputs back to robot commands
model_transition = {ACTION: {"action.joint_1": 0.2, "action.joint_2": 0.1}}
robot_action = to_output_robot_action(model_transition)
# Returns: {"joint_1": 0.2, "joint_2": 0.1} - ready for robot.send_action()
```

**`to_dataset_frame`** - Converts transitions to dataset-compatible format:

```python
# Use case: Saving processed data or creating training batches
features = {
    "action": {"names": ["joint_1", "joint_2"]},
    "observation.state": {"names": ["joint_1", "joint_2"]},
    "observation.images.camera0": {...},
}
batch = to_dataset_frame(transition, features)
# Returns: {"action": [0.2, 0.1], "observation.state": [0.5, -0.3], ...}
```

These converters are particularly useful when integrating with real robots, as shown in the examples:

```python
# Example from phone_so100_teleop.py - Real robot teleoperation
phone_to_robot_ee_pose = RobotProcessor(
    steps=[...],
    to_transition=to_transition_teleop_action,  # Phone → EnvTransition
    to_output=lambda tr: tr,  # Keep as EnvTransition
)

# Example from phone_so100_eval.py - Robot action execution
robot_ee_to_joints = RobotProcessor(
    steps=[...],
    to_transition=lambda tr: tr,  # Already EnvTransition
    to_output=to_output_robot_action,  # EnvTransition → Robot action
)

# Example from phone_so100_record.py - Dataset recording
robot_joints_to_ee_pose = RobotProcessor(
    steps=[...],
    to_transition=to_transition_robot_observation,  # Robot obs → EnvTransition
    to_output=lambda tr: tr,  # Keep as EnvTransition for dataset
)
```

### Data Format Conversion

Different data sources have different formats, but processors need a unified `EnvTransition` structure internally.
The default converters handle LeRobot datasets, but you can customize them:

```python
# Default: LeRobot batch format
lerobot_batch = {
    "observation.state": torch.tensor(...),
    "action": torch.tensor(...),
    "next.reward": torch.tensor(...),
    "task": ["pick cube", ...],
}
# → Converts to EnvTransition → Processes → Converts back

# Custom: Live robot data
robot_data = {
    "cameras": {"wrist_cam": np.array(...)},
    "joint_positions": np.array(...),
    "gripper_state": 0.5,
}

def robot_to_transition(data: dict) -> EnvTransition:
    return {
        TransitionKey.OBSERVATION: {
            "observation.images.wrist": torch.from_numpy(data["cameras"]["wrist_cam"]),
            "observation.state": torch.from_numpy(data["joint_positions"]),
        },
        TransitionKey.ACTION: None,
        # ... other fields with defaults
    }

# Use custom converter
processor = RobotProcessor(
    steps=[...],
    to_transition=robot_to_transition,
    to_output=lambda transition: transition,  # Keep as EnvTransition
)
```

**When to customize:** Live robot data, Gymnasium environments, legacy datasets, or any non-LeRobot format.
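
For instance, a converter for a Gymnasium-style environment follows the same pattern as `robot_to_transition` above. This is a minimal sketch: the `TransitionKey` import path and the reward/done key names are assumptions for illustration, so check `lerobot.processor` for the actual names.

```python
import torch

from lerobot.processor import RobotProcessor, TransitionKey  # import path assumed


def gym_to_transition(step_result) -> dict:
    """Convert a Gymnasium (obs, reward, terminated, truncated, info) tuple."""
    obs, reward, terminated, truncated, info = step_result
    return {
        TransitionKey.OBSERVATION: {
            "observation.state": torch.as_tensor(obs, dtype=torch.float32),
        },
        TransitionKey.ACTION: None,
        # The two keys below are illustrative; verify them against TransitionKey
        TransitionKey.REWARD: float(reward),
        TransitionKey.DONE: bool(terminated or truncated),
    }


gym_processor = RobotProcessor(
    steps=[...],
    to_transition=gym_to_transition,
    to_output=lambda transition: transition,
)
```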

## Common Processor Steps

LeRobot provides a rich set of pre-built processor steps for common transformations.
Let's explore each in detail:

### Data Normalization

Normalization is crucial for neural network training and inference.
The `NormalizerProcessor` handles both mean-std normalization and min-max scaling:

```python
from lerobot.processor.normalize_processor import NormalizerProcessor, UnnormalizerProcessor
from lerobot.configs.types import PolicyFeature, FeatureType, NormalizationMode

# Define what features exist in your data
features = {
    "observation.images.camera0": PolicyFeature(
        type=FeatureType.IMAGE,
        shape=(224, 224, 3),
    ),
    "observation.state": PolicyFeature(
        type=FeatureType.STATE,
        shape=(7,),
    ),
    "action": PolicyFeature(
        type=FeatureType.ACTION,
        shape=(7,),
    ),
}

# Define normalization strategy per feature type
norm_map = {
    FeatureType.IMAGE: NormalizationMode.MEAN_STD,  # Images: (x - mean) / std
    FeatureType.STATE: NormalizationMode.MIN_MAX,  # States: scale to [-1, 1]
    FeatureType.ACTION: NormalizationMode.MIN_MAX,  # Actions: scale to [-1, 1]
}

# Create normalizer with dataset statistics
normalizer = NormalizerProcessor(
    features=features,
    norm_map=norm_map,
    stats=dataset.meta.stats,  # Contains mean, std, min, max per feature
    normalize_keys={"observation.state", "action"},  # Optional: only normalize specific keys
)

# For postprocessing: inverse transformation
unnormalizer = UnnormalizerProcessor(
    features=features,
    norm_map=norm_map,
    stats=dataset.meta.stats,
)

# The normalizer automatically:
# - Detects which normalization to apply based on feature type
# - Handles device placement of statistics tensors
# - Skips keys not in stats or not in normalize_keys
# - Adds metadata about what was normalized
```
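
For reference, the two modes compute roughly the following. This is a sketch of the standard formulas implied by the comments above; the epsilon guard is an assumption for numerical safety, not a claim about the library's exact arithmetic:

```python
import torch

def mean_std_normalize(x: torch.Tensor, mean, std, eps=1e-8):
    # NormalizationMode.MEAN_STD: zero mean, unit variance
    return (x - mean) / (std + eps)

def min_max_normalize(x: torch.Tensor, min_val, max_val, eps=1e-8):
    # NormalizationMode.MIN_MAX: rescale to [-1, 1]
    return 2.0 * (x - min_val) / (max_val - min_val + eps) - 1.0
```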

### Device Management

The `DeviceProcessor` ensures tensors are on the right device with the right dtype:

```python
from lerobot.processor.device_processor import DeviceProcessor

# Basic GPU placement
gpu_processor = DeviceProcessor(device="cuda:0")

# Advanced: GPU with half-precision for inference
efficient_processor = DeviceProcessor(
    device="cuda:0",
    float_dtype="float16",  # Convert float32 -> float16 for memory efficiency
)

# The processor:
# - Moves all tensors to specified device
# - Preserves non-tensor data unchanged
# - Optionally converts float dtypes while preserving int/bool types
# - Uses non_blocking transfers for CUDA devices
# - Handles nested structures (observations, complementary_data)

# Supported float dtypes:
# "float16" / "half": 16-bit floating point
# "float32" / "float": 32-bit floating point (default)
# "float64" / "double": 64-bit floating point
# "bfloat16": Brain floating point (better for training)
```
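
A quick usage sketch, reusing the batch-dict calling convention shown earlier (the step is wrapped in a `RobotProcessor` so the dict conversion is handled for us):

```python
import torch

from lerobot.processor import RobotProcessor

pipeline = RobotProcessor(steps=[gpu_processor])
batch = {"observation.state": torch.randn(1, 7), "task": ["pick cube"]}
gpu_batch = pipeline(batch)
# "observation.state" now lives on cuda:0; the "task" strings pass through unchanged
```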

### Batch Processing

Models expect batched inputs, but robot interactions often produce unbatched data:

```python
from lerobot.processor.batch_processor import ToBatchProcessor

batch_processor = ToBatchProcessor()

# Automatically adds batch dimensions where needed:
# State: (7,) -> (1, 7)
# Image: (224, 224, 3) -> (1, 224, 224, 3)
# Action: (4,) -> (1, 4)
# Task: "pick_cube" -> ["pick_cube"]
# Already batched: (1, 7) -> (1, 7) [unchanged]

# The processor intelligently:
# - Detects tensor dimensionality
# - Adds batch dim to 1D states/actions
# - Adds batch dim to 3D images
# - Wraps string tasks in lists
# - Preserves already-batched data

# Example usage in inference:
single_observation = robot.get_observation()  # Unbatched
batched_input = batch_processor({"observation": single_observation})
model_output = model(batched_input)  # Model expects batch dim
```

### Text Tokenization

For language-conditioned policies, text instructions must be tokenized:

```python
from lerobot.processor.tokenizer_processor import TokenizerProcessor
from transformers import AutoTokenizer

# Option 1: Auto-load tokenizer by name
tokenizer_proc = TokenizerProcessor(
    tokenizer_name="google/paligemma-3b-pt-224",
    max_length=128,
    task_key="task",  # Where to find text in complementary_data
    padding="max_length",  # Pad to max_length
    padding_side="right",
    truncation=True,  # Truncate if longer than max_length
)

# Option 2: Provide custom tokenizer
custom_tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
custom_proc = TokenizerProcessor(
    tokenizer=custom_tokenizer,
    max_length=256,
    padding_side="left",  # For autoregressive models
)

# The processor:
# - Extracts task text from complementary_data
# - Tokenizes using HuggingFace tokenizer
# - Adds tokens and attention_mask to observations
# - Handles both single strings and lists of strings
# - Preserves original task in complementary_data

# Output structure:
# observation["observation.language.tokens"] = tensor([101, 2032, ...])
# observation["observation.language.attention_mask"] = tensor([1, 1, 0, ...])
```
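
To sketch how this fits together with the batch-dict convention (the default converters route the `"task"` entry into `complementary_data`, as in the batch-format example earlier):

```python
import torch

from lerobot.processor import RobotProcessor

pipeline = RobotProcessor(steps=[tokenizer_proc])
batch = {"observation.state": torch.zeros(1, 7), "task": "pick up the red cube"}
out = pipeline(batch)
# out gains "observation.language.tokens" and "observation.language.attention_mask"
```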

### Key Renaming

Different datasets and models may use different naming conventions.
The `RenameProcessor` solves this mismatch:

**Why is this useful?**

- When loading a model trained on a different dataset with different key names
- When using foundation models that expect specific key naming conventions
- When standardizing datasets from different sources
- When adapting legacy code to new naming standards

```python
from lerobot.processor.rename_processor import RenameProcessor

# Example 1: Dataset uses "top"/"wrist", model expects "camera0"/"camera1"
rename_proc = RenameProcessor(
    rename_map={
        "observation.images.top": "observation.images.camera0",
        "observation.images.wrist": "observation.images.camera1",
    }
)

# Example 2: Foundation model compatibility
# Your dataset: "observation.state", Foundation model: "proprio"
foundation_rename = RenameProcessor(
    rename_map={
        "observation.state": "proprio",
        "observation.images.main": "rgb",
    }
)

# Example 3: Standardizing multiple datasets
standardize_rename = RenameProcessor(
    rename_map={
        # Different robots might use different names
        "observation.joint_positions": "observation.state",
        "observation.gripper_state": "observation.end_effector",
        "observation.arm_camera": "observation.images.wrist",
    }
)
```

## Building Complete Pipelines

Let's build a real-world preprocessing and postprocessing pipeline for a vision-based
manipulation policy:

```python
# Consolidated imports
from lerobot.configs.types import FeatureType, NormalizationMode
from lerobot.processor import (
    RobotProcessor,
    NormalizerProcessor,
    UnnormalizerProcessor,
    DeviceProcessor,
    ToBatchProcessor,
    TokenizerProcessor,
    RenameProcessor,
)

# policy_features and dataset.meta.stats are assumed to be defined as in
# the normalization section above

# Step 1: Define the preprocessing pipeline
preprocessor = RobotProcessor(
    steps=[
        # 1. Standardize naming from dataset
        RenameProcessor(
            rename_map={
                "observation.images.top": "observation.images.camera0",
                "observation.images.wrist": "observation.images.camera1",
            }
        ),

        # 2. Add batch dimensions for model
        ToBatchProcessor(),

        # 3. Tokenize language instructions if present
        TokenizerProcessor(
            tokenizer_name="google/paligemma-3b-pt-224",
            max_length=64,
            task_key="task",
        ),

        # 4. Normalize numerical data
        NormalizerProcessor(
            features=policy_features,
            norm_map={
                FeatureType.IMAGE: NormalizationMode.MEAN_STD,
                FeatureType.STATE: NormalizationMode.MIN_MAX,
                FeatureType.ACTION: NormalizationMode.MIN_MAX,
            },
            stats=dataset.meta.stats,
        ),

        # 5. Move to GPU and convert to half precision
        DeviceProcessor(
            device="cuda:0",
            float_dtype="float16",
        ),
    ],
    name="robot_preprocessor",
)

# Step 2: Define the postprocessing pipeline
postprocessor = RobotProcessor(
    steps=[
        # 1. Move back to CPU for robot hardware
        DeviceProcessor(device="cpu"),

        # 2. Denormalize actions to original scale
        UnnormalizerProcessor(
            features=policy_features,
            norm_map={
                FeatureType.ACTION: NormalizationMode.MIN_MAX,
            },
            stats=dataset.meta.stats,
        ),
    ],
    name="robot_postprocessor",
)
```

## Using Processors in Practice

### Training Loop Integration

Here's how processors integrate into a training loop using the policy's forward method:

```python
from torch.utils.data import DataLoader

# Create dataset and dataloader
dataset = LeRobotDataset(repo_id="your_dataset")
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

# Initialize model and processors
# YourPolicy, optimizer, num_epochs and wandb are placeholders assumed to be set up elsewhere
model = YourPolicy.from_pretrained("your_model")
preprocessor = RobotProcessor.from_pretrained(
    "your_model",
    config_filename="robot_preprocessor.json",
)

# Training loop
for epoch in range(num_epochs):
    for batch in dataloader:
        # Preprocess batch
        processed_batch = preprocessor(batch)

        # Forward pass - returns loss and optional metrics
        loss, metrics = model.forward(processed_batch)

        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Log metrics if available
        if metrics:
            wandb.log(metrics)
```

### Inference Pipeline

For deployment, processors ensure consistent data handling with real robots:

```python
# Load model and processors
# build_dataset_frame and make_robot_from_config come from LeRobot's
# dataset and robot utilities
policy = YourPolicy.from_pretrained("path/to/model")
preprocessor = RobotProcessor.from_pretrained(
    "path/to/model",
    config_filename="robot_preprocessor.json",
)
postprocessor = RobotProcessor.from_pretrained(
    "path/to/model",
    config_filename="robot_postprocessor.json",
)

# Connect to robot
robot = make_robot_from_config(robot_config)
robot.connect()

# Inference loop
policy.eval()
# Reset the policy and processors
policy.reset()
preprocessor.reset()
postprocessor.reset()

done = False  # e.g., flipped by a timeout or user stop signal
with torch.no_grad():
    while not done:
        # Get observation from robot
        observation = robot.get_observation()

        # Build dataset-compatible frame
        observation_frame = build_dataset_frame(
            dataset.features,
            observation,
            prefix="observation",
        )

        # Add task instruction to complementary data
        observation_frame["task"] = "pick up the red cube"

        # Preprocess for model
        model_input = preprocessor(observation_frame)

        # Run policy
        raw_action = policy.select_action(model_input)

        # Postprocess action
        action_transition = {TransitionKey.ACTION: raw_action}
        processed = postprocessor(action_transition)
        action = processed[TransitionKey.ACTION]

        # Convert to robot action format
        robot_action = {
            key: action[i].item()
            for i, key in enumerate(robot.action_features)
        }

        # Execute on robot
        robot.send_action(robot_action)
```

## Saving and Loading Processors

Processors can be persisted and shared just like models, making them portable across different
environments and ensuring reproducibility:

### Local Save/Load

```python
# Save processor configuration and state
preprocessor.save_pretrained(
    "./my_robot_processor",
    config_filename="preprocessor.json",  # Optional custom name
)

# The save creates:
# my_robot_processor/
# ├── preprocessor.json                             # Configuration
# ├── preprocessor_step_0_normalizer.safetensors    # Step 0 state (stats)
# └── preprocessor_step_1_device.safetensors        # Step 1 state (if any)

# Load processor
loaded = RobotProcessor.from_pretrained(
    "./my_robot_processor",
    config_filename="preprocessor.json",
)
```

### HuggingFace Hub Integration

The HuggingFace Hub provides a centralized place to share and version your processors.
This is particularly useful for sharing preprocessing configurations with models,
ensuring that anyone who downloads your model can reproduce your exact preprocessing pipeline.
It also enables versioning and collaboration on preprocessing strategies.

```python
# Save to HuggingFace Hub
preprocessor.save_pretrained("username/my-robot-policy")

# Load from Hub with automatic download
hub_processor = RobotProcessor.from_pretrained(
    "username/my-robot-policy",
    config_filename="robot_preprocessor.json",
    revision="main",  # Optional: specific revision
    cache_dir="./cache",  # Optional: local cache directory
)

# The Hub integration provides:
# - Automatic versioning with git
# - Public or private sharing
# - Download caching for efficiency
# - Integration with model repositories
```

### Loading with Overrides

Sometimes you need to modify loaded processors for new environments or datasets.
The override mechanism allows you to update specific processor configurations without modifying
the saved files:

```python
# Load processor with configuration overrides
processor = RobotProcessor.from_pretrained(
    "./saved_processor",
    overrides={
        # Change device for different hardware
        "device_processor": {"device": "cuda:1"},

        # Update statistics for new dataset
        "normalizer_processor": {"stats": new_dataset.meta.stats},

        # Provide non-serializable objects (like tokenizers)
        "tokenizer_processor": {"tokenizer": custom_tokenizer},
    }
)

# Common override scenarios:
# 1. Adapting to different hardware (GPU availability)
# 2. Fine-tuning on new datasets with different statistics
# 3. Providing runtime dependencies that can't be serialized
# 4. Testing variations without creating new saved configs
```

## Creating Custom Processor Steps

Build your own processor steps for specialized transformations.
The key is implementing the required interface:

### Basic Custom Step with Registration

The registration mechanism allows your custom processors to be saved and loaded by name rather
than by module path.
This makes them more portable and easier to share:

```python
import torch
from dataclasses import dataclass

from lerobot.processor.pipeline import ProcessorStepRegistry, ObservationProcessor

# The @register decorator adds your processor to the global registry
# Use a unique name, preferably namespaced to avoid conflicts
@dataclass
@ProcessorStepRegistry.register("my_company/gaussian_noise")
class GaussianNoiseProcessor(ObservationProcessor):
    """Add Gaussian noise to observations for robustness training."""

    noise_std: float = 0.01
    training_only: bool = True
    is_training: bool = True

    def observation(self, observation):
        """Add noise to observation tensors."""
        if not self.is_training and self.training_only:
            return observation

        noisy_obs = {}
        for key, value in observation.items():
            if isinstance(value, torch.Tensor) and "image" not in key:
                # Add noise to non-image observations
                noise = torch.randn_like(value) * self.noise_std
                noisy_obs[key] = value + noise
            else:
                noisy_obs[key] = value

        return noisy_obs

    def get_config(self):
        return {
            "noise_std": self.noise_std,
            "training_only": self.training_only,
            "is_training": self.is_training,
        }

# Why register?
# 1. Enables saving by name: config saves "my_company/gaussian_noise" instead of full module path
# 2. More portable: Others can use your processor without your exact module structure
# 3. Version-safe: Module refactoring won't break saved configs
# 4. Cleaner configs: JSON shows readable names instead of long import paths
```

### Using Base Classes for Common Patterns

LeRobot provides base classes like `ObservationProcessor`, `ActionProcessor`, etc., that handle
the boilerplate of extracting and reinserting specific components:

```python
import torch
from dataclasses import dataclass

from lerobot.processor import ActionProcessor
from lerobot.processor.pipeline import ProcessorStepRegistry

@dataclass
@ProcessorStepRegistry.register("my_company/action_clipper")
class ActionClipProcessor(ActionProcessor):
    """Clip actions to safe ranges."""

    min_value: float = -1.0
    max_value: float = 1.0

    def action(self, action):
        """Process only the action component."""
        # No need to handle transition dict - base class does it
        return torch.clamp(action, self.min_value, self.max_value)

    def get_config(self):
        return {"min_value": self.min_value, "max_value": self.max_value}
```

For more advanced processor patterns including stateful processors, see [Implement Your Own Processor](implement_your_own_processor.mdx).

## Advanced Features

### Debugging with Hooks

Processors support hooks for monitoring and debugging without modifying the pipeline code:

```python
# Define monitoring hooks
def log_shapes(step_idx: int, transition: EnvTransition):
    """Log tensor shapes after each step."""
    obs = transition.get(TransitionKey.OBSERVATION)
    if obs:
        print(f"Step {step_idx} shapes:")
        for key, value in obs.items():
            if isinstance(value, torch.Tensor):
                print(f"  {key}: {value.shape}")

def check_nans(step_idx: int, transition: EnvTransition):
    """Check for NaN values."""
    obs = transition.get(TransitionKey.OBSERVATION)
    if obs:
        for key, value in obs.items():
            if isinstance(value, torch.Tensor) and torch.isnan(value).any():
                print(f"Warning: NaN detected in {key} at step {step_idx}")

# Register hooks
processor.register_after_step_hook(log_shapes)
processor.register_after_step_hook(check_nans)

# Process data - hooks will be called after each step
output = processor(input_data)

# Remove hooks when done debugging
processor.unregister_after_step_hook(log_shapes)
processor.unregister_after_step_hook(check_nans)
```

### Step-by-Step Inspection

Use `step_through()` for detailed debugging of the transformation pipeline:

```python
# Inspect data at each transformation stage
for i, intermediate in enumerate(processor.step_through(data)):
    print(f"\n=== After step {i} ===")

    # Check observation shapes
    obs = intermediate.get(TransitionKey.OBSERVATION)
    if obs:
        for key, value in obs.items():
            if isinstance(value, torch.Tensor):
                print(f"{key}: shape={value.shape}, "
                      f"dtype={value.dtype}, "
                      f"device={value.device}, "
                      f"range=[{value.min():.3f}, {value.max():.3f}]")

    # Check action if present
    action = intermediate.get(TransitionKey.ACTION)
    if action is not None and isinstance(action, torch.Tensor):
        print(f"action: shape={action.shape}, range=[{action.min():.3f}, {action.max():.3f}]")
```

### Pipeline Slicing

Extract subsets of a pipeline for testing or creating variations:

```python
# Get specific steps
first_three_steps = processor[:3]  # Returns new RobotProcessor
middle_step = processor[2]  # Returns single ProcessorStep

# Test individual steps
test_input = {...}
step_output = processor[0](test_input)  # Test first step only

# Create variations
variant_processor = RobotProcessor(
    steps=processor.steps[:-1] + [new_final_step],
    name="variant",
)
```

## Best Practices and Tips

### 1. Order Matters

The sequence of processors is crucial. Follow this general order:

**Preprocessing: Raw → Model-ready**

1. Rename (standardize keys)
2. Batch (add dimensions)
3. Tokenize (text → tokens)
4. Normalize (scale values)
5. Device (move to GPU)

**Postprocessing: Model → Robot-ready**

1. Device (move to CPU)
2. Unnormalize (restore scale)
3. Unbatch (remove dimensions if needed)

### 2. Registration Best Practices

```python
# Always register custom steps for better portability
@ProcessorStepRegistry.register("my_company/special_processor")
class SpecialProcessor:
    ...

# Use namespaced names to avoid conflicts
# Good: "my_company/augmentation"
# Bad: "augmentation" (too generic)

# Check registered processors
print(ProcessorStepRegistry.list())  # See all registered processors
```

### 3. Common Pitfalls and Solutions

**Tensor Device Mismatch:**

```python
# Problem: RuntimeError: Expected all tensors on same device
# Solution: Ensure DeviceProcessor is in pipeline
preprocessor = RobotProcessor(
    steps=[
        NormalizerProcessor(...),
        DeviceProcessor(device="cuda"),  # Add this
    ]
)
```

**Missing Statistics:**

```python
# Problem: NormalizerProcessor has no stats
# Solution 1: Compute stats from dataset
from lerobot.datasets.compute_stats import compute_stats
stats = compute_stats(dataset)

# Solution 2: Load with overrides
processor = RobotProcessor.from_pretrained(
    "model_path",
    overrides={"normalizer_processor": {"stats": dataset.meta.stats}}
)
```

## Next Steps

Now that you understand processors, explore these topics:

- [**Implement Your Own Processor**](implement_your_own_processor.mdx) - Deep dive into creating custom processors with advanced features like stateful processing
- [**Policy Documentation**](policies.mdx) - Learn how different policies use processors
- [**Dataset Documentation**](datasets.mdx) - Understand the data format that processors transform
- [**Training Guide**](training.mdx) - See processors in action during model training
- [**Evaluation Guide**](evaluation.mdx) - Learn about processor usage during policy evaluation

## Summary

Processors are the unsung heroes of robotics pipelines, handling the critical transformations between raw sensor data and model-ready tensors. By understanding and effectively using processors, you can:

- Build robust, reusable data pipelines
- Share preprocessing configurations across projects
- Debug data transformations systematically
- Ensure consistency between training and deployment
- Create custom transformations for specialized tasks

Remember: good preprocessing is often the difference between a model that works in theory
and one that works in practice!
The modular pipeline approach ensures your transformations are testable, reproducible,
and portable across different robots and environments.

@@ -31,7 +31,7 @@ pip install -e ".[dynamixel]"

To find the port for each bus servo adapter, run this script:

```bash
lerobot-find-port
python -m lerobot.find_port
```

<hfoptions id="example">
@@ -98,7 +98,7 @@ For a visual reference on how to set the motor ids please refer to [this video](
<hfoption id="Command">

```bash
lerobot-setup-motors \
python -m lerobot.setup_motors \
--robot.type=koch_follower \
--robot.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step
```

@@ -174,7 +174,7 @@ Do the same steps for the leader arm but modify the command or script accordingl
<hfoption id="Command">

```bash
lerobot-setup-motors \
python -m lerobot.setup_motors \
--teleop.type=koch_leader \
--teleop.port=/dev/tty.usbmodem575E0031751 \ # <- paste here the port found at previous step
```

@@ -211,7 +211,7 @@ Run the following command or API example to calibrate the follower arm:
<hfoption id="Command">

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
--robot.type=koch_follower \
--robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--robot.id=my_awesome_follower_arm # <- Give the robot a unique name
```

@@ -249,7 +249,7 @@ Do the same steps to calibrate the leader arm, run the following command or API
<hfoption id="Command">

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
--teleop.type=koch_leader \
--teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--teleop.id=my_awesome_leader_arm # <- Give the robot a unique name
```

@@ -60,7 +60,7 @@ First, we will assemble the two SO100/SO101 arms. One to attach to the mobile ba

To find the port for each bus servo adapter, run this script:

```bash
lerobot-find-port
python -m lerobot.find_port
```

<hfoptions id="example">
@@ -116,7 +116,7 @@ The instructions for configuring the motors can be found in the SO101 [docs](./s

You can run this command to setup motors for LeKiwi. It will first setup the motors for arm (id 6..1) and then setup motors for wheels (9,8,7)

```bash
lerobot-setup-motors \
python -m lerobot.setup_motors \
--robot.type=lekiwi \
--robot.port=/dev/tty.usbmodem58760431551 # <- paste here the port found at previous step
```

@@ -174,7 +174,7 @@ The calibration process is very important because it allows a neural network tra

Make sure the arm is connected to the Raspberry Pi and run this script or API example (on the Raspberry Pi via SSH) to launch calibration of the follower arm:

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
--robot.type=lekiwi \
--robot.id=my_awesome_kiwi # <- Give the robot a unique name
```

@@ -193,7 +193,7 @@ Then, to calibrate the leader arm (which is attached to the laptop/pc). Run the
<hfoption id="Command">

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
--teleop.type=so100_leader \
--teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--teleop.id=my_awesome_leader_arm # <- Give the robot a unique name
```

@@ -0,0 +1,195 @@
# Phone

Use your phone (iOS or Android) to control your robot.

**In this guide you'll learn:**

- How to connect an iOS/Android phone
- How phone pose is mapped to robot end‑effector (EE) targets
- How to tweak safety limits, gripper control, and IK settings

To use your phone to control your robot, install the relevant dependencies with:

```bash
pip install lerobot[phone]
```

## Get started

### Supported platforms

- iOS: Uses the HEBI Mobile I/O app (ARKit pose + buttons). Download the app first and open it; the examples will discover it on your network and stream the phone pose and inputs.
- Android: Uses the `teleop` package (WebXR). When you start the Python process, it prints a local URL. Open the link on your phone, tap Start, then press and hold Move to stream the pose.

Links:

- Android WebXR library: [`teleop` on PyPI](https://pypi.org/project/teleop/)
- iOS app: [HEBI Mobile I/O](https://docs.hebi.us/tools.html#mobile-io)

### Phone orientation and controls

- Orientation: hold the phone with the screen facing up and the top edge pointing in the same direction as the robot gripper. This ensures calibration aligns the phone's frame with the robot frame so motion feels natural.
- Enable/disable:
  - iOS: Hold `B1` to enable teleoperation, release to stop. The first press captures a reference pose.
  - Android: Press and hold the `Move` button, release to stop. The first press captures a reference pose.
- Gripper control:
  - iOS: Analog input `A3` controls the gripper as a velocity input.
  - Android: Buttons `A` and `B` act like increment/decrement (A opens, B closes). You can tune the velocity in the `GripperVelocityToJoint` step, as sketched below.
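
Conceptually, both platforms feed a velocity-style signal that is integrated into an absolute gripper position each control cycle, roughly like this (an illustrative one-liner, not the actual implementation; all names are hypothetical):

```python
# Illustrative sketch of velocity-style gripper control (hypothetical names)
target_gripper_pos = measured_gripper_pos + gripper_input * speed_factor * dt
```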

### Step 1: Choose the platform

Modify the examples to use `PhoneOS.IOS` or `PhoneOS.ANDROID` in `PhoneConfig`. The API is identical across platforms; only the input source differs. All examples are under `examples/` and have `phone_so100_*.py` variants.

Teleoperation example:

```36:43:examples/phone_so100_teleop.py
from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS

teleop_config = PhoneConfig(phone_os=PhoneOS.IOS)  # or PhoneOS.ANDROID
teleop_device = Phone(teleop_config)
```

### Step 2: Connect and calibrate

When `Phone(teleop_config)` is created and `connect()` is called, calibration is prompted automatically. Hold the phone in the orientation described above, then:

- iOS: press and hold `B1` to capture the reference pose.
- Android: press the `Move` button on the WebXR page to capture the reference pose.

Why calibrate? We capture the current pose so subsequent poses are expressed in a robot-aligned frame. When you press the button again to enable control, the reference is recaptured, which avoids drift if the phone was repositioned while control was disabled.
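
In code, that sequence is just the following (a minimal sketch; the `Phone` class is used as in the teleoperation example above, and its import location is assumed):

```python
from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS

phone = Phone(PhoneConfig(phone_os=PhoneOS.IOS))  # Phone class as in the example above
phone.connect()  # prompts for calibration: hold the pose described above, then press B1/Move
```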

### Step 3: Run an example

Run one of the example scripts to teleoperate, record a dataset, replay a dataset, or evaluate a policy.

All scripts assume you configured your robot (e.g., SO-100 follower) and set the correct serial port.

- Android: after starting the script, open the printed local URL on your phone, tap Start, then press and hold Move.
- iOS: open HEBI Mobile I/O first; B1 enables motion. A3 controls the gripper.

You can customize mapping or safety limits by editing the processor steps shown in the examples.

You can also remap inputs (e.g., use a different analog input) or adapt the pipeline to other robots (e.g., LeKiwi) by modifying the input and kinematics steps. More about this in the [Processors for Robots and Teleoperators](./processors_robots_teleop.mdx) guide.

- Run this example to teleoperate:

```bash
python examples/phone_so100_teleop.py
```

- Run this example to record a dataset, which saves absolute end-effector observations and actions:

```bash
python examples/phone_so100_record.py
```

- Run this example to replay recorded episodes:

```bash
python examples/phone_so100_replay.py
```

- Run this example to evaluate a pretrained policy:

```bash
python examples/phone_so100_eval.py
```

### Important pipeline steps and options

- Kinematics are used in multiple steps. We use [Placo](https://github.com/Rhoban/placo), a wrapper around Pinocchio, for handling our kinematics. We construct the kinematics object by passing the robot's URDF and target frame. We set `target_frame_name` to the gripper frame.

```44:49:examples/phone_so100_teleop.py
RobotKinematics(
    urdf_path="./src/lerobot/teleoperators/sim/so101_new_calib.urdf",
    target_frame_name="gripper_frame_link",
    joint_names=list(robot.bus.motors.keys()),
)
```

- The `MapPhoneActionToRobotAction` step converts the calibrated phone pose and inputs into target deltas and gripper commands; the snippet below shows what the step outputs.

```72:83:src/lerobot/teleoperators/phone/phone_processor.py
# Map calibrated phone pose to robot targets (enabled gates the motion)
act.update(
    {
        "action.enabled": enabled,
        "action.target_x": -pos[1] if enabled else 0.0,
        "action.target_y": pos[0] if enabled else 0.0,
        "action.target_z": pos[2] if enabled else 0.0,
        "action.target_wx": rotvec[1] if enabled else 0.0,
        "action.target_wy": rotvec[0] if enabled else 0.0,
        "action.target_wz": -rotvec[2] if enabled else 0.0,
        "action.gripper": gripper,
    }
)
```

- The `EEReferenceAndDelta` step converts target deltas to an absolute desired EE pose, storing a reference pose on enable. The `end_effector_step_sizes` set the step sizes for the EE pose and can be modified to change the motion speed.

```56:65:examples/phone_so100_teleop.py
EEReferenceAndDelta(
    kinematics=kinematics_solver,
    end_effector_step_sizes={"x": 0.5, "y": 0.5, "z": 0.5},
    motor_names=list(robot.bus.motors.keys()),
)
```

- The `EEBoundsAndSafety` step clamps EE motion to a workspace and checks for large EE step jumps to ensure safety. The `end_effector_bounds` define the workspace, while `max_ee_step_m` and `max_ee_twist_step_rad` cap the per-step translation and rotation; adjust them to change the safety limits.

```61:66:examples/phone_so100_teleop.py
EEBoundsAndSafety(
    end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]},
    max_ee_step_m=0.10,
    max_ee_twist_step_rad=0.50,
)
```

- The `GripperVelocityToJoint` step turns a velocity‑like gripper input into an absolute gripper position using the current measured state. The `speed_factor` is the factor by which the velocity is multiplied.

```78:81:examples/phone_so100_teleop.py
GripperVelocityToJoint(
    motor_names=list(robot.bus.motors.keys()),
    speed_factor=20.0,
)
```

#### Different IK initial guesses

We use different IK initial guesses in the kinematic steps. The initial guess is either the current measured joints or the previous IK solution.

- Closed loop (used in record/eval): sets `initial_guess_current_joints=True` so IK starts from the measured joints each frame.

```71:76:examples/phone_so100_eval.py
InverseKinematicsEEToJoints(
    kinematics=kinematics_solver,
    motor_names=list(robot.bus.motors.keys()),
    initial_guess_current_joints=True,  # closed loop
)
```

- Open loop (used in replay): sets `initial_guess_current_joints=False` so IK continues from the previous IK solution rather than the measured state. This preserves action stability when we replay without feedback.

```80:86:examples/phone_so100_replay.py
InverseKinematicsEEToJoints(
    kinematics=kinematics_solver,
    motor_names=list(robot.bus.motors.keys()),
    initial_guess_current_joints=False,  # open loop
)
```

### Pipeline steps explained

- MapPhoneActionToRobotAction: converts calibrated phone pose and inputs into target deltas and a gripper command. Motion is gated by an enable signal (B1 on iOS, Move on Android).
- AddRobotObservationAsComplimentaryData: reads current robot joints and inserts them under `complementary_data.raw_joint_positions` for FK/IK steps to use.
- EEReferenceAndDelta: latches a reference EE pose on enable and combines it with target deltas to produce an absolute desired EE pose each frame. When disabled, it keeps sending the last commanded pose.
- EEBoundsAndSafety: clamps the EE pose to a workspace and rate‑limits jumps for safety. Also declares `action.ee.*` features.
- InverseKinematicsEEToJoints: turns an EE pose into joint positions with IK. `initial_guess_current_joints=True` is recommended for closed‑loop control; set it to `False` for open‑loop replay for stability.
- GripperVelocityToJoint: integrates a velocity‑like gripper input into an absolute gripper position using the current measured state.
- ForwardKinematicsJointsToEE: computes `observation.state.ee.*` from observed joints for logging and training on EE state.

### Troubleshooting

- iOS not discovered: ensure HEBI Mobile I/O is open and your laptop/phone are on the same network.
- Android URL not reachable: check that you used `https` instead of `http`, use the exact IP printed by the script, and tell your browser to proceed past the self-signed-certificate warning.
- Motion feels inverted: adjust the sign flips in `MapPhoneActionToRobotAction` or swap axes to match your setup.

@@ -0,0 +1,148 @@
# Processors for Robots and Teleoperators

This guide shows how to build and modify processing pipelines that connect teleoperators (e.g., phone) to robots and datasets. Pipelines standardize conversions between different action/observation spaces so you can swap teleops and robots without rewriting glue code.

We use the Phone to SO‑100 follower examples for concreteness, but the same patterns apply to other robots.

**What you'll learn**

- Absolute vs. relative EE control: What each means, trade‑offs, and how to choose for your task.
- Three-pipeline pattern: How to map teleop actions → dataset actions → robot commands, and robot observations → dataset observations.
- Adapters (`to_transition` / `to_output`): How these convert raw dicts to `EnvTransition` and back to reduce boilerplate.
- Dataset feature contracts: How steps declare features via `transform_features(...)`, and how to aggregate/merge them for recording.
- Choosing a representation: When to store joints, absolute EE poses, or relative EE deltas—and how that affects training.
- Pipeline customization guidance: How to swap robots/URDFs safely and tune bounds, step sizes, and options like IK initialization.

### Absolute vs relative EE control

The examples in this guide use absolute end-effector (EE) poses because they are easy to reason about. In practice, relative EE deltas or joint positions are often preferred as learning features.

You can choose what you save and learn (joints, absolute EE poses, or relative EE deltas) by using or implementing the right steps (and `transform_features()`) in your pipelines.

## Three pipelines

We often compose three pipelines. Depending on your setup, some can be empty if action and observation spaces already match.
Each pipeline handles a different conversion between action and observation spaces. Below is a quick explanation of each.

1. Pipeline 1: Teleop action space → dataset action space (phone pose → EE targets)
2. Pipeline 2: Dataset action space → robot command space (EE targets → joints)
3. Pipeline 3: Robot observation space → dataset observation space (joints → EE pose)

Below is an example of the three pipelines that we use in the phone to SO-100 follower examples:

```69:90:examples/phone_so100_record.py
phone_to_robot_ee_pose = RobotProcessor(  # teleop -> dataset action
    steps=[MapPhoneActionToRobotAction(platform=teleop_config.phone_os),
           AddRobotObservationAsComplimentaryData(robot=robot),
           EEReferenceAndDelta(kinematics=kinematics_solver,
                               end_effector_step_sizes={"x": 0.5, "y": 0.5, "z": 0.5},
                               motor_names=list(robot.bus.motors.keys())),
           EEBoundsAndSafety(end_effector_bounds={"min": [-1, -1, -1], "max": [1, 1, 1]},
                             max_ee_step_m=0.20, max_ee_twist_step_rad=0.50)],
    to_transition=to_transition_teleop_action,
    to_output=lambda tr: tr,
)

robot_ee_to_joints = RobotProcessor(  # dataset action -> robot
    steps=[InverseKinematicsEEToJoints(kinematics=kinematics_solver,
                                       motor_names=list(robot.bus.motors.keys()),
                                       initial_guess_current_joints=True),
           GripperVelocityToJoint(motor_names=list(robot.bus.motors.keys()), speed_factor=20.0)],
    to_transition=lambda tr: tr,
    to_output=to_output_robot_action,
)

robot_joints_to_ee_pose = RobotProcessor(  # robot obs -> dataset obs
    steps=[ForwardKinematicsJointsToEE(kinematics=kinematics_solver,
                                       motor_names=list(robot.bus.motors.keys()))],
    to_transition=to_transition_robot_observation,
    to_output=lambda tr: tr,
)
```

## Why to_transition / to_output

To convert from robot/teleoperator to pipeline and back, we use the `to_transition` and `to_output` pipeline adapters.
They standardize conversions to reduce boilerplate code, and form the bridge between the robot's and teleoperator's raw dicts and the pipeline's `EnvTransition` format.
In the phone to SO-100 follower examples we use the following adapters:

- `to_transition_teleop_action`: transforms the teleop action dict to a pipeline transition (puts keys under `action.*`, converts scalars/arrays to tensors, keeps objects like `Rotation` intact)
- `to_output_robot_action`: transforms the pipeline transition to a robot action dict (extracts keys ending with `.pos`/`.vel` and strips the `action.` prefix)
- `to_transition_robot_observation`: transforms the robot observation dict to a pipeline transition (splits state vs images; stores state under `observation.state.*` and images under `observation.images.*`)

See `src/lerobot/processor/converters.py` for more details.
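
As a rough illustration of the first adapter's behavior (a sketch based on the bullet above, not the actual implementation; the `TransitionKey` import path is assumed):

```python
import torch

from lerobot.processor import TransitionKey  # import path assumed


def teleop_action_to_transition_sketch(action: dict) -> dict:
    """Roughly what to_transition_teleop_action does, per the description above."""
    prefixed = {}
    for key, value in action.items():
        if isinstance(value, (int, float)):
            value = torch.tensor(value)  # scalars/arrays become tensors
        prefixed[f"action.{key}"] = value  # keys are namespaced under action.*
    return {TransitionKey.ACTION: prefixed}
```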

## Dataset feature contracts

Dataset features are the keys saved in the dataset. Each step can declare what its dataset features are via `transform_features(...)`. We can then aggregate features per pipeline with `aggregate_pipeline_dataset_features()` and merge multiple groups with `merge_features(...)`.

Below is an example of how we declare features with the `transform_features` method in the phone to SO-100 follower examples:

```203:211:src/lerobot/robots/so100_follower/robot_kinematic_processor.py
def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
    # Because this is the last step, we specify the dataset features of this step that we want to be stored in the dataset
    features["action.ee.x"] = float
    features["action.ee.y"] = float
    features["action.ee.z"] = float
    features["action.ee.wx"] = float
    features["action.ee.wy"] = float
    features["action.ee.wz"] = float
    return features
```

Tip: declare features at the last step that produces them (e.g., `EEBoundsAndSafety` declares `action.ee.*`, `ForwardKinematicsJointsToEE` declares `observation.state.ee.*`).

Below is an example of how we aggregate and merge features in the phone to SO-100 follower examples:

```121:145:examples/phone_so100_record.py
action_ee = aggregate_pipeline_dataset_features(
    pipeline=phone_to_robot_ee_pose,
    initial_features=phone.action_features,
    use_videos=True,
    patterns=["action.ee"],
)

gripper = aggregate_pipeline_dataset_features(
    pipeline=robot_ee_to_joints,
    initial_features={},
    use_videos=True,
    patterns=["action.gripper.pos", "observation.state.gripper.pos"],
)

observation_ee = aggregate_pipeline_dataset_features(
    pipeline=robot_joints_to_ee_pose,
    initial_features=robot.observation_features,
    use_videos=True,
    patterns=["observation.state.ee"],
)

dataset_features = merge_features(action_ee, gripper, observation_ee)
```

How it works:

- `aggregate_pipeline_dataset_features(...)`: applies `transform_features` across the pipeline and filters by patterns (images included when `use_videos=True`).
- `merge_features(...)`: combines multiple feature dicts.
- Recording uses `to_dataset_frame(...)` to build frames consistent with `dataset.features` before we call `add_frame(...)` to add the frame to the dataset, as sketched below.
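
Putting the last point together, one recording step looks roughly like this (a sketch: `transition` is assumed to carry the processed action and observation, the task string is made up, and the exact `add_frame` signature may differ from the script):

```python
# Sketch of one recording step (names and call signatures are assumptions)
frame = to_dataset_frame(transition, dataset.features)
frame["task"] = "pick up the red cube"
dataset.add_frame(frame)
```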

## Guidance when customizing robot pipelines

You can store any of the following features as your action/observation space:

- Joint positions
- Absolute EE poses
- Relative EE deltas
- Other features: joint velocities, etc.

Pick what you want to use for your policy action and observation space and configure/modify the pipelines and steps accordingly.
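
For instance, a hypothetical step that emits relative EE deltas could declare its dataset features like this (a sketch modeled on the `transform_features` example above; the `action.ee_delta.*` names are made up for illustration):

```python
def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
    # Declare relative EE deltas instead of absolute poses (illustrative names)
    for axis in ("x", "y", "z", "wx", "wy", "wz"):
        features[f"action.ee_delta.{axis}"] = float
    return features
```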

### Different robots

- Swap the `RobotKinematics` URDF and `motor_names`. Ensure `target_frame_name` points to your gripper/wrist.

### Safety first

- When changing pipelines, start with tight bounds and implement safety steps when working with real robots.
- It's advised to start in simulation first and then move to real robots.

Hope this guide helps you get started with customizing your robot pipelines. If you run into any issues at any point, jump into our [Discord community](https://discord.com/invite/s3KuuzsPFb) for support.

@@ -54,7 +54,7 @@ If you don't have a gpu device, you can train using our notebook on [.

```bash
cd lerobot && lerobot-train \
cd lerobot && python -m lerobot.scripts.train \
--policy.path=lerobot/smolvla_base \
--dataset.repo_id=${HF_USER}/mydataset \
--batch_size=64 \
```
@@ -73,7 +73,7 @@ cd lerobot && lerobot-train \
Fine-tuning is an art. For a complete overview of the options for finetuning, run

```bash
lerobot-train --help
python -m lerobot.scripts.train --help
```

<p align="center">
@@ -97,7 +97,7 @@ Similarly for when recording an episode, it is recommended that you are logged i
Once you are logged in, you can run inference in your setup by doing:

```bash
lerobot-record \
python -m lerobot.record \
--robot.type=so101_follower \
--robot.port=/dev/ttyACM0 \ # <- Use your port
--robot.id=my_blue_follower_arm \ # <- Use your robot id
```

@@ -26,7 +26,7 @@ Unlike the SO-101, the motor connectors are not easily accessible once the arm i
To find the port for each bus servo adapter, run this script:

```bash
lerobot-find-port
python -m lerobot.find_port
```

<hfoptions id="example">
@@ -93,7 +93,7 @@ For a visual reference on how to set the motor ids please refer to [this video](
<hfoption id="Command">

```bash
lerobot-setup-motors \
python -m lerobot.setup_motors \
--robot.type=so100_follower \
--robot.port=/dev/tty.usbmodem585A0076841 # <- paste here the port found at previous step
```
@@ -168,7 +168,7 @@ Do the same steps for the leader arm.
<hfoptions id="setup_motors">
<hfoption id="Command">
```bash
lerobot-setup-motors \
python -m lerobot.setup_motors \
--teleop.type=so100_leader \
--teleop.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step
```
@@ -568,7 +568,7 @@ Run the following command or API example to calibrate the follower arm:
<hfoption id="Command">

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
--robot.type=so100_follower \
--robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--robot.id=my_awesome_follower_arm # <- Give the robot a unique name
```
@@ -606,7 +606,7 @@ Do the same steps to calibrate the leader arm, run the following command or API
<hfoption id="Command">

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
--teleop.type=so100_leader \
--teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--teleop.id=my_awesome_leader_arm # <- Give the robot a unique name
```

@@ -162,7 +162,7 @@ It is advisable to install one 3-pin cable in the motor after placing them befor
To find the port for each bus servo adapter, connect MotorBus to your computer via USB and power. Run the following script and disconnect the MotorBus when prompted:

```bash
lerobot-find-port
python -m lerobot.find_port
```

<hfoptions id="example">
@@ -240,7 +240,7 @@ Connect the usb cable from your computer and the power supply to the follower ar
<hfoption id="Command">

```bash
lerobot-setup-motors \
python -m lerobot.setup_motors \
--robot.type=so101_follower \
--robot.port=/dev/tty.usbmodem585A0076841 # <- paste here the port found at previous step
```
@@ -316,7 +316,7 @@ Do the same steps for the leader arm.
<hfoption id="Command">

```bash
lerobot-setup-motors \
python -m lerobot.setup_motors \
--teleop.type=so101_leader \
--teleop.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step
```
@@ -353,7 +353,7 @@ Run the following command or API example to calibrate the follower arm:
<hfoption id="Command">

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
--robot.type=so101_follower \
--robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--robot.id=my_awesome_follower_arm # <- Give the robot a unique name
```
@@ -402,7 +402,7 @@ Do the same steps to calibrate the leader arm, run the following command or API
<hfoption id="Command">

```bash
lerobot-calibrate \
python -m lerobot.calibrate \
--teleop.type=so101_leader \
--teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot
--teleop.id=my_awesome_leader_arm # <- Give the robot a unique name
```

@@ -62,7 +62,7 @@ By default, every field takes its default value specified in the dataclass. If a
Let's say that we want to train [Diffusion Policy](../src/lerobot/policies/diffusion) on the [pusht](https://huggingface.co/datasets/lerobot/pusht) dataset, using the [gym_pusht](https://github.com/huggingface/gym-pusht) environment for evaluation. The command to do so would look like this:

```bash
lerobot-train \
python -m lerobot.scripts.train \
--dataset.repo_id=lerobot/pusht \
--policy.type=diffusion \
--env.type=pusht
```
@@ -77,7 +77,7 @@ Let's break this down:
Let's see another example. Let's say you've been training [ACT](../src/lerobot/policies/act) on [lerobot/aloha_sim_insertion_human](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_human) using the [gym-aloha](https://github.com/huggingface/gym-aloha) environment for evaluation with:

```bash
lerobot-train \
python -m lerobot.scripts.train \
--policy.type=act \
--dataset.repo_id=lerobot/aloha_sim_insertion_human \
--env.type=aloha \
```
@@ -90,7 +90,7 @@ We now want to train a different policy for aloha on another task. We'll change
Looking at the [`AlohaEnv`](../src/lerobot/envs/configs.py) config, the task is `"AlohaInsertion-v0"` by default, which corresponds to the task we trained on in the command above. The [gym-aloha](https://github.com/huggingface/gym-aloha?tab=readme-ov-file#description) environment also has the `AlohaTransferCube-v0` task which corresponds to this other task we want to train on. Putting this together, we can train this new policy on this different task using:

```bash
lerobot-train \
python -m lerobot.scripts.train \
--policy.type=act \
--dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
--env.type=aloha \
```
@@ -127,7 +127,7 @@ Now, let's assume that we want to reproduce the run just above. That run has pro
We can then simply load the config values from this file using:

```bash
lerobot-train \
python -m lerobot.scripts.train \
--config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \
--output_dir=outputs/train/act_aloha_transfer_2
```
@@ -137,7 +137,7 @@ lerobot-train \
Similarly to Hydra, we can still override some parameters in the CLI if we want to, e.g.:

```bash
lerobot-train \
python -m lerobot.scripts.train \
--config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \
--output_dir=outputs/train/act_aloha_transfer_2
--policy.n_action_steps=80
```
@@ -148,7 +148,7 @@ lerobot-train \
`--config_path` can also accept the repo_id of a repo on the hub that contains a `train_config.json` file, e.g. running:

```bash
lerobot-train --config_path=lerobot/diffusion_pusht
python -m lerobot.scripts.train --config_path=lerobot/diffusion_pusht
```

will start a training run with the same configuration used for training [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht)
@@ -160,7 +160,7 @@ Being able to resume a training run is important in case it crashed or aborted f
Let's reuse the command from the previous run and add a few more options:

```bash
lerobot-train \
python -m lerobot.scripts.train \
--policy.type=act \
--dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
--env.type=aloha \
```
@@ -179,7 +179,7 @@ INFO 2025-01-24 16:10:56 ts/train.py:263 Checkpoint policy after step 100
Now let's simulate a crash by killing the process (hit `ctrl`+`c`). We can then simply resume this run from the last checkpoint available with:

```bash
lerobot-train \
python -m lerobot.scripts.train \
--config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \
--resume=true
```
@@ -190,7 +190,7 @@ Another reason for which you might want to resume a run is simply to extend trai
You could double the number of steps of the previous run with:

```bash
lerobot-train \
python -m lerobot.scripts.train \
--config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \
--resume=true \
--steps=200000
```
@@ -224,7 +224,7 @@ In addition to the features currently in Draccus, we've added a special `.path`
For example, we could fine-tune a [policy pre-trained on the aloha transfer task](https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human) on the aloha insertion task. We can achieve this with:

```bash
lerobot-train \
python -m lerobot.scripts.train \
--policy.path=lerobot/act_aloha_sim_transfer_cube_human \
--dataset.repo_id=lerobot/aloha_sim_insertion_human \
--env.type=aloha \
```
@@ -270,7 +270,7 @@ We'll summarize here the main use cases to remember from this tutorial.
#### Train a policy from scratch – CLI

```bash
lerobot-train \
python -m lerobot.scripts.train \
--policy.type=act \ # <- select 'act' policy
--env.type=pusht \ # <- select 'pusht' environment
--dataset.repo_id=lerobot/pusht # <- train on this dataset
```
@@ -279,7 +279,7 @@ lerobot-train \
#### Train a policy from scratch - config file + CLI

```bash
lerobot-train \
python -m lerobot.scripts.train \
--config_path=path/to/pretrained_model \ # <- can also be a repo_id
--policy.n_action_steps=80 # <- you may still override values
```
|
||||
```
|
||||
@@ -287,7 +287,7 @@ lerobot-train \
|
||||
#### Resume/continue a training run
|
||||
|
||||
```bash
|
||||
lerobot-train \
|
||||
python -m lerobot.scripts.train \
|
||||
--config_path=checkpoint/pretrained_model/ \
|
||||
--resume=true \
|
||||
--steps=200000 # <- you can change some training parameters
|
||||
@@ -296,7 +296,7 @@ lerobot-train \
|
||||
#### Fine-tuning
|
||||
|
||||
```bash
|
||||
lerobot-train \
|
||||
python -m lerobot.scripts.train \
|
||||
--policy.path=lerobot/act_aloha_sim_transfer_cube_human \ # <- can also be a local path to a checkpoint
|
||||
--dataset.repo_id=lerobot/aloha_sim_insertion_human \
|
||||
--env.type=aloha \
|
||||
|
||||
@@ -18,7 +18,7 @@ Replays the actions of an episode from a dataset on a robot.
|
||||
Example:
|
||||
|
||||
```shell
|
||||
lerobot-replay \
|
||||
python -m lerobot.replay \
|
||||
--robot.type=so100_follower \
|
||||
--robot.port=/dev/tty.usbmodem58760431541 \
|
||||
--robot.id=black \
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
from lerobot.datasets.lerobot_dataset import LeRobotDataset
|
||||
from lerobot.datasets.utils import hw_to_dataset_features
|
||||
from lerobot.policies.act.modeling_act import ACTPolicy
|
||||
from lerobot.policies.factory import make_processor
|
||||
from lerobot.record import record_loop
|
||||
from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig
|
||||
from lerobot.utils.control_utils import init_keyboard_listener
|
||||
@@ -11,12 +12,14 @@ NUM_EPISODES = 2
|
||||
FPS = 30
|
||||
EPISODE_TIME_SEC = 60
|
||||
TASK_DESCRIPTION = "My task description"
|
||||
HF_MODEL_ID = "<hf_username>/<model_repo_id>"
|
||||
HF_DATASET_ID = "<hf_username>/<eval_dataset_repo_id>"
|
||||
|
||||
# Create the robot and teleoperator configurations
|
||||
robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi")
|
||||
robot = LeKiwiClient(robot_config)
|
||||
|
||||
policy = ACTPolicy.from_pretrained("<hf_username>/<policy_repo_id>")
|
||||
policy = ACTPolicy.from_pretrained(HF_MODEL_ID)
|
||||
|
||||
# Configure the dataset features
|
||||
action_features = hw_to_dataset_features(robot.action_features, "action")
|
||||
@@ -25,7 +28,7 @@ dataset_features = {**action_features, **obs_features}
|
||||
|
||||
# Create the dataset
|
||||
dataset = LeRobotDataset.create(
|
||||
repo_id="<hf_username>/<eval_dataset_repo_id>",
|
||||
repo_id=HF_DATASET_ID,
|
||||
fps=FPS,
|
||||
features=dataset_features,
|
||||
robot_type=robot.name,
|
||||
@@ -43,6 +46,12 @@ listener, events = init_keyboard_listener()
|
||||
if not robot.is_connected:
|
||||
raise ValueError("Robot is not connected!")
|
||||
|
||||
preprocessor, postprocessor = make_processor(
|
||||
policy_cfg=policy,
|
||||
pretrained_path=HF_MODEL_ID,
|
||||
dataset_stats=dataset.meta.stats,
|
||||
)
|
||||
|
||||
recorded_episodes = 0
|
||||
while recorded_episodes < NUM_EPISODES and not events["stop_recording"]:
|
||||
log_say(f"Running inference, recording eval episode {recorded_episodes} of {NUM_EPISODES}")
|
||||
@@ -53,6 +62,8 @@ while recorded_episodes < NUM_EPISODES and not events["stop_recording"]:
|
||||
events=events,
|
||||
fps=FPS,
|
||||
policy=policy,
|
||||
preprocessor=preprocessor,
|
||||
postprocessor=postprocessor,
|
||||
dataset=dataset,
|
||||
control_time_s=EPISODE_TIME_SEC,
|
||||
single_task=TASK_DESCRIPTION,
|
||||
|
||||
@@ -38,7 +38,7 @@ while True:
|
||||
keyboard_keys = keyboard.get_action()
|
||||
base_action = robot._from_keyboard_to_base_action(keyboard_keys)
|
||||
|
||||
log_rerun_data(observation, {**arm_action, **base_action})
|
||||
log_rerun_data(observation=observation, action={**arm_action, **base_action})
|
||||
|
||||
action = {**arm_action, **base_action} if len(base_action) > 0 else arm_action
|
||||
|
||||
|
||||
@@ -0,0 +1,158 @@
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features
from lerobot.datasets.utils import merge_features
from lerobot.model.kinematics import RobotKinematics
from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.policies.factory import make_processor
from lerobot.processor.converters import (
    to_output_robot_action,
    to_transition_robot_observation,
)
from lerobot.processor.pipeline import RobotProcessor
from lerobot.record import record_loop
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.robot_kinematic_processor import (
    AddRobotObservationAsComplimentaryData,
    ForwardKinematicsJointsToEE,
    InverseKinematicsEEToJoints,
)
from lerobot.robots.so100_follower.so100_follower import SO100Follower
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import _init_rerun

NUM_EPISODES = 5
FPS = 30
EPISODE_TIME_SEC = 60
TASK_DESCRIPTION = "My task description"
HF_MODEL_ID = "<hf_username>/<model_repo_id>"
HF_DATASET_ID = "<hf_username>/<dataset_repo_id>"

# Initialize the robot with degrees
camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
robot_config = SO100FollowerConfig(
    port="/dev/tty.usbmodem58760434471",
    id="my_awesome_follower_arm",
    cameras=camera_config,
    use_degrees=True,
)

# Initialize the robot
robot = SO100Follower(robot_config)

# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
kinematics_solver = RobotKinematics(
    urdf_path="./src/lerobot/teleoperators/sim/so101_new_calib.urdf",
    target_frame_name="gripper_frame_link",
    joint_names=list(robot.bus.motors.keys()),
)

# Build pipeline to convert ee pose action to joint action
robot_ee_to_joints = RobotProcessor(
    steps=[
        AddRobotObservationAsComplimentaryData(robot=robot),
        InverseKinematicsEEToJoints(
            kinematics=kinematics_solver,
            motor_names=list(robot.bus.motors.keys()),
            initial_guess_current_joints=True,
        ),
    ],
    to_transition=lambda tr: tr,
    to_output=to_output_robot_action,
)

# Build pipeline to convert joint observation to ee pose observation
robot_joints_to_ee_pose = RobotProcessor(
    steps=[
        ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys()))
    ],
    to_transition=to_transition_robot_observation,
    to_output=lambda tr: tr,
)

# Build dataset action and gripper features
action_ee_and_gripper = aggregate_pipeline_dataset_features(
    pipeline=robot_ee_to_joints,
    initial_features={},
    use_videos=True,
    patterns=["action.ee", "action.gripper.pos", "observation.state.gripper.pos"],
)  # Get all ee action features + gripper pos action features

# Build dataset observation features
obs_ee = aggregate_pipeline_dataset_features(
    pipeline=robot_joints_to_ee_pose,
    initial_features=robot.observation_features,
    use_videos=True,
    patterns=["observation.state.ee"],
)  # Get all ee observation features

dataset_features = merge_features(obs_ee, action_ee_and_gripper)

print("All dataset features: ", dataset_features)

# Create the dataset
dataset = LeRobotDataset.create(
    repo_id=HF_DATASET_ID,
    fps=FPS,
    features=dataset_features,
    robot_type=robot.name,
    use_videos=True,
    image_writer_threads=4,
)

# Initialize the keyboard listener and rerun visualization
_, events = init_keyboard_listener()
_init_rerun(session_name="recording_phone")

# Connect the robot
robot.connect()

episode_idx = 0

policy = ACTPolicy.from_pretrained(HF_MODEL_ID)
preprocessor, postprocessor = make_processor(
    policy_cfg=policy,
    pretrained_path=HF_MODEL_ID,
    dataset_stats=dataset.meta.stats,
)

for episode_idx in range(NUM_EPISODES):
    log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}")

    record_loop(
        robot=robot,
        events=events,
        fps=FPS,
        policy=policy,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        dataset=dataset,
        control_time_s=EPISODE_TIME_SEC,
        single_task=TASK_DESCRIPTION,
        display_data=True,
        robot_action_processor=robot_ee_to_joints,
        robot_observation_processor=robot_joints_to_ee_pose,
    )
    dataset.save_episode()

# Clean up
log_say("Stop recording")
robot.disconnect()
dataset.push_to_hub()
@@ -0,0 +1,215 @@
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features
from lerobot.datasets.utils import merge_features
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor.converters import (
    to_output_robot_action,
    to_transition_robot_observation,
    to_transition_teleop_action,
)
from lerobot.processor.pipeline import RobotProcessor
from lerobot.record import record_loop
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.robot_kinematic_processor import (
    AddRobotObservationAsComplimentaryData,
    EEBoundsAndSafety,
    EEReferenceAndDelta,
    ForwardKinematicsJointsToEE,
    GripperVelocityToJoint,
    InverseKinematicsEEToJoints,
)
from lerobot.robots.so100_follower.so100_follower import SO100Follower
from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS
from lerobot.teleoperators.phone.phone import Phone
from lerobot.teleoperators.phone.phone_processor import MapPhoneActionToRobotAction
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import _init_rerun

NUM_EPISODES = 10
FPS = 30
EPISODE_TIME_SEC = 60
RESET_TIME_SEC = 30
TASK_DESCRIPTION = "My task description"
HF_REPO_ID = "<hf_username>/<dataset_repo_id>"

# Create the robot and teleoperator configurations
camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
robot_config = SO100FollowerConfig(
    port="/dev/tty.usbmodem58760434471",
    id="my_awesome_follower_arm",
    cameras=camera_config,
    use_degrees=True,
)
teleop_config = PhoneConfig(phone_os=PhoneOS.IOS)  # or PhoneOS.ANDROID

# Initialize the robot and teleoperator
robot = SO100Follower(robot_config)
phone = Phone(teleop_config)

# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
kinematics_solver = RobotKinematics(
    urdf_path="./src/lerobot/teleoperators/sim/so101_new_calib.urdf",
    target_frame_name="gripper_frame_link",
    joint_names=list(robot.bus.motors.keys()),
)

# Build pipeline to convert phone action to ee pose action
phone_to_robot_ee_pose = RobotProcessor(
    steps=[
        MapPhoneActionToRobotAction(platform=teleop_config.phone_os),
        AddRobotObservationAsComplimentaryData(robot=robot),
        EEReferenceAndDelta(
            kinematics=kinematics_solver,
            end_effector_step_sizes={"x": 0.5, "y": 0.5, "z": 0.5},
            motor_names=list(robot.bus.motors.keys()),
        ),
        EEBoundsAndSafety(
            end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]},
            max_ee_step_m=0.20,
            max_ee_twist_step_rad=0.50,
        ),
    ],
    to_transition=to_transition_teleop_action,
    to_output=lambda tr: tr,
)

# Build pipeline to convert ee pose action to joint action
robot_ee_to_joints = RobotProcessor(
    steps=[
        InverseKinematicsEEToJoints(
            kinematics=kinematics_solver,
            motor_names=list(robot.bus.motors.keys()),
            initial_guess_current_joints=True,
        ),
        GripperVelocityToJoint(
            motor_names=list(robot.bus.motors.keys()),
            speed_factor=20.0,
        ),
    ],
    to_transition=lambda tr: tr,
    to_output=to_output_robot_action,
)

# Build pipeline to convert joint observation to ee pose observation
robot_joints_to_ee_pose = RobotProcessor(
    steps=[
        ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys()))
    ],
    to_transition=to_transition_robot_observation,
    to_output=lambda tr: tr,
)

# Build dataset ee action features
action_ee = aggregate_pipeline_dataset_features(
    pipeline=phone_to_robot_ee_pose,
    initial_features=phone.action_features,
    use_videos=True,
    patterns=["action.ee"],
)

# Get gripper pos action features
gripper = aggregate_pipeline_dataset_features(
    pipeline=robot_ee_to_joints,
    initial_features={},
    use_videos=True,
    patterns=["action.gripper.pos", "observation.state.gripper.pos"],
)

# Build dataset ee observation features
observation_ee = aggregate_pipeline_dataset_features(
    pipeline=robot_joints_to_ee_pose,
    initial_features=robot.observation_features,
    use_videos=True,
    patterns=["observation.state.ee"],
)

dataset_features = merge_features(action_ee, gripper, observation_ee)

print("All dataset features: ", dataset_features)

# Create the dataset
dataset = LeRobotDataset.create(
    repo_id=HF_REPO_ID,
    fps=FPS,
    features=dataset_features,
    robot_type=robot.name,
    use_videos=True,
    image_writer_threads=4,
)

# Initialize the keyboard listener and rerun visualization
_, events = init_keyboard_listener()
_init_rerun(session_name="recording_phone")

# Connect the robot and teleoperator
robot.connect()
phone.connect()

episode_idx = 0
while episode_idx < NUM_EPISODES and not events["stop_recording"]:
    log_say(f"Recording episode {episode_idx + 1} of {NUM_EPISODES}")

    record_loop(
        robot=robot,
        events=events,
        fps=FPS,
        teleop=phone,
        dataset=dataset,
        control_time_s=EPISODE_TIME_SEC,
        single_task=TASK_DESCRIPTION,
        display_data=True,
        teleop_action_processor=phone_to_robot_ee_pose,
        robot_action_processor=robot_ee_to_joints,
        robot_observation_processor=robot_joints_to_ee_pose,
    )

    # Reset the environment if not stopping or re-recording
    if not events["stop_recording"] and (episode_idx < NUM_EPISODES - 1 or events["rerecord_episode"]):
        log_say("Reset the environment")
        record_loop(
            robot=robot,
            events=events,
            fps=FPS,
            teleop=phone,
            control_time_s=RESET_TIME_SEC,
            single_task=TASK_DESCRIPTION,
            display_data=True,
            teleop_action_processor=phone_to_robot_ee_pose,
            robot_action_processor=robot_ee_to_joints,
            robot_observation_processor=robot_joints_to_ee_pose,
        )

    if events["rerecord_episode"]:
        log_say("Re-recording episode")
        events["rerecord_episode"] = False
        events["exit_early"] = False
        dataset.clear_episode_buffer()
        continue

    dataset.save_episode()
    episode_idx += 1

# Clean up
log_say("Stop recording")
robot.disconnect()
phone.disconnect()
dataset.push_to_hub()
@@ -0,0 +1,106 @@
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import time

from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor.converters import to_output_robot_action
from lerobot.processor.pipeline import RobotProcessor
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.robot_kinematic_processor import (
    AddRobotObservationAsComplimentaryData,
    InverseKinematicsEEToJoints,
)
from lerobot.robots.so100_follower.so100_follower import SO100Follower
from lerobot.utils.robot_utils import busy_wait
from lerobot.utils.utils import log_say

EPISODE_IDX = 0
HF_REPO_ID = "<hf_username>/<dataset_repo_id>"

robot_config = SO100FollowerConfig(
    port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm", use_degrees=True
)
robot = SO100Follower(robot_config)
robot.connect()

dataset = LeRobotDataset(HF_REPO_ID, episodes=[EPISODE_IDX])
actions = dataset.hf_dataset.select_columns("action")

# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
kinematics_solver = RobotKinematics(
    urdf_path="./src/lerobot/teleoperators/sim/so101_new_calib.urdf",
    target_frame_name="gripper_frame_link",
    joint_names=list(robot.bus.motors.keys()),
)


# Converts an action row from the dataset into a transition for the pipeline
def action_to_transition(action: dict):
    act = {}

    # EE pose
    for k in ("ee.x", "ee.y", "ee.z", "ee.wx", "ee.wy", "ee.wz"):
        if k in action:
            act[f"action.{k}"] = float(action[k])

    # Gripper: the dataset stores an absolute position
    if "gripper.pos" in action:
        act["action.gripper.pos"] = float(action["gripper.pos"])

    return {
        "observation": None,
        "action": act,
        "reward": None,
        "done": False,
        "truncated": False,
        "info": {},
        "complementary_data": {},
    }


# Build pipeline to convert ee pose action to joint action
robot_ee_to_joints = RobotProcessor(
    steps=[
        AddRobotObservationAsComplimentaryData(robot=robot),
        InverseKinematicsEEToJoints(
            kinematics=kinematics_solver,
            motor_names=list(robot.bus.motors.keys()),
            initial_guess_current_joints=False,  # Because replay is open loop
        ),
    ],
    to_transition=action_to_transition,
    to_output=to_output_robot_action,
)

robot_ee_to_joints.reset()

log_say(f"Replaying episode {EPISODE_IDX}")
for idx in range(dataset.num_frames):
    t0 = time.perf_counter()

    ee_action = {
        name: float(actions[idx]["action"][i]) for i, name in enumerate(dataset.features["action"]["names"])
    }

    joint_action = robot_ee_to_joints(ee_action)
    robot.send_action(joint_action)

    busy_wait(1.0 / dataset.fps - (time.perf_counter() - t0))

robot.disconnect()
@@ -0,0 +1,109 @@
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time

from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotProcessor
from lerobot.processor.converters import to_output_robot_action, to_transition_teleop_action
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.robot_kinematic_processor import (
    AddRobotObservationAsComplimentaryData,
    EEBoundsAndSafety,
    EEReferenceAndDelta,
    GripperVelocityToJoint,
    InverseKinematicsEEToJoints,
)
from lerobot.robots.so100_follower.so100_follower import SO100Follower
from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS
from lerobot.teleoperators.phone.phone import Phone
from lerobot.teleoperators.phone.phone_processor import MapPhoneActionToRobotAction

# Create the robot and teleoperator configurations
robot_config = SO100FollowerConfig(
    port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm", use_degrees=True
)
teleop_config = PhoneConfig(phone_os=PhoneOS.IOS)  # or PhoneOS.ANDROID

# Initialize the robot and teleoperator
robot = SO100Follower(robot_config)
teleop_device = Phone(teleop_config)

# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
kinematics_solver = RobotKinematics(
    urdf_path="./src/lerobot/teleoperators/sim/so101_new_calib.urdf",
    target_frame_name="gripper_frame_link",
    joint_names=list(robot.bus.motors.keys()),
)

# Build pipeline to convert phone action to ee pose action
phone_to_robot_ee_pose = RobotProcessor(
    steps=[
        MapPhoneActionToRobotAction(platform=teleop_config.phone_os),
        AddRobotObservationAsComplimentaryData(robot=robot),
        EEReferenceAndDelta(
            kinematics=kinematics_solver,
            end_effector_step_sizes={"x": 0.5, "y": 0.5, "z": 0.5},
            motor_names=list(robot.bus.motors.keys()),
        ),
        EEBoundsAndSafety(
            end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]},
            max_ee_step_m=0.10,
            max_ee_twist_step_rad=0.50,
        ),
    ],
    to_transition=to_transition_teleop_action,
    to_output=lambda tr: tr,
)

# Build pipeline to convert ee pose action to joint action
robot_ee_to_joints = RobotProcessor(
    steps=[
        InverseKinematicsEEToJoints(
            kinematics=kinematics_solver,
            motor_names=list(robot.bus.motors.keys()),
        ),
        GripperVelocityToJoint(
            motor_names=list(robot.bus.motors.keys()),
            speed_factor=20.0,
        ),
    ],
    to_transition=lambda tr: tr,
    to_output=to_output_robot_action,
)

robot.connect()
teleop_device.connect()

print("Starting teleop loop. Move your phone to teleoperate the robot.")
while True:
    # Get teleop observation
    phone_obs = teleop_device.get_action()
    if not phone_obs:
        time.sleep(0.01)
        continue

    # Phone to EE pose transition
    ee_transition = phone_to_robot_ee_pose(phone_obs)

    # EE pose to Joints transition
    joint_action = robot_ee_to_joints(ee_transition)

    if joint_action:
        robot.send_action(joint_action)

    time.sleep(0.01)
@@ -73,6 +73,7 @@ dependencies = [
    "pynput>=1.7.7",
    "pyserial>=3.5",
    "wandb>=0.20.0",
    "scipy>=1.15.2",

    "torch>=2.2.1,<2.8.0", # TODO: Bump dependency
    "torchcodec>=0.2.1,<0.6.0; sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')", # TODO: Bump dependency
@@ -95,7 +96,7 @@ dependencies = [
# Common
pygame-dep = ["pygame>=2.5.1"]
placo-dep = ["placo>=0.9.6"]
transformers-dep = ["transformers>=4.50.3,<4.52.0"] # TODO: Bump dependency
transformers-dep = ["transformers<=4.52.0"]
grpcio-dep = ["grpcio==1.73.1", "protobuf==6.31.0"]

# Motors
@@ -111,6 +112,7 @@ intelrealsense = [
    "pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'",
    "pyrealsense2-macosx>=2.54 ; sys_platform == 'darwin'",
]
phone = ["hebi-py>=2.8.0", "teleop>=0.1.0"]
# stretch = [
#     "hello-robot-stretch-body>=0.7.27 ; sys_platform == 'linux'",
#     "pyrender @ git+https://github.com/mmatl/pyrender.git ; sys_platform == 'linux'",
@@ -152,7 +154,8 @@ all = [
    "lerobot[video_benchmark]",
    "lerobot[aloha]",
    "lerobot[pusht]",
    "lerobot[xarm]"
    "lerobot[xarm]",
    "lerobot[phone]",
]

[project.scripts]

@@ -0,0 +1,74 @@
#!/usr/bin/env python

"""
Convert a video dataset to an image dataset for faster training.
This pre-extracts all frames from MP4 files to PNG images.
"""

import argparse
import logging


def convert_dataset_videos_to_images(repo_id: str, root: str | None = None):
    """Convert all videos in a LeRobot dataset to individual image files."""
    import torchvision.transforms as T

    from lerobot.datasets.lerobot_dataset import LeRobotDataset
    from lerobot.datasets.video_utils import decode_video_frames

    # Load the dataset, downloading videos if needed
    dataset = LeRobotDataset(repo_id, root=root, download_videos=True)

    to_pil = T.ToPILImage()
    total_frames_processed = 0

    for ep_idx in range(dataset.meta.total_episodes):
        logging.info(f"Processing episode {ep_idx}/{dataset.meta.total_episodes}")

        for vid_key in dataset.meta.video_keys:
            video_path = dataset.root / dataset.meta.get_video_file_path(ep_idx, vid_key)

            if not video_path.exists():
                logging.warning(f"Video not found: {video_path}")
                continue

            # Create the image directory
            img_dir = dataset.root / f"images/chunk-{dataset.meta.get_episode_chunk(ep_idx)}/{vid_key}"
            img_dir.mkdir(parents=True, exist_ok=True)

            # Use the episode length to decode every frame of the video
            ep_length = dataset.meta.episodes[ep_idx]["length"]
            timestamps = [i / dataset.fps for i in range(ep_length)]

            try:
                frames = decode_video_frames(video_path, timestamps, dataset.tolerance_s, dataset.video_backend)

                # Save each frame as a PNG
                for i, frame in enumerate(frames.squeeze(0)):
                    img_path = img_dir / f"episode_{ep_idx:06d}_{i:06d}.png"
                    # Convert the tensor to PIL and save
                    pil_frame = to_pil(frame)
                    pil_frame.save(img_path)

                total_frames_processed += len(frames.squeeze(0))
                logging.info(f"  Extracted {len(frames.squeeze(0))} frames to {img_dir}")

            except Exception as e:
                logging.error(f"Failed to process {video_path}: {e}")
                continue

    logging.info(f"Conversion complete! Processed {total_frames_processed} total frames")
    logging.info("You can now use download_videos=False to use the extracted images")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Convert a LeRobot video dataset to images")
    parser.add_argument("repo_id", help="Dataset repo ID (e.g., 'kenmacken/record-test-2')")
    parser.add_argument("--root", help="Local root directory", default=None)

    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)

    convert_dataset_videos_to_images(args.repo_id, args.root)
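As a usage sketch (not part of the diff), the converter above can also be invoked directly from Python. The repo id below is the placeholder from the argparse help, and the script filename in the comment is hypothetical:

```python
# Usage sketch for the converter above. Assumes lerobot is installed and the
# dataset exists on the Hub; 'kenmacken/record-test-2' is a placeholder repo id.
import logging

logging.basicConfig(level=logging.INFO)

# Equivalent to: python convert_videos_to_images.py kenmacken/record-test-2
convert_dataset_videos_to_images("kenmacken/record-test-2", root=None)
```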
@@ -18,7 +18,7 @@ Helper to recalibrate your device (robot or teleoperator).
Example:

```shell
lerobot-calibrate \
python -m lerobot.calibrate \
    --teleop.type=so100_leader \
    --teleop.port=/dev/tty.usbmodem58760431551 \
    --teleop.id=blue

@@ -60,7 +60,7 @@ class OpenCVCamera(Camera):
    or port changes, especially on Linux. Use the provided utility script to find
    available camera indices or paths:
    ```bash
    lerobot-find-cameras opencv
    python -m lerobot.find_cameras opencv
    ```

    The camera's default settings (FPS, resolution, color mode) are used unless
@@ -165,7 +165,8 @@ class OpenCVCamera(Camera):
            self.videocapture.release()
            self.videocapture = None
            raise ConnectionError(
                f"Failed to open {self}.Run `lerobot-find-cameras opencv` to find available cameras."
                f"Failed to open {self}."
                f"Run `python -m lerobot.find_cameras opencv` to find available cameras."
            )

        self._configure_capture_settings()

@@ -51,7 +51,7 @@ class RealSenseCamera(Camera):

    Use the provided utility script to find available camera indices and default profiles:
    ```bash
    lerobot-find-cameras realsense
    python -m lerobot.find_cameras realsense
    ```

    A `RealSenseCamera` instance requires a configuration object specifying the
@@ -176,7 +176,8 @@ class RealSenseCamera(Camera):
            self.rs_profile = None
            self.rs_pipeline = None
            raise ConnectionError(
                f"Failed to open {self}.Run `lerobot-find-cameras realsense` to find available cameras."
                f"Failed to open {self}."
                "Run `python -m lerobot.find_cameras realsense` to find available cameras."
            ) from e

        self._configure_capture_settings()

@@ -33,6 +33,8 @@ class DatasetConfig:
    # Root directory where the dataset will be stored (e.g. 'dataset/path').
    root: str | None = None
    episodes: list[int] | None = None
    # Percentage of the dataset to use (0-100). If set, overrides the episodes parameter.
    percentage: float | None = None
    image_transforms: ImageTransformsConfig = field(default_factory=ImageTransformsConfig)
    revision: str | None = None
    use_imagenet_stats: bool = True

@@ -26,7 +26,7 @@ from huggingface_hub import hf_hub_download
from huggingface_hub.constants import CONFIG_NAME
from huggingface_hub.errors import HfHubHTTPError

from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.configs.types import FeatureType, PolicyFeature
from lerobot.constants import ACTION, OBS_STATE
from lerobot.optim.optimizers import OptimizerConfig
from lerobot.optim.schedulers import LRSchedulerConfig
@@ -53,7 +53,6 @@ class PreTrainedConfig(draccus.ChoiceRegistry, HubMixin, abc.ABC):
    """

    n_obs_steps: int = 1
    normalization_mapping: dict[str, NormalizationMode] = field(default_factory=dict)

    input_features: dict[str, PolicyFeature] = field(default_factory=dict)
    output_features: dict[str, PolicyFeature] = field(default_factory=dict)

@@ -24,6 +24,7 @@ class FeatureType(str, Enum):
    ENV = "ENV"
    ACTION = "ACTION"
    REWARD = "REWARD"
    LANGUAGE = "LANGUAGE"


class NormalizationMode(str, Enum):

@@ -21,6 +21,7 @@ OBS_ENV_STATE = "observation.environment_state"
OBS_STATE = "observation.state"
OBS_IMAGE = "observation.image"
OBS_IMAGES = "observation.images"
OBS_LANGUAGE = "observation.language"
ACTION = "action"
REWARD = "next.reward"

@@ -13,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pprint import pformat

import torch
@@ -87,10 +86,24 @@ def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDatas
        cfg.dataset.repo_id, root=cfg.dataset.root, revision=cfg.dataset.revision
    )
    delta_timestamps = resolve_delta_timestamps(cfg.policy, ds_meta)

    # Handle the percentage parameter
    episodes = cfg.dataset.episodes
    if cfg.dataset.percentage is not None:
        # Compute the episode subset from the requested percentage
        total_episodes = ds_meta.total_episodes
        num_episodes_to_use = max(1, int(total_episodes * cfg.dataset.percentage / 100))
        episodes = list(range(num_episodes_to_use))
        logging.info(
            f"Using {cfg.dataset.percentage}% of dataset: {num_episodes_to_use}/{total_episodes} episodes"
        )

    dataset = LeRobotDataset(
        cfg.dataset.repo_id,
        root=cfg.dataset.root,
        episodes=cfg.dataset.episodes,
        episodes=episodes,
        delta_timestamps=delta_timestamps,
        image_transforms=image_transforms,
        revision=cfg.dataset.revision,

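To make the new `percentage` behavior concrete, here is a minimal sketch of the episode selection it performs, mirroring the exact formula in the diff above (the 40-episode dataset is a made-up example):

```python
# Episode selection implied by --dataset.percentage, mirroring the code above.
# A hypothetical dataset with 40 episodes and percentage=10:
total_episodes = 40
percentage = 10.0

num_episodes_to_use = max(1, int(total_episodes * percentage / 100))
episodes = list(range(num_episodes_to_use))

print(episodes)  # [0, 1, 2, 3] -> the first 10% of episodes is used
```

Note that the subset is always the first N episodes, not a random sample, and at least one episode is kept even for very small percentages.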
@@ -825,8 +825,6 @@ class LeRobotDataset(torch.utils.data.Dataset):
        """
        if not episode_data:
            episode_buffer = self.episode_buffer
        else:
            episode_buffer = episode_data

        validate_episode_buffer(episode_buffer, self.meta.total_episodes, self.features)

@@ -0,0 +1,94 @@
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections.abc import Sequence
from typing import Any

from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.processor.pipeline import RobotProcessor


def aggregate_pipeline_dataset_features(
    pipeline: RobotProcessor,
    initial_features: dict[str, Any],
    *,
    use_videos: bool = True,
    patterns: Sequence[str] | None = None,
) -> dict[str, dict]:
    """
    Aggregates the pipeline's features and returns a features dict ready for the dataset,
    filtered to only those keys matching any of the given patterns (for action/state only).

    - `initial_features`: raw camera specs, e.g. {"front": (h, w, c), ...}
    - `use_videos`: whether to treat image features as video streams
    - `patterns`: regexes to filter action & state features; images are included
      whenever use_videos=True, regardless of patterns.
    """
    import re

    # Gather everything the pipeline features specifies, seeded with hardware cams:
    all_features = pipeline.transform_features(initial_features)

    # Helper to decide which action/state keys survive the `patterns` filter:
    def keep(key: str) -> bool:
        if patterns is None:
            return True
        return any(re.search(pat, key) for pat in patterns)

    # Start with the hardware dict, injecting initial cameras if videos are ON:
    hw: dict[str, dict[str, Any]] = {}
    if use_videos:
        cams = {
            name: shape
            for name, shape in initial_features.items()
            if isinstance(shape, tuple) and len(shape) == 3
        }
        if cams:
            hw["observation"] = dict(cams)

    # Go over every feature from the pipeline and merge:
    for full_key, ty in all_features.items():
        if full_key.startswith("action."):
            # action.<feat>
            if not keep(full_key):
                continue
            name = full_key[len("action.") :]
            hw.setdefault("action", {})[name] = ty

        elif full_key.startswith("observation.state."):
            # observation.state.<feat>
            if not keep(full_key):
                continue
            name = full_key[len("observation.state.") :]
            hw.setdefault("observation", {})[name] = ty

        elif full_key.startswith("observation.images."):
            # observation.images.<cam>
            # images obey ONLY the use_videos flag, not patterns
            if not use_videos:
                continue
            name = full_key[len("observation.images.") :]
            hw.setdefault("observation", {})[name] = ty

        else:
            # anything else (e.g. policy-only features) is ignored here
            continue

    out: dict[str, dict] = {}
    if "action" in hw:
        out.update(hw_to_dataset_features(hw["action"], "action", use_videos))
    if "observation" in hw:
        out.update(hw_to_dataset_features(hw["observation"], "observation", use_videos))

    return out
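The `patterns` filtering above is easiest to see with a stand-in pipeline. This is a sketch, not part of the diff: the stub class is hypothetical and only duck-types the one method the function calls (`transform_features`); it assumes lerobot is installed so `hw_to_dataset_features` resolves, and that scalar features are declared with the `float` type, as in `robot.action_features`:

```python
# Sketch: demonstrating the `patterns` regex filter of
# aggregate_pipeline_dataset_features with a minimal stub pipeline.
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features


class _StubPipeline:
    """Hypothetical stand-in exposing only transform_features."""

    def transform_features(self, initial_features):
        return {
            "action.ee.x": float,
            "action.gripper.pos": float,
            "observation.state.ee.x": float,
        }


features = aggregate_pipeline_dataset_features(
    pipeline=_StubPipeline(),
    initial_features={},
    use_videos=False,
    patterns=["action.ee"],  # only action.ee.* keys survive the filter
)
print(features)  # the gripper and observation keys are filtered out
```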
@@ -470,6 +470,50 @@ def dataset_to_policy_features(features: dict[str, dict]) -> dict[str, PolicyFea
    return policy_features


def merge_features(*dicts: dict) -> dict:
    """
    Merge LeRobot grouped feature dicts.

    - For 1D numeric specs (dtype not image/video/string) with "names": we merge the names and recompute the shape.
    - For others (observation.images.*), last one wins (if they are identical).
    """
    out: dict = {}
    for d in dicts:
        for key, value in d.items():
            if not isinstance(value, dict):
                out[key] = value
                continue

            dtype = value.get("dtype")
            shape = value.get("shape")
            is_vector = (
                dtype not in ("image", "video", "string")
                and isinstance(shape, tuple)
                and len(shape) == 1
                and "names" in value
            )

            if is_vector:
                # Initialize or retrieve the accumulating dict for this feature key
                target = out.setdefault(key, {"dtype": dtype, "names": [], "shape": (0,)})
                # Ensure consistent data types across merged entries
                if "dtype" in target and dtype != target["dtype"]:
                    raise ValueError(f"dtype mismatch for '{key}': {target['dtype']} vs {dtype}")

                # Merge feature names: append only new ones to preserve order without duplicates
                seen = set(target["names"])
                for n in value["names"]:
                    if n not in seen:
                        target["names"].append(n)
                        seen.add(n)
                # Recompute the shape to reflect the updated number of features
                target["shape"] = (len(target["names"]),)
            else:
                # For images/videos and non-1D entries: override with the latest definition
                out[key] = value
    return out
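The merge semantics are easiest to see on a tiny example. This is a sketch using the grouped-feature layout from the diff above; tracing it through the function body, the names are concatenated without duplicates and the shape is recomputed:

```python
# Sketch: merging two 1D feature specs with merge_features (defined above).
action_ee = {
    "action": {"dtype": "float32", "shape": (2,), "names": ["ee.x", "ee.y"]}
}
gripper = {
    "action": {"dtype": "float32", "shape": (1,), "names": ["gripper.pos"]}
}

merged = merge_features(action_ee, gripper)
print(merged["action"]["names"])  # ['ee.x', 'ee.y', 'gripper.pos']
print(merged["action"]["shape"])  # (3,)
```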


def create_empty_dataset_info(
    codebase_version: str,
    fps: int,

@@ -13,20 +13,24 @@
# limitations under the License.

"""
This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 2.0 to
2.1. It will:
This script converts a LeRobot dataset already pushed to the Hub from codebase version 2.0 to 2.1.
It downloads metadata from a SOURCE dataset repo, computes/validates per-episode stats, updates
the codebase version in `info.json`, and uploads the result to a DESTINATION dataset repo.
It will:

- Generate per-episode stats and write them in `episodes_stats.jsonl`
- Check consistency between these new stats and the old ones.
- Remove the deprecated `stats.json`.
- Update codebase_version in `info.json`.
- Push this new version to the hub on the 'main' branch and tags it with "v2.1".
- Push this new version to the destination repo/branch and tag it with the current codebase version.

Usage:

```bash
python -m lerobot.datasets.v21.convert_dataset_v20_to_v21 \
    --repo-id=aliberts/koch_tutorial
    --source-repo-id=namespace/source_dataset \
    --dest-repo-id=namespace/destination_dataset \
    --branch=main
```

"""
@@ -54,48 +58,67 @@ class SuppressWarnings:


def convert_dataset(
    repo_id: str,
    source_repo_id: str,
    dest_repo_id: str,
    branch: str | None = None,
    num_workers: int = 4,
):
    # Download metadata from the source repo at v2.0
    with SuppressWarnings():
        dataset = LeRobotDataset(repo_id, revision=V20, force_cache_sync=True)
        dataset = LeRobotDataset(source_repo_id, revision=V20, force_cache_sync=True)

    # Ensure we recompute fresh episode stats
    if (dataset.root / EPISODES_STATS_PATH).is_file():
        (dataset.root / EPISODES_STATS_PATH).unlink()

    # Compute and validate stats
    convert_stats(dataset, num_workers=num_workers)
    ref_stats = load_stats(dataset.root)
    check_aggregate_stats(dataset, ref_stats)

    # Update codebase version in info.json
    dataset.meta.info["codebase_version"] = CODEBASE_VERSION
    write_info(dataset.meta.info, dataset.root)

    dataset.push_to_hub(branch=branch, tag_version=False, allow_patterns="meta/")

    # delete old stats.json file
    if (dataset.root / STATS_PATH).is_file:
    # Remove deprecated stats.json locally so it won't be uploaded
    if (dataset.root / STATS_PATH).is_file():
        (dataset.root / STATS_PATH).unlink()

    # Push only meta/ to the destination repo
    hub_api = HfApi()
    if hub_api.file_exists(
        repo_id=dataset.repo_id, filename=STATS_PATH, revision=branch, repo_type="dataset"
    ):
        hub_api.delete_file(
            path_in_repo=STATS_PATH, repo_id=dataset.repo_id, revision=branch, repo_type="dataset"
        )
    hub_api.create_repo(repo_id=dest_repo_id, private=False, repo_type="dataset", exist_ok=True)
    if branch:
        hub_api.create_branch(repo_id=dest_repo_id, branch=branch, repo_type="dataset", exist_ok=True)

    hub_api.create_tag(repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset")
    hub_api.upload_folder(
        repo_id=dest_repo_id,
        folder_path=str(dataset.root),
        repo_type="dataset",
        revision=branch,
        allow_patterns="meta/",
    )

    # Ensure the old stats.json is deleted on the destination
    if hub_api.file_exists(repo_id=dest_repo_id, filename=STATS_PATH, revision=branch, repo_type="dataset"):
        hub_api.delete_file(path_in_repo=STATS_PATH, repo_id=dest_repo_id, revision=branch, repo_type="dataset")

    # Tag the destination with the current codebase version
    hub_api.create_tag(dest_repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo-id",
        "--source-repo-id",
        type=str,
        required=True,
        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset "
        "(e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).",
        help="Source dataset repo id to download from (must be v2.0).",
    )
    parser.add_argument(
        "--dest-repo-id",
        type=str,
        required=True,
        help="Destination dataset repo id to upload the converted metadata to.",
    )
    parser.add_argument(
        "--branch",

@@ -20,7 +20,7 @@ Helper to find the camera devices available in your system.
Example:

```shell
lerobot-find-cameras
python -m lerobot.find_cameras
```
"""


@@ -18,7 +18,7 @@ Helper to find the USB port associated with your MotorsBus.
Example:

```shell
lerobot-find-port
python -m lerobot.find_port
```
"""


@@ -107,8 +107,6 @@ X_SERIES_ENCODINGS_TABLE = {
    "Goal_PWM": X_SERIES_CONTROL_TABLE["Goal_PWM"][1],
    "Goal_Current": X_SERIES_CONTROL_TABLE["Goal_Current"][1],
    "Goal_Velocity": X_SERIES_CONTROL_TABLE["Goal_Velocity"][1],
    "Goal_Position": X_SERIES_CONTROL_TABLE["Goal_Position"][1],
    "Present_Position": X_SERIES_CONTROL_TABLE["Present_Position"][1],
    "Present_PWM": X_SERIES_CONTROL_TABLE["Present_PWM"][1],
    "Present_Current": X_SERIES_CONTROL_TABLE["Present_Current"][1],
    "Present_Velocity": X_SERIES_CONTROL_TABLE["Present_Velocity"][1],

@@ -222,7 +222,7 @@ class MotorsBus(abc.ABC):
    A MotorsBus subclass instance requires a port (e.g. `FeetechMotorsBus(port="/dev/tty.usbmodem575E0031751")`).
    To find the port, you can run our utility script:
    ```bash
    lerobot-find-port.py
    python -m lerobot.find_port
    >>> Finding all available ports for the MotorsBus.
    >>> ["/dev/tty.usbmodem575E0032081", "/dev/tty.usbmodem575E0031751"]
    >>> Remove the usb cable from your MotorsBus and press Enter when done.
@@ -446,7 +446,7 @@ class MotorsBus(abc.ABC):
        except (FileNotFoundError, OSError, serial.SerialException) as e:
            raise ConnectionError(
                f"\nCould not connect on port '{self.port}'. Make sure you are using the correct port."
                "\nTry running `lerobot-find-port`\n"
                "\nTry running `python -m lerobot.find_port`\n"
            ) from e

    @abc.abstractmethod

@@ -15,6 +15,19 @@
from .act.configuration_act import ACTConfig as ACTConfig
from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig
from .pi0.configuration_pi0 import PI0Config as PI0Config
from .pi0.processor_pi0 import Pi0NewLineProcessor
from .rlearn.configuration_rlearn import RLearNConfig as RLearNConfig
from .smolvla.configuration_smolvla import SmolVLAConfig as SmolVLAConfig
from .smolvla.processor_smolvla import SmolVLANewLineProcessor
from .tdmpc.configuration_tdmpc import TDMPCConfig as TDMPCConfig
from .vqbet.configuration_vqbet import VQBeTConfig as VQBeTConfig

__all__ = [
    "ACTConfig",
    "DiffusionConfig",
    "PI0Config",
    "SmolVLAConfig",
    "TDMPCConfig",
    "VQBeTConfig",
    "RLearNConfig",
]

@@ -35,7 +35,6 @@ from torchvision.ops.misc import FrozenBatchNorm2d

from lerobot.constants import ACTION, OBS_IMAGES
from lerobot.policies.act.configuration_act import ACTConfig
from lerobot.policies.normalize import Normalize, Unnormalize
from lerobot.policies.pretrained import PreTrainedPolicy


@@ -51,27 +50,16 @@ class ACTPolicy(PreTrainedPolicy):
    def __init__(
        self,
        config: ACTConfig,
        dataset_stats: dict[str, dict[str, Tensor]] | None = None,
    ):
        """
        Args:
            config: Policy configuration class instance or None, in which case the default instantiation of
                the configuration class is used.
            dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
                that they will be passed with a call to `load_state_dict` before the policy is used.
        """
        super().__init__(config)
        config.validate_features()
        self.config = config

        self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
        self.normalize_targets = Normalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )
        self.unnormalize_outputs = Unnormalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )

        self.model = ACT(config)

        if config.temporal_ensemble_coeff is not None:
@@ -137,23 +125,19 @@ class ACTPolicy(PreTrainedPolicy):
        """Predict a chunk of actions given environment observations."""
        self.eval()

        batch = self.normalize_inputs(batch)
        if self.config.image_features:
            batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
            batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features]

        actions = self.model(batch)[0]
        actions = self.unnormalize_outputs({ACTION: actions})[ACTION]
        return actions

    def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]:
        """Run the batch through the model and compute the loss for training or validation."""
        batch = self.normalize_inputs(batch)
        if self.config.image_features:
            batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
            batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features]

        batch = self.normalize_targets(batch)
        actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch)

        l1_loss = (
@@ -303,7 +287,7 @@ class ACT(nn.Module):
    └───────────────────────┘
    """

    def __init__(self, config: ACTConfig):
    def __init__(self, config: ACTConfig, dataset_stats=None):
        # BERT style VAE encoder with input tokens [cls, robot_state, *action_sequence].
        # The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]).
        super().__init__()

@@ -0,0 +1,50 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import torch
|
||||
|
||||
from lerobot.policies.act.configuration_act import ACTConfig
|
||||
from lerobot.processor import (
|
||||
DeviceProcessor,
|
||||
NormalizerProcessor,
|
||||
RenameProcessor,
|
||||
RobotProcessor,
|
||||
ToBatchProcessor,
|
||||
UnnormalizerProcessor,
|
||||
)
|
||||
|
||||
|
||||
def make_act_processor(
|
||||
config: ACTConfig, dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None
|
||||
) -> tuple[RobotProcessor, RobotProcessor]:
|
||||
input_steps = [
|
||||
RenameProcessor(rename_map={}),
|
||||
NormalizerProcessor(
|
||||
features={**config.input_features, **config.output_features},
|
||||
norm_map=config.normalization_mapping,
|
||||
stats=dataset_stats,
|
||||
),
|
||||
ToBatchProcessor(),
|
||||
DeviceProcessor(device=config.device),
|
||||
]
|
||||
output_steps = [
|
||||
DeviceProcessor(device="cpu"),
|
||||
UnnormalizerProcessor(
|
||||
features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
|
||||
),
|
||||
]
|
||||
return RobotProcessor(steps=input_steps, name="robot_preprocessor"), RobotProcessor(
|
||||
steps=output_steps, name="robot_postprocessor"
|
||||
)
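
For orientation, a minimal usage sketch of the processor pair built above. The step order in the comments comes from the code; `dataset_stats=None` and the conceptual invocation are illustrative assumptions (the exact `RobotProcessor` call interface is defined in `lerobot.processor`, not shown in this diff):

```python
from lerobot.policies.act.configuration_act import ACTConfig
from lerobot.policies.act.processor_act import make_act_processor

# Stats would normally come from LeRobotDatasetMetadata.stats; None is a
# hypothetical simplification for the sketch.
config = ACTConfig()
preprocessor, postprocessor = make_act_processor(config, dataset_stats=None)

# Conceptually, each RobotProcessor applies its steps in order:
#   preprocessor:  rename -> normalize input/output features -> batch -> move to config.device
#   postprocessor: move to cpu -> unnormalize output features (actions)
```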
@@ -35,7 +35,6 @@ from torch import Tensor, nn

from lerobot.constants import ACTION, OBS_ENV_STATE, OBS_IMAGES, OBS_STATE
from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
from lerobot.policies.normalize import Normalize, Unnormalize
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.policies.utils import (
    get_device_from_parameters,
@@ -57,7 +56,6 @@ class DiffusionPolicy(PreTrainedPolicy):
    def __init__(
        self,
        config: DiffusionConfig,
        dataset_stats: dict[str, dict[str, Tensor]] | None = None,
    ):
        """
        Args:
@@ -70,14 +68,6 @@ class DiffusionPolicy(PreTrainedPolicy):
        config.validate_features()
        self.config = config

        self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
        self.normalize_targets = Normalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )
        self.unnormalize_outputs = Unnormalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )

        # queues are populated during rollout of the policy, they contain the n latest observations and actions
        self._queues = None

@@ -106,9 +96,6 @@ class DiffusionPolicy(PreTrainedPolicy):
        batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues}
        actions = self.diffusion.generate_actions(batch)

        # TODO(rcadene): make above methods return output dictionary?
        actions = self.unnormalize_outputs({ACTION: actions})[ACTION]

        return actions

    @torch.no_grad()
@@ -137,7 +124,6 @@ class DiffusionPolicy(PreTrainedPolicy):
        if ACTION in batch:
            batch.pop(ACTION)

        batch = self.normalize_inputs(batch)
        if self.config.image_features:
            batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
            batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
@@ -153,11 +139,9 @@ class DiffusionPolicy(PreTrainedPolicy):

    def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, None]:
        """Run the batch through the model and compute the loss for training or validation."""
        batch = self.normalize_inputs(batch)
        if self.config.image_features:
            batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
            batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
        batch = self.normalize_targets(batch)
        loss = self.diffusion.compute_loss(batch)
        # no output_dict so returning None
        return loss, None

@@ -0,0 +1,51 @@
#!/usr/bin/env python

# Copyright 2024 Columbia Artificial Intelligence, Robotics Lab,
# and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
from lerobot.processor import (
    DeviceProcessor,
    NormalizerProcessor,
    RenameProcessor,
    RobotProcessor,
    ToBatchProcessor,
    UnnormalizerProcessor,
)


def make_diffusion_processor(
    config: DiffusionConfig, dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None
) -> tuple[RobotProcessor, RobotProcessor]:
    input_steps = [
        RenameProcessor(rename_map={}),
        NormalizerProcessor(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
        ToBatchProcessor(),
        DeviceProcessor(device=config.device),
    ]
    output_steps = [
        DeviceProcessor(device="cpu"),
        UnnormalizerProcessor(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
    ]
    return RobotProcessor(steps=input_steps, name="robot_preprocessor"), RobotProcessor(
        steps=output_steps, name="robot_postprocessor"
    )
@@ -14,9 +14,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from __future__ import annotations

import logging
from typing import Any, TypedDict, cast

import torch
from torch import nn
from typing_extensions import Unpack

from lerobot.configs.policies import PreTrainedConfig
from lerobot.configs.types import FeatureType
@@ -29,14 +34,16 @@ from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
from lerobot.policies.pi0.configuration_pi0 import PI0Config
from lerobot.policies.pi0fast.configuration_pi0fast import PI0FASTConfig
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.policies.rlearn.configuration_rlearn import RLearNConfig
from lerobot.policies.sac.configuration_sac import SACConfig
from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig
from lerobot.policies.smolvla.configuration_smolvla import SmolVLAConfig
from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig
from lerobot.policies.vqbet.configuration_vqbet import VQBeTConfig
from lerobot.processor.pipeline import RobotProcessor


def get_policy_class(name: str) -> PreTrainedPolicy:
def get_policy_class(name: str) -> type[PreTrainedPolicy]:
    """Get the policy's class and config class given a name (matching the policy class' `name` attribute)."""
    if name == "tdmpc":
        from lerobot.policies.tdmpc.modeling_tdmpc import TDMPCPolicy
@@ -74,6 +81,10 @@ def get_policy_class(name: str) -> PreTrainedPolicy:
        from lerobot.policies.smolvla.modeling_smolvla import SmolVLAPolicy

        return SmolVLAPolicy
    elif name == "rlearn":
        from lerobot.policies.rlearn.modeling_rlearn import RLearNPolicy

        return RLearNPolicy
    else:
        raise NotImplementedError(f"Policy with name {name} is not implemented.")

@@ -97,14 +108,143 @@ def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig:
        return SmolVLAConfig(**kwargs)
    elif policy_type == "reward_classifier":
        return RewardClassifierConfig(**kwargs)
    elif policy_type == "rlearn":
        return RLearNConfig(**kwargs)
    else:
        raise ValueError(f"Policy type '{policy_type}' is not available.")


class ProcessorConfigKwargs(TypedDict, total=False):
    """Keyword arguments for the processor config."""

    preprocessor_config_filename: str | None
    postprocessor_config_filename: str | None
    preprocessor_overrides: dict[str, Any] | None
    postprocessor_overrides: dict[str, Any] | None
    dataset_stats: dict[str, dict[str, torch.Tensor]] | None


def make_processor(
    policy_cfg: PreTrainedConfig,
    pretrained_path: str | None = None,
    **kwargs: Unpack[ProcessorConfigKwargs],
) -> tuple[RobotProcessor, RobotProcessor]:
    """Make a processor instance for a given policy type.

    This function creates the appropriate processor configuration based on the policy type.
    Each policy type has its own processor with specific preprocessing steps.

    Args:
        policy_cfg: The config of the policy to create a processor for (e.g., "act", "diffusion", etc.)
        pretrained_path: Optional path to load a pretrained processor from. If provided, loads
            the processor from this path instead of creating a new one.
        **kwargs: Additional keyword arguments passed to the processor creation.

    Returns:
        Tuple of (input_processor, output_processor) for the policy.

    Raises:
        NotImplementedError: If the policy type doesn't have a processor implemented.
    """
    if pretrained_path:
        # Load a pretrained processor
        # TODO(azouitine): Handle this case.
        return (
            RobotProcessor.from_pretrained(
                pretrained_model_name_or_path=pretrained_path,
                config_filename=kwargs.get("preprocessor_config_filename", "robot_preprocessor.json"),
                overrides=kwargs.get("preprocessor_overrides", {}),
            ),
            RobotProcessor.from_pretrained(
                pretrained_model_name_or_path=pretrained_path,
                config_filename=kwargs.get("postprocessor_config_filename", "robot_postprocessor.json"),
                overrides=kwargs.get("postprocessor_overrides", {}),
            ),
        )

    # Create a new processor based on policy type
    if policy_cfg.type == "tdmpc":
        from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig
        from lerobot.policies.tdmpc.processor_tdmpc import make_tdmpc_processor

        processors = make_tdmpc_processor(
            config=cast(TDMPCConfig, policy_cfg), dataset_stats=kwargs.get("dataset_stats")
        )

    elif policy_cfg.type == "diffusion":
        from lerobot.policies.diffusion.processor_diffusion import make_diffusion_processor

        processors = make_diffusion_processor(
            cast(DiffusionConfig, policy_cfg), dataset_stats=kwargs.get("dataset_stats")
        )

    elif policy_cfg.type == "act":
        from lerobot.policies.act.processor_act import make_act_processor

        processors = make_act_processor(
            config=cast(ACTConfig, policy_cfg), dataset_stats=kwargs.get("dataset_stats")
        )

    elif policy_cfg.type == "vqbet":
        from lerobot.policies.vqbet.processor_vqbet import make_vqbet_processor

        processors = make_vqbet_processor(
            config=cast(VQBeTConfig, policy_cfg), dataset_stats=kwargs.get("dataset_stats")
        )

    elif policy_cfg.type == "pi0":
        from lerobot.policies.pi0.processor_pi0 import make_pi0_processor

        processors = make_pi0_processor(
            config=cast(PI0Config, policy_cfg), dataset_stats=kwargs.get("dataset_stats")
        )

    elif policy_cfg.type == "pi0fast":
        from lerobot.policies.pi0fast.processor_pi0fast import make_pi0fast_processor

        # Cast to PI0FASTConfig (the original cast to PI0Config looked like a typo).
        processors = make_pi0fast_processor(
            cast(PI0FASTConfig, policy_cfg), dataset_stats=kwargs.get("dataset_stats")
        )

    elif policy_cfg.type == "sac":
        from lerobot.policies.sac.processor_sac import make_sac_processor

        processors = make_sac_processor(
            cast(SACConfig, policy_cfg), dataset_stats=kwargs.get("dataset_stats")
        )

    elif policy_cfg.type == "reward_classifier":
        from lerobot.policies.sac.reward_model.processor_classifier import make_classifier_processor

        processors = make_classifier_processor(
            cast(RewardClassifierConfig, policy_cfg), dataset_stats=kwargs.get("dataset_stats")
        )

    elif policy_cfg.type == "smolvla":
        from lerobot.policies.smolvla.processor_smolvla import make_smolvla_processor

        processors = make_smolvla_processor(
            cast(SmolVLAConfig, policy_cfg), dataset_stats=kwargs.get("dataset_stats")
        )

    elif policy_cfg.type == "rlearn":
        from lerobot.policies.rlearn.processor_rlearn import make_rlearn_processor

        processors = make_rlearn_processor(
            cast(RLearNConfig, policy_cfg), dataset_stats=kwargs.get("dataset_stats")
        )

    else:
        raise NotImplementedError(f"Processor for policy type '{policy_cfg.type}' is not implemented.")

    return processors
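
A hedged usage sketch of the factory entry point above; the import path and the pretrained repo id are assumptions for illustration:

```python
from lerobot.policies.factory import make_policy_config, make_processor

# Build a fresh processor pair for ACT from scratch (no pretrained path).
cfg = make_policy_config("act")
preprocessor, postprocessor = make_processor(cfg, dataset_stats=None)

# Or load a serialized pair saved alongside a pretrained policy
# ("user/act_checkpoint" is a hypothetical repo id):
# preprocessor, postprocessor = make_processor(cfg, pretrained_path="user/act_checkpoint")
```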

def make_policy(
    cfg: PreTrainedConfig,
    ds_meta: LeRobotDatasetMetadata | None = None,
    env_cfg: EnvConfig | None = None,
    episode_data_index: dict | None = None,
) -> PreTrainedPolicy:
    """Make an instance of a policy class.

@@ -147,7 +287,6 @@ def make_policy(
    kwargs = {}
    if ds_meta is not None:
        features = dataset_to_policy_features(ds_meta.features)
        kwargs["dataset_stats"] = ds_meta.stats
    else:
        if not cfg.pretrained_path:
            logging.warning(
@@ -155,12 +294,18 @@ def make_policy(
                "rather than a dataset. Normalization modules inside the policy will have infinite values "
                "by default without stats from a dataset."
            )
        if env_cfg is None:
            raise ValueError("env_cfg cannot be None when ds_meta is not provided")
        features = env_to_policy_features(env_cfg)

    cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION}
    cfg.input_features = {key: ft for key, ft in features.items() if key not in cfg.output_features}
    kwargs["config"] = cfg

    # Pass episode_data_index for RLearN policy to calculate proper progress
    if cfg.type == "rlearn" and episode_data_index is not None:
        kwargs["episode_data_index"] = episode_data_index

    if cfg.pretrained_path:
        # Load a pretrained policy and override the config if needed (for example, if there are inference-time
        # hyperparameters that we want to vary).

@@ -30,7 +30,7 @@ pip install -e ".[pi0]"

Example of finetuning the pi0 pretrained model (`pi0_base` in `openpi`):
```bash
lerobot-train \
python -m lerobot.scripts.train \
  --policy.path=lerobot/pi0 \
  --dataset.repo_id=danaaubakirova/koch_test
```
@@ -38,7 +38,7 @@ lerobot-train \
Example of finetuning the pi0 neural network with PaliGemma and expert Gemma
pretrained with VLM default parameters before pi0 finetuning:
```bash
lerobot-train \
python -m lerobot.scripts.train \
  --policy.type=pi0 \
  --dataset.repo_id=danaaubakirova/koch_test
```
@@ -56,18 +56,15 @@ from collections import deque
import torch
import torch.nn.functional as F  # noqa: N812
from torch import Tensor, nn
from transformers import AutoTokenizer

from lerobot.constants import ACTION, OBS_STATE
from lerobot.policies.normalize import Normalize, Unnormalize
from lerobot.constants import ACTION, OBS_LANGUAGE, OBS_STATE
from lerobot.policies.pi0.configuration_pi0 import PI0Config
from lerobot.policies.pi0.paligemma_with_expert import (
    PaliGemmaWithExpertConfig,
    PaliGemmaWithExpertModel,
)
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.policies.utils import log_model_loading_keys
from lerobot.utils.utils import get_safe_dtype, init_logging
from lerobot.utils.utils import get_safe_dtype


def create_sinusoidal_pos_embedding(
@@ -223,28 +220,17 @@ class PI0Policy(PreTrainedPolicy):
    def __init__(
        self,
        config: PI0Config,
        dataset_stats: dict[str, dict[str, Tensor]] | None = None,
    ):
        """
        Args:
            config: Policy configuration class instance or None, in which case the default instantiation of
                the configuration class is used.
            dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
                that they will be passed with a call to `load_state_dict` before the policy is used.
        """

        super().__init__(config)
        config.validate_features()
        self.config = config
        self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
        self.normalize_targets = Normalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )
        self.unnormalize_outputs = Unnormalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )

        self.language_tokenizer = AutoTokenizer.from_pretrained("google/paligemma-3b-pt-224")
        self.model = PI0FlowMatching(config)

        self.reset()
@@ -253,99 +239,6 @@ class PI0Policy(PreTrainedPolicy):
        """This should be called whenever the environment is reset."""
        self._action_queue = deque([], maxlen=self.config.n_action_steps)

    @classmethod
    def _transform_state_dict_keys(cls, state_dict: dict) -> dict:
        """
        Transform state dict keys to match expected model structure.

        Transformations:
        - model.paligemma_with_expert.paligemma.language_model.lm_head ->
          model.paligemma_with_expert.paligemma.lm_head
        - model.paligemma_with_expert.paligemma.language_model.model ->
          model.paligemma_with_expert.paligemma.model.language_model
        - model.paligemma_with_expert.paligemma.vision_tower ->
          model.paligemma_with_expert.paligemma.model.vision_tower
        - model.paligemma_with_expert.paligemma.multi_modal_projector ->
          model.paligemma_with_expert.paligemma.model.multi_modal_projector

        Also handles tied weights between lm_head.weight and
        embed_tokens.weight.
        """
        import re

        transformed_dict = {}

        transformations = [
            (
                re.compile(r"\.paligemma_with_expert\.paligemma\.language_model\.lm_head"),
                ".paligemma_with_expert.paligemma.lm_head",
            ),
            (
                re.compile(r"\.paligemma_with_expert\.paligemma\.language_model\.model"),
                ".paligemma_with_expert.paligemma.model.language_model",
            ),
            (
                re.compile(r"\.paligemma_with_expert\.paligemma\.vision_tower"),
                ".paligemma_with_expert.paligemma.model.vision_tower",
            ),
            (
                re.compile(r"\.paligemma_with_expert\.paligemma\.multi_modal_projector"),
                ".paligemma_with_expert.paligemma.model.multi_modal_projector",
            ),
        ]

        for key, value in state_dict.items():
            new_key = key
            for pattern, replacement in transformations:
                new_key = pattern.sub(replacement, new_key)
            transformed_dict[new_key] = value

        # Handle tied weights: lm_head.weight and embed_tokens.weight share memory
        lm_head_key = None
        embed_tokens_key = None

        for key in transformed_dict:
            if key.endswith(".paligemma_with_expert.paligemma.lm_head.weight"):
                lm_head_key = key
            elif key.endswith(".paligemma_with_expert.paligemma.model.language_model.embed_tokens.weight"):
                embed_tokens_key = key
            if lm_head_key and embed_tokens_key:
                break

        if lm_head_key and not embed_tokens_key:
            embed_tokens_key = lm_head_key.replace(
                ".lm_head.weight", ".model.language_model.embed_tokens.weight"
            )
            transformed_dict[embed_tokens_key] = transformed_dict[lm_head_key]
        elif embed_tokens_key and not lm_head_key:
            lm_head_key = embed_tokens_key.replace(
                ".model.language_model.embed_tokens.weight", ".lm_head.weight"
            )
            transformed_dict[lm_head_key] = transformed_dict[embed_tokens_key]

        return transformed_dict
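
To make the remapping above concrete, a self-contained check of the second transformation on a hypothetical checkpoint key (the key itself is made up; only the pattern and replacement come from the code above):

```python
import re

pattern = re.compile(r"\.paligemma_with_expert\.paligemma\.language_model\.model")
replacement = ".paligemma_with_expert.paligemma.model.language_model"

# Hypothetical checkpoint key for illustration:
key = "model.paligemma_with_expert.paligemma.language_model.model.layers.0.self_attn.q_proj.weight"
print(pattern.sub(replacement, key))
# model.paligemma_with_expert.paligemma.model.language_model.layers.0.self_attn.q_proj.weight
```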

    @classmethod
    def _load_as_safetensor(
        cls, model: "PI0Policy", model_file: str, map_location: str, strict: bool
    ) -> "PI0Policy":
        """Override to apply key transformations before loading."""
        from safetensors.torch import load_file

        init_logging()
        # Load the state dict from file safely
        state_dict = load_file(model_file, device=map_location)

        # Apply key transformations
        transformed_state_dict = cls._transform_state_dict_keys(state_dict)

        # Load the transformed state dict
        msg = model.load_state_dict(transformed_state_dict, strict=strict)

        # Log message
        log_model_loading_keys(msg.missing_keys, msg.unexpected_keys)
        return model

    def get_optim_params(self) -> dict:
        return self.parameters()

@@ -377,14 +270,13 @@ class PI0Policy(PreTrainedPolicy):
        if self.config.adapt_to_pi_aloha:
            batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])

        batch = self.normalize_inputs(batch)

        # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by
        # querying the policy.
        if len(self._action_queue) == 0:
            images, img_masks = self.prepare_images(batch)
            state = self.prepare_state(batch)
            lang_tokens, lang_masks = self.prepare_language(batch)
            lang_tokens = batch[f"{OBS_LANGUAGE}.tokens"]
            lang_masks = batch[f"{OBS_LANGUAGE}.attention_mask"]

            actions = self.model.sample_actions(
                images, img_masks, lang_tokens, lang_masks, state, noise=noise
@@ -394,8 +286,6 @@ class PI0Policy(PreTrainedPolicy):
            original_action_dim = self.config.action_feature.shape[0]
            actions = actions[:, :, :original_action_dim]

            actions = self.unnormalize_outputs({"action": actions})["action"]

            if self.config.adapt_to_pi_aloha:
                actions = self._pi_aloha_encode_actions(actions)

@@ -410,12 +300,10 @@ class PI0Policy(PreTrainedPolicy):
            batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
            batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION])

        batch = self.normalize_inputs(batch)
        batch = self.normalize_targets(batch)

        images, img_masks = self.prepare_images(batch)
        state = self.prepare_state(batch)
        lang_tokens, lang_masks = self.prepare_language(batch)
        lang_tokens = batch[f"{OBS_LANGUAGE}.tokens"]
        lang_masks = batch[f"{OBS_LANGUAGE}.attention_mask"]
        actions = self.prepare_action(batch)
        actions_is_pad = batch.get("action_is_pad")

@@ -482,26 +370,6 @@ class PI0Policy(PreTrainedPolicy):

        return images, img_masks

    def prepare_language(self, batch) -> tuple[Tensor, Tensor]:
        """Tokenize the text input"""
        device = batch[OBS_STATE].device
        tasks = batch["task"]

        # PaliGemma prompt has to end with a new line
        tasks = [task if task.endswith("\n") else f"{task}\n" for task in tasks]

        tokenized_prompt = self.language_tokenizer.__call__(
            tasks,
            padding="max_length",
            padding_side="right",
            max_length=self.config.tokenizer_max_length,
            return_tensors="pt",
        )
        lang_tokens = tokenized_prompt["input_ids"].to(device=device)
        lang_masks = tokenized_prompt["attention_mask"].to(device=device, dtype=torch.bool)

        return lang_tokens, lang_masks

    def _pi_aloha_decode_state(self, state):
        # Flip the joints.
        for motor_idx in [1, 2, 8, 9]:
@@ -567,7 +435,7 @@ class PI0FlowMatching(nn.Module):
    └──────────────────────────────┘
    """

    def __init__(self, config):
    def __init__(self, config: PI0Config):
        super().__init__()
        self.config = config


@@ -0,0 +1,120 @@
#!/usr/bin/env python

# Copyright 2025 Physical Intelligence and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any

import torch

from lerobot.configs.types import PolicyFeature
from lerobot.policies.pi0.configuration_pi0 import PI0Config
from lerobot.processor import (
    DeviceProcessor,
    NormalizerProcessor,
    RobotProcessor,
    ToBatchProcessor,
    TokenizerProcessor,
    UnnormalizerProcessor,
)
from lerobot.processor.pipeline import (
    EnvTransition,
    ProcessorStep,
    ProcessorStepRegistry,
    TransitionKey,
)
from lerobot.processor.rename_processor import RenameProcessor


@ProcessorStepRegistry.register(name="pi0_new_line_processor")
class Pi0NewLineProcessor(ProcessorStep):
    """Add a new line to the end of the task if it doesn't have one.
    This is required for the PaliGemma tokenizer.
    """

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        # Check if complementary_data exists
        complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA)
        if complementary_data is None or "task" not in complementary_data:
            return transition

        task = complementary_data["task"]
        if task is None:
            return transition

        # Handle both string and list of strings
        if isinstance(task, str):
            # Single string: add newline if not present
            if not task.endswith("\n"):
                complementary_data["task"] = f"{task}\n"
        elif isinstance(task, list) and all(isinstance(t, str) for t in task):
            # List of strings: add newline to each if not present
            complementary_data["task"] = [t if t.endswith("\n") else f"{t}\n" for t in task]
        # If task is neither string nor list of strings, leave unchanged

        return transition

    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        """Return features unchanged; tokenized task features are added by `TokenizerProcessor`."""
        return features

    def state_dict(self) -> dict[str, torch.Tensor]:
        """Return state dictionary (empty for this processor)."""
        return {}

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """Load state dictionary (no-op for this processor)."""
        pass

    def reset(self) -> None:
        """Reset processor state (no-op for this processor)."""
        pass

    def get_config(self) -> dict[str, Any]:
        """Return configuration for serialization."""
        return {}


def make_pi0_processor(
    config: PI0Config, dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None
) -> tuple[RobotProcessor, RobotProcessor]:
    # Add remaining processors
    input_steps: list[ProcessorStep] = [
        RenameProcessor(rename_map={}),  # To mimic the same processor as the pretrained one
        NormalizerProcessor(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
        ToBatchProcessor(),
        Pi0NewLineProcessor(),  # Add newlines before tokenization for PaliGemma
        TokenizerProcessor(
            tokenizer_name="google/paligemma-3b-pt-224",
            max_length=config.tokenizer_max_length,
            padding_side="right",
            padding="max_length",
        ),
        DeviceProcessor(device=config.device),
    ]

    output_steps: list[ProcessorStep] = [
        DeviceProcessor(device="cpu"),
        UnnormalizerProcessor(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
    ]

    return RobotProcessor(steps=input_steps, name="robot_preprocessor"), RobotProcessor(
        steps=output_steps, name="robot_postprocessor"
    )
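
A minimal sketch of the newline step in isolation, assuming `EnvTransition` behaves as a mapping keyed by `TransitionKey` (which is how the step implementations above use it); the task string is made up:

```python
from lerobot.policies.pi0.processor_pi0 import Pi0NewLineProcessor
from lerobot.processor.pipeline import TransitionKey

step = Pi0NewLineProcessor()
transition = {TransitionKey.COMPLEMENTARY_DATA: {"task": "pick up the cube"}}
transition = step(transition)
print(repr(transition[TransitionKey.COMPLEMENTARY_DATA]["task"]))  # 'pick up the cube\n'
```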
@@ -25,14 +25,14 @@ Disclaimer: It is not expected to perform as well as the original implementation

Example of finetuning the pi0+FAST pretrained model (`pi0_fast_base` in `openpi`):
```bash
lerobot-train \
python -m lerobot.scripts.train \
  --policy.path=lerobot/pi0fast_base \
  --dataset.repo_id=danaaubakirova/koch_test
```

Example of training the pi0+FAST neural network from scratch:
```bash
lerobot-train \
python -m lerobot.scripts.train \
  --policy.type=pi0fast \
  --dataset.repo_id=danaaubakirova/koch_test
```
@@ -58,7 +58,6 @@ from transformers.cache_utils import HybridCache, StaticCache
from transformers.models.auto import CONFIG_MAPPING

from lerobot.constants import ACTION, OBS_STATE
from lerobot.policies.normalize import Normalize, Unnormalize
from lerobot.policies.pi0fast.configuration_pi0fast import PI0FASTConfig
from lerobot.policies.pretrained import PreTrainedPolicy

@@ -146,14 +145,6 @@ class PI0FASTPolicy(PreTrainedPolicy):
        config.validate_features()
        self.config = config

        self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
        self.normalize_targets = Normalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )
        self.unnormalize_outputs = Unnormalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )

        self.language_tokenizer = AutoProcessor.from_pretrained("google/paligemma-3b-pt-224")
        self.model = PI0FAST(config)

@@ -221,8 +212,6 @@ class PI0FASTPolicy(PreTrainedPolicy):
        if self.config.adapt_to_pi_aloha:
            batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])

        batch = self.normalize_inputs(batch)

        # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by
        # querying the policy.
        if len(self._action_queue) == 0:
@@ -235,8 +224,6 @@ class PI0FASTPolicy(PreTrainedPolicy):
            ]  # self.config.max_action_dim # self.config.action_feature.shape[0]
            actions = actions[:, :, :original_action_dim]

            actions = self.unnormalize_outputs({"action": actions})["action"]

            if self.config.adapt_to_pi_aloha:
                actions = self._pi_aloha_encode_actions(actions)

@@ -249,8 +236,6 @@ class PI0FASTPolicy(PreTrainedPolicy):
        if self.config.adapt_to_pi_aloha:
            batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
            batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION])
        batch = self.normalize_inputs(batch)
        batch = self.normalize_targets(batch)
        loss_dict = self.model.forward(batch)
        return loss_dict["loss"], loss_dict


@@ -0,0 +1,51 @@
#!/usr/bin/env python

# Copyright 2025 Physical Intelligence and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from lerobot.policies.pi0fast.configuration_pi0fast import PI0FASTConfig
from lerobot.processor import (
    DeviceProcessor,
    NormalizerProcessor,
    RenameProcessor,
    RobotProcessor,
    ToBatchProcessor,
    UnnormalizerProcessor,
)


def make_pi0fast_processor(
    config: PI0FASTConfig, dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None
) -> tuple[RobotProcessor, RobotProcessor]:
    # Note: the original annotated/imported PI0Config here; PI0FASTConfig matches the policy type.
    input_steps = [
        RenameProcessor(rename_map={}),  # To mimic the same processor as the pretrained one
        NormalizerProcessor(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
        ToBatchProcessor(),
        DeviceProcessor(device=config.device),
    ]
    output_steps = [
        DeviceProcessor(device="cpu"),
        UnnormalizerProcessor(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
    ]
    return RobotProcessor(steps=input_steps, name="robot_preprocessor"), RobotProcessor(
        steps=output_steps, name="robot_postprocessor"
    )
@@ -0,0 +1,128 @@
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field

from lerobot.configs.policies import PreTrainedConfig
from lerobot.configs.types import NormalizationMode


@PreTrainedConfig.register_subclass("rlearn")
@dataclass
class RLearNConfig(PreTrainedConfig):
    """Configuration for a video-language conditioned reward model (RLearN).

    Inputs:
    - Visual frames (one or multiple cameras). Optionally a short sequence.
    - A language instruction/goal string.

    Output:
    - Per-timestep reward logits or a single-step reward logit.

    Notes:
    - This follows the ReWiND paper architecture. It uses frozen vision/text encoders
      (by default SigLIP2 for both vision and language; see the fields below) and trains a
      lightweight temporal aggregator + head.
    """

    # Encoders - Use SigLIP2 for both vision and text (shared checkpoint)
    vision_model_name: str = "google/siglip2-base-patch16-224"
    text_model_name: str = "google/siglip2-base-patch16-224"
    freeze_backbones: bool = True

    # Sequence length: number of past frames, including the current one, used by the temporal model
    max_seq_len: int = 16
    # Temporal sampling stride. Open X mostly has 10 fps and ReWiND uses seq len 16; ours is
    # 30 fps, so a stride of 30 / 10 = 3 covers the same timeframe.
    temporal_sampling_stride: int = 3

    # Model dimensions and transformer
    dim_model: int = 512
    num_layers: int = 4
    num_heads: int = 8
    ff_mult: int = 4  # Feed-forward multiplier, hidden = dim_model * ff_mult
    dropout: float = 0.05

    # --- reward head options (see the HL-Gauss sketch after this config) ---
    use_categorical_rewards: bool = False  # classification over bins
    num_reward_bins: int = 25
    reward_min_value: float = 0.0  # for HL-Gauss range
    reward_max_value: float = 1.0
    use_hl_gauss_loss: bool = True  # if False -> plain regression
    hl_gauss_num_bins: int = 25  # histogram resolution

    # Inference-time subsampling and regularization
    # inference_stride is an extra, second downsampling applied in forward after window
    # sampling/rewind; keep it at 1 to disable extra skipping.
    inference_stride: int = 1
    frame_dropout_p: float = 0.10

    # Training
    learning_rate: float = 5e-4
    weight_decay: float = 0.01
    head_lr_multiplier: float = 5.0
    logit_eps: float = 1e-4
    regularizer_warmup_steps: int = 500

    # Performance optimizations
    use_amp: bool = False
    compile_model: bool = True

    # ReWiND augmentation
    rewind_prob: float = 0.3  # 0.8
    rewind_last3_prob: float = 0.0  # 0.3
    mismatch_prob: float = 0.0  # 0.2

    # Normalization presets
    normalization_mapping: dict[str, NormalizationMode] = field(
        default_factory=lambda: {
            "VISUAL": NormalizationMode.MEAN_STD,
        }
    )

    # Path to episodes.jsonl used for episode boundaries
    episodes_jsonl_path: str | None = "meta/episodes.jsonl"

    def validate_features(self) -> None:
        # Require at least one image feature. Language is recommended but optional (can be blank).
        if not self.image_features:
            raise ValueError(
                "You must provide at least one image feature for RLearN (e.g. 'observation.image')."
            )

    @property
    def observation_delta_indices(self) -> list | None:
        # Request a long enough context so in-window stride sampling can be >1.
        # We ask for (max_seq_len * temporal_sampling_stride) frames ending at t=0.
        # Example: max_seq_len=16, temporal_sampling_stride=3 → 48 deltas → ~46 frames available.
        total_needed = self.max_seq_len * max(1, int(self.temporal_sampling_stride))
        return list(range(1 - total_needed, 1))

    @property
    def action_delta_indices(self) -> list | None:
        # Not an action chunking policy.
        return None

    @property
    def reward_delta_indices(self) -> list | None:
        # ReWiND generates progress labels on the fly and doesn't need reward data
        return None

    def get_optimizer_preset(self):  # type: ignore[override]
        from lerobot.optim.optimizers import AdamWConfig

        return AdamWConfig(lr=self.learning_rate, weight_decay=self.weight_decay)

    def get_scheduler_preset(self):  # type: ignore[override]
        # No scheduler by default.
        return None
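
For the reward head options above, a minimal sketch of HL-Gauss target construction (my own illustration of the general technique, not code from this PR; the `sigma` heuristic is an assumption): scalar rewards are smoothed onto `hl_gauss_num_bins` bins over `[reward_min_value, reward_max_value]` via a Gaussian CDF, and the head can then be trained with cross-entropy against these soft targets instead of plain regression.

```python
import torch

def hl_gauss_targets(
    rewards: torch.Tensor, num_bins: int = 25, vmin: float = 0.0, vmax: float = 1.0, sigma: float | None = None
) -> torch.Tensor:
    """Project scalar rewards (B,) onto soft per-bin probabilities (B, num_bins)."""
    edges = torch.linspace(vmin, vmax, num_bins + 1)
    if sigma is None:
        sigma = 0.75 * (vmax - vmin) / num_bins  # assumed smoothing width (~0.75 bin widths)
    dist = torch.distributions.Normal(rewards[:, None], sigma)
    cdf = dist.cdf(edges[None, :])    # (B, num_bins + 1)
    probs = cdf[:, 1:] - cdf[:, :-1]  # probability mass falling in each bin
    return probs / probs.sum(dim=-1, keepdim=True)

targets = hl_gauss_targets(torch.tensor([0.0, 0.5, 1.0]))
assert targets.shape == (3, 25)
```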
@@ -0,0 +1,392 @@
#!/usr/bin/env python

"""
Standalone evaluation script for RLearN models.

This script evaluates RLearN reward models on episodes from a dataset,
generating comparison plots between ground truth rewards and model predictions.

Usage:
    python src/lerobot/policies/rlearn/eval_script.py --model MODEL_NAME --dataset DATASET_REPO --episodes N

Example:
    python src/lerobot/policies/rlearn/eval_script.py --model pepijn223/rlearn_18 --dataset pepijn223/phone_pipeline_pickup1 --episodes 2
"""

import argparse
import os
import sys
from pathlib import Path

# Add src to path for imports
sys.path.append(str(Path(__file__).parent.parent.parent.parent))

import warnings

import matplotlib.pyplot as plt
import numpy as np
import torch
from scipy.stats import spearmanr
from tqdm import tqdm

warnings.filterwarnings("ignore")

# LeRobot imports
from lerobot.constants import OBS_IMAGE, OBS_IMAGES, OBS_LANGUAGE
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.policies.rlearn.modeling_rlearn import RLearNPolicy


def _to_chw_float01(img):
    """Ensure CHW float in [0, 1]."""
    if isinstance(img, np.ndarray):
        img = torch.from_numpy(img)
    # HWC -> CHW if needed
    if len(img.shape) == 3 and img.shape[-1] in (1, 3, 4):
        img = img.permute(2, 0, 1)
    if img.dtype == torch.uint8:
        img = img.float() / 255.0
    else:
        img = img.float()
    return torch.clamp(img, 0.0, 1.0)


def _get_language(frame_data):
    lang = None
    if OBS_LANGUAGE in frame_data:
        lang = frame_data[OBS_LANGUAGE]
        if isinstance(lang, list) and len(lang) > 0:
            lang = lang[0]
    elif "task" in frame_data:
        lang = frame_data["task"]
    return lang if isinstance(lang, str) else "No language provided"


def _get_ground_truth_reward(frame_data):
    """Try common keys for ground-truth reward. Return None if unavailable."""
    for key in ("reward", "rewards", "gt_reward", "progress"):
        if key in frame_data:
            r = frame_data[key]
            # unwrap single-element lists/arrays
            if isinstance(r, (list, np.ndarray)) and np.array(r).size == 1:
                r = float(np.array(r).reshape(-1)[0])
            try:
                return float(r)
            except Exception:
                pass
    return None


def extract_episode_frames_and_gt(dataset, episode_idx):
    """Load a full episode: frames (T, C, H, W), language (str), gt_rewards (np.ndarray or None)."""
    ep_start = dataset.episode_data_index["from"][episode_idx].item()
    ep_end = dataset.episode_data_index["to"][episode_idx].item()
    T = ep_end - ep_start

    frames = []
    gt_rewards = []
    language = None

    for t in range(T):
        item = dataset[ep_start + t]

        # image(s)
        if OBS_IMAGES in item:
            img = item[OBS_IMAGES]
        elif OBS_IMAGE in item:
            img = item[OBS_IMAGE]
        else:
            # try to find an image-like key
            img_keys = [k for k in item.keys() if "image" in k.lower()]
            if not img_keys:
                continue
            img = item[img_keys[0]]

        frames.append(_to_chw_float01(img))

        # language once
        if language is None:
            language = _get_language(item)

        # ground-truth reward (optional)
        r = _get_ground_truth_reward(item)
        gt_rewards.append(r)

    if not frames:
        return None, None, None

    frames = torch.stack(frames)  # (T, C, H, W)

    # If all GT entries are None, treat as missing
    if all(r is None for r in gt_rewards):
        gt_rewards = None
    else:
        # Replace None by forward filling
        arr = np.array([np.nan if r is None else float(r) for r in gt_rewards], dtype=float)
        # forward/back fill
        if np.isnan(arr[0]):
            first_valid = np.flatnonzero(~np.isnan(arr))
            if len(first_valid) > 0:
                arr[0] = arr[first_valid[0]]
            else:
                arr[0] = 0.0
        for i in range(1, len(arr)):
            if np.isnan(arr[i]):
                arr[i] = arr[i - 1]
        gt_rewards = arr

    return frames, language or "No language provided", gt_rewards


@torch.no_grad()
def predict_rewards_sliding(
    model, frames, language, max_seq_len=16, batch_size=64, device="cuda", temporal_stride: int | None = None
):
    """
    Sliding-window prediction: for each frame i, create a window [max(0, i-L+1) .. i],
    left-pad by repeating the first frame to length L (<= 16), and take the prediction
    corresponding to the current frame's position in the window.
    Returns np.ndarray of shape (T,).
    """
    T = frames.shape[0]
    cfg = getattr(model, "config", object())
    L = int(getattr(cfg, "max_seq_len", max_seq_len))
    L = min(L, max_seq_len)  # hard-cap at 16
    # Use the same temporal stride as training (skip s-1 frames, take 1)
    if temporal_stride is None:
        temporal_stride = int(getattr(cfg, "temporal_sampling_stride", 1))
    temporal_stride = max(1, int(temporal_stride))

    # Preprocessed tensor on device
    frames = frames.to(device)

    windows = []
    frame_positions = []  # Track which temporal position each frame should use
    left_pad_counts = []  # Number of left-pad (OOB) frames per window

    for i in range(T):
        # Build indices with stride s: [..., i-3, i] etc., left-padded by clamping to 0
        idxs = [i - (L - 1 - j) * temporal_stride for j in range(L)]
        pad_needed = sum(1 for k in idxs if k < 0)
        clamped = [0 if k < 0 else (T - 1 if k >= T else k) for k in idxs]
        window = frames[clamped]  # (L, C, H, W)

        # Use the last temporal position (current frame) for reading model output
        frame_pos = L - 1

        windows.append(window)
        frame_positions.append(frame_pos)
        left_pad_counts.append(pad_needed)

    preds = np.zeros(T, dtype=float)

    for s in range(0, T, batch_size):
        e = min(s + batch_size, T)
        batch_windows = torch.stack(windows[s:e])  # (B, L, C, H, W)
        batch_positions = frame_positions[s:e]

        batch = {OBS_IMAGES: batch_windows, OBS_LANGUAGE: [language] * (e - s)}  # expects (B, L, C, H, W)

        # Model returns (B, L) predictions for each temporal position
        values = model.predict_rewards(batch)  # torch.Tensor (B, L)

        # Apply eval-time padding rule: predictions for left-padded (OOB) frames are zero
        if values.dim() == 2 and len(left_pad_counts) >= (e - s):
            for b_idx in range(e - s):
                pad_n = left_pad_counts[s + b_idx]
                if pad_n > 0:
                    values[b_idx, :pad_n] = 0.0

        if values.dim() == 2:
            # Extract the prediction corresponding to each frame's position in its window
            batch_preds = []
            for b_idx, pos in enumerate(batch_positions):
                batch_preds.append(values[b_idx, pos].item())
            preds[s:e] = np.array(batch_preds)
        else:
            # Fallback: if model returns (B,), use as is
            preds[s:e] = values.detach().float().cpu().numpy()

    return preds
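
To see what the index construction above does, a small worked example with made-up values (i=5, L=4, stride=2):

```python
i, L, temporal_stride, T = 5, 4, 2, 100  # hypothetical values
idxs = [i - (L - 1 - j) * temporal_stride for j in range(L)]
# idxs == [-1, 1, 3, 5]: one out-of-bounds index on the left
pad_needed = sum(1 for k in idxs if k < 0)  # 1
clamped = [0 if k < 0 else (T - 1 if k >= T else k) for k in idxs]
# clamped == [0, 1, 3, 5]: the OOB slot repeats frame 0, and its
# prediction is later zeroed by the eval-time padding rule above
```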


def plot_episode_eval(episode_idx, gt, pred, language, save_path=None, show=False, title_prefix="RLearN Eval"):
    """Plot GT vs Predicted over time. Saves PNG if save_path is provided."""
    T = len(pred)
    x = np.arange(T)

    plt.figure(figsize=(14, 8))
    plt.plot(x, pred, linewidth=2.5, marker="o", markersize=3, label="Predicted Reward", color="blue")

    if gt is not None:
        plt.plot(x, gt, linestyle="--", linewidth=2.5, label="Ground-Truth Reward", color="orange")
        # Correlation between GT and Pred
        corr, p = spearmanr(gt, pred)
        corr_str = f"ρ(GT, Pred) = {0.0 if np.isnan(corr) else corr:.3f} (p={0.0 if np.isnan(p) else p:.3f})"
    else:
        expected = np.linspace(0, 1, T)
        plt.plot(x, expected, linestyle="--", linewidth=2.5, label="Expected Progress (0→1)", color="orange")
        corr, p = spearmanr(x, pred)
        corr_str = f"VOC-S ρ(t, Pred) = {0.0 if np.isnan(corr) else corr:.3f} (p={0.0 if np.isnan(p) else p:.3f})"

    plt.title(f"{title_prefix} — Episode {episode_idx}\n{language}\n{corr_str}", fontsize=14)
    plt.xlabel("Frame Index", fontsize=12)
    plt.ylabel("Reward / Progress", fontsize=12)
    plt.legend(fontsize=11)
    plt.grid(True, alpha=0.3)
    plt.tight_layout()

    if save_path is not None:
        plt.savefig(save_path, dpi=200, bbox_inches="tight")
        print(f"Saved eval image to: {save_path}")

    if show:
        plt.show()
    else:
        plt.close()


def eval_episode_sliding(
    episode_idx, dataset, model, save_dir=".", device="cuda", max_seq_len=16, batch_size=64, title_prefix="RLearN Eval"
):
    """End-to-end: load episode, predict with sliding 16-frame windows, and save PNG."""
    frames, language, gt = extract_episode_frames_and_gt(dataset, episode_idx)
    if frames is None:
        print(f"[Episode {episode_idx}] No frames found.")
        return None

    model.eval()

    pred = predict_rewards_sliding(
        model=model, frames=frames, language=language, max_seq_len=max_seq_len, batch_size=batch_size, device=device
    )

    # Basic stats
    print(f"Episode {episode_idx}: T={len(pred)}, pred∈[{pred.min():.3f},{pred.max():.3f}]")
    if gt is not None:
        print(f"GT available: gt∈[{np.nanmin(gt):.3f},{np.nanmax(gt):.3f}]")

    save_path = f"{save_dir}/episode_{episode_idx:04d}_eval.png"
    plot_episode_eval(
        episode_idx=episode_idx, gt=gt, pred=pred, language=language, save_path=save_path, show=False, title_prefix=title_prefix
    )
    return save_path


def main():
    """Main evaluation script for RLearN models."""
    # Parse command line arguments
    parser = argparse.ArgumentParser(description="Evaluate RLearN model on episodes with GT vs Predicted rewards")
    parser.add_argument("--model", type=str, required=True, help="Model name/path (e.g., pepijn223/rlearn_mse5)")
    parser.add_argument("--dataset", type=str, required=True, help="Dataset repo (e.g., pepijn223/phone_pipeline_pickup1)")
    parser.add_argument("--episodes", type=int, default=5, help="Number of episodes to evaluate")
    parser.add_argument("--output", type=str, default="./eval_results", help="Output directory for images")
    parser.add_argument(
        "--device",
        type=str,
        default="cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu",
        help="Device to use",
    )
    parser.add_argument("--batch_size", type=int, default=32, help="Batch size for sliding window evaluation")

    args = parser.parse_args()

    # Create output directory
    output_dir = Path(args.output)
    output_dir.mkdir(parents=True, exist_ok=True)

    print("🎯 RLearN Model Evaluation")
    print("=" * 60)
    print(f"Model: {args.model}")
    print(f"Dataset: {args.dataset}")
    print(f"Episodes: {args.episodes}")
    print(f"Device: {args.device}")
    print(f"Output: {output_dir}")
    print("=" * 60)

    try:
        # Load dataset
        print("📁 Loading dataset...")

        dataset = LeRobotDataset(
            repo_id=args.dataset,
            episodes=list(range(min(args.episodes, 50))),  # Load enough episodes
            download_videos=True,
        )

        print(f"✅ Dataset loaded: {dataset.num_episodes} episodes, {dataset.num_frames} frames")
        print(f"   Features: {list(dataset.features.keys())}")
        print(f"   FPS: {dataset.fps}")

        # Load model
        print("\n🤖 Loading model...")

        model = RLearNPolicy.from_pretrained(args.model)
        model = model.to(args.device)
        model.eval()

        print(f"✅ Model loaded on {args.device}")
        print(f"   Parameters: {sum(p.numel() for p in model.parameters()):,}")
        print(f"   Trainable: {sum(p.numel() for p in model.parameters() if p.requires_grad):,}")
        print(f"   Max sequence length: {model.config.max_seq_len}")

        # Select episodes to evaluate
        total_available = min(dataset.num_episodes, args.episodes)
        episode_indices = list(range(total_available))

        print(f"\n📊 Evaluating {len(episode_indices)} episodes...")
        print("=" * 60)

        # Run sliding window evaluation on each episode
        saved_paths = []
        for i, ep_idx in enumerate(episode_indices):
            print(f"\n[{i + 1}/{len(episode_indices)}] Processing Episode {ep_idx}")
            print("-" * 40)

            try:
                save_path = eval_episode_sliding(
                    episode_idx=ep_idx,
                    dataset=dataset,
                    model=model,
                    save_dir=str(output_dir),
                    device=args.device,
                    batch_size=args.batch_size,
                    title_prefix="RLearN Ground Truth vs Predicted",
                )

                if save_path:
                    saved_paths.append(save_path)

            except Exception as e:
                print(f"❌ Error processing episode {ep_idx}: {e}")
                import traceback

                traceback.print_exc()
                continue

        # Summary
        print("\n" + "=" * 60)
        print("✅ EVALUATION COMPLETE")
        print(f"📈 Generated {len(saved_paths)} evaluation plots")
        print(f"📁 Results saved to: {output_dir}")
        print("\nGenerated files:")
        for path in saved_paths:
            print(f"  • {path}")

        if saved_paths:
            print("\n💡 View the plots to compare ground truth vs predicted rewards!")
            print("   Each plot shows the model's sliding 16-frame window predictions")
            print("   against available ground truth rewards over the episode timeline.")

        return 0

    except Exception as e:
        print(f"❌ Error during evaluation: {e}")
        import traceback

        traceback.print_exc()
        return 1


if __name__ == "__main__":
    exit(main())
File diff suppressed because it is too large
@@ -0,0 +1,128 @@

#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass
from typing import Any

from lerobot.configs.types import PolicyFeature
from lerobot.constants import OBS_LANGUAGE
from lerobot.policies.rlearn.configuration_rlearn import RLearNConfig
from lerobot.processor import (
    DeviceProcessor,
    NormalizerProcessor,
    RenameProcessor,
    RobotProcessor,
    ToBatchProcessor,
    TokenizerProcessor,
    UnnormalizerProcessor,
)
from lerobot.processor.pipeline import (
    ComplementaryDataProcessor,
    EnvTransition,
    ProcessorStepRegistry,
    TransitionKey,
)


def make_rlearn_processor(
    config: RLearNConfig, dataset_stats: dict[str, dict[str, Any]] | None = None
) -> tuple[RobotProcessor, RobotProcessor]:
    """Build pre/post processors for RLearN.

    Responsibilities moved out of the model:
    - Normalize inputs (images) using dataset stats
    - Ensure batching
    - Map complementary_data.task to observation.language when available
    - Tokenize language into observation.language.tokens / attention_mask
    - Move to/from device
    """

    input_steps = [
        # No renaming by default, but keep for future extensibility
        RenameProcessor(rename_map={}),
        # Heavy normalization is deferred until after the device transfer (see below)
        ToBatchProcessor(),
        RLearnLanguageFromTaskProcessor(),
        # Use the SigLIP2 tokenizer to keep the vocab aligned with the text tower
        TokenizerProcessor(
            tokenizer_name=config.text_model_name,
            max_length=64,
            padding="max_length",
            truncation=True,
            padding_side="right",
        ),
        DeviceProcessor(device=config.device),
        # Normalization runs after the GPU transfer to use GPU acceleration
        NormalizerProcessor(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
    ]

    output_steps = [
        DeviceProcessor(device="cpu"),
        UnnormalizerProcessor(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
    ]

    return RobotProcessor(steps=input_steps, name="robot_preprocessor"), RobotProcessor(
        steps=output_steps, name="robot_postprocessor"
    )
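
# A minimal usage sketch (hedged): `cfg` and `stats` below are hypothetical
# placeholders for an RLearNConfig instance and LeRobotDataset statistics,
# not names defined in this module.
#
#   preprocessor, postprocessor = make_rlearn_processor(cfg, stats)
#   transition = preprocessor(transition)   # batched, tokenized, normalized, on device
#   transition = postprocessor(transition)  # back on CPU, outputs unnormalized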


@dataclass
@ProcessorStepRegistry.register(name="rlearn_language_from_task")
class RLearnLanguageFromTaskProcessor(ComplementaryDataProcessor):
    """Copy complementary_data['task'] into observation['observation.language'] if present.

    This ensures the model can consume a raw language string when tokenization is not used,
    while TokenizerProcessor can still create tokenized fields.
    """

    task_key: str = "task"

    def __call__(self, transition: EnvTransition) -> EnvTransition:  # type: ignore[override]
        complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA)
        if not complementary_data or self.task_key not in complementary_data:
            return transition

        task = complementary_data.get(self.task_key)
        if task is None:
            return transition

        # Normalize to list[str]
        if isinstance(task, str):
            task_list = [task]
        elif isinstance(task, list) and all(isinstance(t, str) for t in task):
            task_list = task
        else:
            return transition

        observation = transition.get(TransitionKey.OBSERVATION) or {}
        # Do not overwrite if the user already provided observation.language
        if OBS_LANGUAGE not in observation:
            observation[OBS_LANGUAGE] = task_list
            transition[TransitionKey.OBSERVATION] = observation
        return transition

    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:  # noqa: D401
        # Adds nothing to features; only mirrors complementary_data.task into observation
        return features

    def get_config(self) -> dict[str, Any]:
        return {"task_key": self.task_key}

@@ -0,0 +1,101 @@

## General Value/Reward Learning:

I want to implement a general/universal vision-and-language value function or reward model for robotics/video tasks, also called a video-language-conditioned reward model. Integrate with existing LeRobot code where convenient, use the LeRobot Dataset for data, and store the reward for a frame in the LeRobot frame itself.

Inspired by these papers:

- ReWiND: https://arxiv.org/pdf/2505.10911 (most applicable and the main paper I want to implement ideas from) and code: https://github.com/lucidrains/rewind-reward-pytorch
- LIV: https://arxiv.org/pdf/2306.00958 (most applicable and the 2nd main paper I want to implement ideas from) and code: https://github.com/penn-pal-lab/LI
- VLC, Video-Language Critic: Transferable Reward Functions for Language-Conditioned Robotics: https://arxiv.org/pdf/2405.19988 (most applicable and the 3rd paper I want to implement ideas from) and code: https://github.com/minttusofia/video_language_critic

And these papers, which are also relevant:

- https://www.dyna.co/dyna-1/research (main company whose eventual results I want to reproduce)
- VIP: https://arxiv.org/pdf/2210.00030
- UVD: https://arxiv.org/pdf/2310.08581
- VLM in-context: https://arxiv.org/pdf/2411.04549
- https://www.youtube.com/watch?v=JfZYtpEisoM

Less relevant but still similar papers:

- Learning Generalizable Robotic Reward Functions from "In-The-Wild" Human Videos
- XIRL: Cross-embodiment Inverse Reinforcement Learning
- Video-Language Critic: Transferable Reward Functions for Language-Conditioned Robotics: https://arxiv.org/pdf/2405.19988
- LOReL, Language-Driven Representation Learning for Robotics: https://sites.google.com/view/robotlorel
- RoboCLIP: One Demonstration is Enough to Learn Robot Policies: https://arxiv.org/pdf/2310.07899
- Points2Rewards: learns keypoints first, then uses the keypoints to learn a general value function/policy: https://semrob.github.io/docs/2025_rss_semrob.github.io_paper20.pdf
- Language-Driven Representation Learning for Robotics: https://arxiv.org/pdf/2302.12766v1
- R3M: A Universal Visual Representation for Robot Manipulation: https://arxiv.org/pdf/2203.12601v3

Input should be the current image or the whole video plus the task goal specified in text/language. Output is the current reward.

Architecture (a minimal sketch of the temporal module follows this list):

- Inputs: video o_{1:T} (or current o_{1:t}), language z
- DINOv3 ViT-B/16 (86M params) for vision encoding: https://huggingface.co/facebook/dinov3-vitb16-pretrain-lvd1689m
- sentence-transformers/all-MiniLM-L12-v2 for text encoding: https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2
- Temporal module: small causal transformer ("cross-modal sequential aggregator"), with first-frame positional embedding (to avoid position cheating), frame dropout, and stride sampling; outputs per-timestep logits.
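
A minimal, hedged sketch of the cross-modal sequential aggregator described above. The dimensions and encoder handling are assumptions (frozen DINOv3 and MiniLM embeddings are taken as precomputed inputs, and stride sampling is left to the data loader); `nn.TransformerEncoder` with a causal mask stands in for whatever small causal transformer is ultimately used:

```python
import torch
from torch import nn


class CrossModalAggregator(nn.Module):
    """Causal transformer over [language, frame_1, ..., frame_T] tokens -> per-timestep reward logits."""

    def __init__(self, vision_dim=768, text_dim=384, dim=512, layers=4, heads=8):
        super().__init__()
        self.vision_proj = nn.Linear(vision_dim, dim)  # DINOv3 ViT-B/16 features (assumed precomputed)
        self.text_proj = nn.Linear(text_dim, dim)      # MiniLM sentence embedding (assumed precomputed)
        self.first_frame_pos = nn.Parameter(torch.zeros(dim))  # only frame 0 gets a positional embedding
        layer = nn.TransformerEncoderLayer(dim, heads, dim * 4, batch_first=True)
        self.temporal = nn.TransformerEncoder(layer, layers)
        self.head = nn.Linear(dim, 1)

    def forward(self, frame_emb, text_emb, frame_dropout=0.0):
        # frame_emb: (B, T, vision_dim); text_emb: (B, text_dim)
        x = self.vision_proj(frame_emb)
        if self.training and frame_dropout > 0:
            # Zeroing random frames is one simple reading of "frame dropout"
            keep = torch.rand(x.shape[:2], device=x.device) > frame_dropout
            x = x * keep.unsqueeze(-1)
        x[:, 0] = x[:, 0] + self.first_frame_pos
        tokens = torch.cat([self.text_proj(text_emb).unsqueeze(1), x], dim=1)
        mask = nn.Transformer.generate_square_subsequent_mask(tokens.shape[1]).to(x.device)
        out = self.temporal(tokens, mask=mask)   # causal: frame t sees language + frames <= t
        return self.head(out[:, 1:]).squeeze(-1)  # (B, T) per-timestep logits
```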

Loss: see this ChatGPT thread: https://chatgpt.com/s/t_68999a50a0b081919abc365cdd205e01

Past images: a reward model for "go to the 3rd floor" has to know what floor it started on and what past actions it took. Can we attend to or incorporate images of past decisions in some way? Maybe via this paper: Learning Long-Context Diffusion Policies via Past-Token Prediction.

Amount of frames needed for test/generalization: 1M frames? Or ~20% of IPEC-COMMUNITY/bc_z_lerobot.

Eval: implement something like the VOC score, or a rank-order correlation between the learned reward and the environment reward from sim, or use something else for additional evaluation. A hedged sketch of such a metric follows.
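
A minimal sketch of a rank-based evaluation, assuming per-frame predicted rewards and ground-truth progress labels for one episode are available as arrays; `scipy.stats.spearmanr` is used here as a stand-in for the exact VOC formulation:

```python
import numpy as np
from scipy.stats import spearmanr


def rank_order_correlation(pred_rewards: np.ndarray, gt_progress: np.ndarray) -> float:
    """Spearman rank correlation between predicted rewards and ground-truth progress.

    A value near 1.0 means the model ranks frames in the same order as true progress,
    which is what matters for policy learning and success detection.
    """
    rho, _ = spearmanr(pred_rewards, gt_progress)
    return float(rho)


# Hypothetical usage on one episode:
# gt = np.linspace(0.0, 1.0, num_frames)        # ReWiND-style 0 -> 1 progress labels
# rho = rank_order_correlation(model_rewards, gt)
```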

Ideas:

- Incorporate training on multiple horizons, i.e. label the same dataset at several horizons: make a sandwich (long), put cheese on bread (medium), and even shorter horizons: go down or close gripper (short).
- Incorporate navigation goals like "walk towards the kitchen". Make sure we fix CLIP's contrastive-learning issue with positional text, where the model doesn't learn the difference between "horse right of cow" and "horse left of cow", or "move right". Potentially train with more diverse data or even actionable world models such as Genie 3 (https://deepmind.google/discover/blog/genie-3-a-new-frontier-for-world-models/). A sketch of mining spatial hard negatives follows this list.
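
One cheap way to attack the positional-text issue is to generate hard-negative captions by swapping spatial words and train the text side contrastively against them. This is a hedged sketch; the word list and swap rule are illustrative, not from any of the cited papers:

```python
# Illustrative swap table for spatial hard negatives
SPATIAL_SWAPS = {"left": "right", "right": "left", "above": "below", "below": "above"}


def spatial_hard_negative(caption: str) -> str | None:
    """Return the caption with spatial words flipped, or None if no spatial word is present."""
    words = caption.split()
    swapped = [SPATIAL_SWAPS.get(w.lower(), w) for w in words]
    return " ".join(swapped) if swapped != words else None


# "horse right of cow" -> "horse left of cow"; usable as an extra negative in the contrastive loss
```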

How to use a general reward model (use cases):

- Train an RL policy on it
- Success detection
- Do exploration
- Do a task via planning and search to optimize reward
- Filter out bad episodes in large imitation-learning datasets (a sketch follows)
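
For the last use case, a hedged sketch of dataset filtering; `score_episode` is a hypothetical helper that runs the reward model over one episode, and the threshold is arbitrary:

```python
def filter_episodes(episodes, score_episode, min_final_reward: float = 0.8):
    """Keep episodes whose predicted final progress/reward is high enough.

    `score_episode(ep)` is assumed to return the model's per-frame rewards for `ep`;
    an episode whose final reward never gets close to 1.0 likely failed the task.
    """
    kept = [ep for ep in episodes if score_episode(ep)[-1] >= min_final_reward]
    print(f"kept {len(kept)}/{len(episodes)} episodes")
    return kept
```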

Potential datasets (start with the dataset that is cleanest for this and works best with the chosen way of doing evals):

- Epic-Kitchens-100
- Something-Something v2 dataset: https://www.qualcomm.com/developer/software/something-something-v-2-dataset
- Ego4D (3000 hours)
- Open X-Embodiment (OXE)
- AgiBot World: https://huggingface.co/datasets/agibot-world/AgiBotWorld-Alpha
- Galaxea AI dataset: https://opengalaxea.github.io/G0/
- GTEA+ Gaze: https://cbs.ic.gatech.edu/fpv/
- YouCook2 dataset
- HowTo100M: https://www.di.ens.fr/willow/research/howto100m/
- Genie-generated dataset?

### TODOs:

- Implement first architecture [x]
- Implement processors [x]
- Choose right loss metric(s) [x]
- Make the dataset (IPEC-COMMUNITY/bc_z_lerobot) ready in LeRobot format with the script that generated it (and be able to visualize it in the dataset visualizer)
  - Annotate with ReWiND-style 0→1 progress rewards [x]
  - Visualize to check [x]
- Implement an eval score or metric that is robust, can deal with generalization, and is a good metric for trying different architectures. Use it in an eval Jupyter notebook with visualization of the live reward next to the video for part of the dataset: VOC score and score with correct and incorrect language captions [x]
- Do first training [x]
- Implement on-the-fly progress label generation (no need for pre-annotated rewards) [x] (see the progress-label sketch after this list)
- Try different losses
  - Only ReWiND loss [x]
  - Exactly similar to: https://github.com/lucidrains/rewind-reward-pytorch/blob/main/rewind_reward_pytorch/rewind_reward.py#L11 [x]
- Try DINOv2 Base (86M) as encoder with https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2 [x]
- Test ReWiND (evaluate) [x]
- Benchmark SigLIP 2 vs this implementation's forward pass, debug speed [x]
- Use SigLIP 2 [x]
- Fix evaluation bug !!! []
- Fix sample episode padding bug !!! []
- Overfit on one episode []
- Clean up code? [] + enable language loss
- Convert `python -m lerobot.datasets.v21.convert_dataset_v20_to_v21 --repo-id=IPEC-COMMUNITY/bc_z_lerobot` and train on 1 percent
  - Then on 10 percent []
- Ablation: 16 successive frames vs 16 frames sampled with stride 2 or 4 []
- Add more artificial text to the dataset, generated by a VLM (Google Gemini) []
  - See Google Gemini VLM caption [] https://gemini.google.com/app/7e332ffaf32580f2
  - Multiple captions per video, create a method to generate as much data as possible, etc. [] https://arxiv.org/abs/2508.13446, https://arxiv.org/pdf/2412.04453
- Add other datasets from OXE mentioned in ReWiND []
- Extend evaluation []
- Ablation for size of vision encoder, language encoder, temporal head []
- Ablation: one MLP head per frame vs a single MLP head []
- Add other datasets mentioned here []
- How can we improve spatially aware learning? Solve the issue of contrastive learning and position []
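
The on-the-fly progress labels mentioned above can be generated per batch rather than stored in the dataset. A hedged sketch of the ReWiND-style scheme: linear 0→1 progress for forward clips, with a decreasing tail as one assumption of how the rewind augmentation could be labeled here:

```python
import torch


def progress_labels(num_frames: int, rewind_from: int | None = None) -> torch.Tensor:
    """Linear 0 -> 1 progress targets; optionally decreasing after `rewind_from`.

    A rewound clip replays earlier frames in reverse order after frame `rewind_from`,
    so targets walk back down, teaching the model that undone progress lowers reward.
    """
    labels = torch.linspace(0.0, 1.0, num_frames)
    if rewind_from is not None:
        step = labels[1] - labels[0]
        down = labels[rewind_from] - step * torch.arange(1, num_frames - rewind_from + 1)
        labels = torch.cat([labels[: rewind_from + 1], down.clamp(min=0.0)])[:num_frames]
    return labels
```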
@@ -28,7 +28,6 @@ import torch.nn.functional as F  # noqa: N812
from torch import Tensor
from torch.distributions import MultivariateNormal, TanhTransform, Transform, TransformedDistribution

from lerobot.policies.normalize import NormalizeBuffer
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.policies.sac.configuration_sac import SACConfig, is_image_feature
from lerobot.policies.utils import get_device_from_parameters
@@ -45,7 +44,6 @@ class SACPolicy(
    def __init__(
        self,
        config: SACConfig | None = None,
        dataset_stats: dict[str, dict[str, Tensor]] | None = None,
    ):
        super().__init__(config)
        config.validate_features()
@@ -53,7 +51,6 @@ class SACPolicy(

        # Determine action dimension and initialize all components
        continuous_action_dim = config.output_features["action"].shape[0]
        self._init_normalization(dataset_stats)
        self._init_encoders()
        self._init_critics(continuous_action_dim)
        self._init_actor(continuous_action_dim)
@@ -88,8 +85,7 @@ class SACPolicy(

        observations_features = None
        if self.shared_encoder and self.actor.encoder.has_images:
            # Cache and normalize image features
            observations_features = self.actor.encoder.get_cached_image_features(batch, normalize=True)
            observations_features = self.actor.encoder.get_cached_image_features(batch)

        actions, _, _ = self.actor(batch, observations_features)

@@ -391,28 +387,12 @@ class SACPolicy(
        actor_loss = ((self.temperature * log_probs) - min_q_preds).mean()
        return actor_loss

    def _init_normalization(self, dataset_stats):
        """Initialize input/output normalization modules."""
        self.normalize_inputs = nn.Identity()
        self.normalize_targets = nn.Identity()
        if self.config.dataset_stats is not None:
            params = _convert_normalization_params_to_tensor(self.config.dataset_stats)
            self.normalize_inputs = NormalizeBuffer(
                self.config.input_features, self.config.normalization_mapping, params
            )
            stats = dataset_stats or params
            self.normalize_targets = NormalizeBuffer(
                self.config.output_features, self.config.normalization_mapping, stats
            )

    def _init_encoders(self):
        """Initialize shared or separate encoders for actor and critic."""
        self.shared_encoder = self.config.shared_encoder
        self.encoder_critic = SACObservationEncoder(self.config, self.normalize_inputs)
        self.encoder_critic = SACObservationEncoder(self.config)
        self.encoder_actor = (
            self.encoder_critic
            if self.shared_encoder
            else SACObservationEncoder(self.config, self.normalize_inputs)
            self.encoder_critic if self.shared_encoder else SACObservationEncoder(self.config)
        )

    def _init_critics(self, continuous_action_dim):
@@ -424,9 +404,7 @@ class SACPolicy(
            )
            for _ in range(self.config.num_critics)
        ]
        self.critic_ensemble = CriticEnsemble(
            encoder=self.encoder_critic, ensemble=heads, output_normalization=self.normalize_targets
        )
        self.critic_ensemble = CriticEnsemble(encoder=self.encoder_critic, ensemble=heads)
        target_heads = [
            CriticHead(
                input_dim=self.encoder_critic.output_dim + continuous_action_dim,
@@ -434,9 +412,7 @@ class SACPolicy(
            )
            for _ in range(self.config.num_critics)
        ]
        self.critic_target = CriticEnsemble(
            encoder=self.encoder_critic, ensemble=target_heads, output_normalization=self.normalize_targets
        )
        self.critic_target = CriticEnsemble(encoder=self.encoder_critic, ensemble=target_heads)
        self.critic_target.load_state_dict(self.critic_ensemble.state_dict())

        if self.config.use_torch_compile:
@@ -490,10 +466,9 @@ class SACPolicy(
class SACObservationEncoder(nn.Module):
    """Encode image and/or state vector observations."""

    def __init__(self, config: SACConfig, input_normalizer: nn.Module) -> None:
    def __init__(self, config: SACConfig) -> None:
        super().__init__()
        self.config = config
        self.input_normalization = input_normalizer
        self._init_image_layers()
        self._init_state_layers()
        self._compute_output_dim()
@@ -568,11 +543,10 @@ class SACObservationEncoder(nn.Module):
    def forward(
        self, obs: dict[str, Tensor], cache: dict[str, Tensor] | None = None, detach: bool = False
    ) -> Tensor:
        obs = self.input_normalization(obs)
        parts = []
        if self.has_images:
            if cache is None:
                cache = self.get_cached_image_features(obs, normalize=False)
                cache = self.get_cached_image_features(obs)
            parts.append(self._encode_images(cache, detach))
        if self.has_env:
            parts.append(self.env_encoder(obs["observation.environment_state"]))
@@ -585,7 +559,7 @@ class SACObservationEncoder(nn.Module):
                "No parts to concatenate, you should have at least one image or environment state or state"
            )

    def get_cached_image_features(self, obs: dict[str, Tensor], normalize: bool = False) -> dict[str, Tensor]:
    def get_cached_image_features(self, obs: dict[str, Tensor]) -> dict[str, Tensor]:
        """Extract and optionally cache image features from observations.

        This function processes image observations through the vision encoder once and returns
@@ -597,26 +571,17 @@ class SACObservationEncoder(nn.Module):
        - The vision encoder forward pass is typically the main computational bottleneck during training and inference
        - Caching these features can provide 2-4x speedup in training and inference

        Normalization behavior:
        - When called from inside forward(): set normalize=False since inputs are already normalized
        - When called from outside forward(): set normalize=True to ensure proper input normalization

        Usage patterns:
        - Called in select_action() with normalize=True
        - Called in select_action()
        - Called in learner.py's get_observation_features() to pre-compute features for all policy components
        - Called internally by forward() with normalize=False
        - Called internally by forward()

        Args:
            obs: Dictionary of observation tensors containing image keys
            normalize: Whether to normalize observations before encoding
                Set to True when calling directly from outside the encoder's forward method
                Set to False when calling from within forward() where inputs are already normalized

        Returns:
            Dictionary mapping image keys to their corresponding encoded features
        """
        if normalize:
            obs = self.input_normalization(obs)
        batched = torch.cat([obs[k] for k in self.image_keys], dim=0)
        out = self.image_encoder(batched)
        chunks = torch.chunk(out, len(self.image_keys), dim=0)
@@ -747,7 +712,6 @@ class CriticEnsemble(nn.Module):
    Args:
        encoder (SACObservationEncoder): encoder for observations.
        ensemble (List[CriticHead]): list of critic heads.
        output_normalization (nn.Module): normalization layer for actions.
        init_final (float | None): optional initializer scale for final layers.

    Forward returns a tensor of shape (num_critics, batch_size) containing Q-values.
@@ -757,13 +721,11 @@ class CriticEnsemble(nn.Module):
        self,
        encoder: SACObservationEncoder,
        ensemble: list[CriticHead],
        output_normalization: nn.Module,
        init_final: float | None = None,
    ):
        super().__init__()
        self.encoder = encoder
        self.init_final = init_final
        self.output_normalization = output_normalization
        self.critics = nn.ModuleList(ensemble)

    def forward(
@@ -775,11 +737,6 @@ class CriticEnsemble(nn.Module):
        device = get_device_from_parameters(self)
        # Move each tensor in observations to device
        observations = {k: v.to(device) for k, v in observations.items()}
        # NOTE: We normalize actions because it helps sample efficiency
        actions: dict[str, torch.tensor] = {"action": actions}
        # NOTE: The normalization layer takes a dict as input and returns a dict, which is why
        actions = self.output_normalization(actions)["action"]
        actions = actions.to(device)

        obs_enc = self.encoder(observations, cache=observation_features)

@@ -0,0 +1,52 @@

#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from lerobot.policies.sac.configuration_sac import SACConfig
from lerobot.processor import (
    DeviceProcessor,
    NormalizerProcessor,
    RenameProcessor,
    RobotProcessor,
    ToBatchProcessor,
    UnnormalizerProcessor,
)


def make_sac_processor(
    config: SACConfig, dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None
) -> tuple[RobotProcessor, RobotProcessor]:
    input_steps = [
        RenameProcessor(rename_map={}),
        NormalizerProcessor(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
        ToBatchProcessor(),
        DeviceProcessor(device=config.device),
    ]
    output_steps = [
        DeviceProcessor(device="cpu"),
        UnnormalizerProcessor(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
    ]
    return RobotProcessor(steps=input_steps, name="robot_preprocessor"), RobotProcessor(
        steps=output_steps, name="robot_postprocessor"
    )

@@ -20,7 +20,6 @@ import torch
from torch import Tensor, nn

from lerobot.constants import OBS_IMAGE, REWARD
from lerobot.policies.normalize import Normalize, Unnormalize
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig

@@ -108,22 +107,12 @@ class Classifier(PreTrainedPolicy):
    def __init__(
        self,
        config: RewardClassifierConfig,
        dataset_stats: dict[str, dict[str, Tensor]] | None = None,
    ):
        from transformers import AutoModel

        super().__init__(config)
        self.config = config

        # Initialize normalization (standardized with the policy framework)
        self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
        self.normalize_targets = Normalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )
        self.unnormalize_outputs = Unnormalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )

        # Set up encoder
        encoder = AutoModel.from_pretrained(self.config.model_name, trust_remote_code=True)
        # Extract vision model if we're given a multimodal model
@@ -247,10 +236,6 @@ class Classifier(PreTrainedPolicy):

    def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict[str, Tensor]]:
        """Standard forward pass for training compatible with train.py."""
        # Normalize inputs if needed
        batch = self.normalize_inputs(batch)
        batch = self.normalize_targets(batch)

        # Extract images and labels
        images, labels = self.extract_images_and_labels(batch)

@@ -0,0 +1,42 @@

#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig
from lerobot.processor import (
    DeviceProcessor,
    IdentityProcessor,
    NormalizerProcessor,
    RobotProcessor,
)


def make_classifier_processor(
    config: RewardClassifierConfig, dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None
) -> tuple[RobotProcessor, RobotProcessor]:
    input_steps = [
        NormalizerProcessor(
            features=config.input_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
        NormalizerProcessor(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
        DeviceProcessor(device=config.device),
    ]
    output_steps = [DeviceProcessor(device="cpu"), IdentityProcessor()]
    return RobotProcessor(steps=input_steps, name="classifier_preprocessor"), RobotProcessor(
        steps=output_steps, name="classifier_postprocessor"
    )

@@ -28,7 +28,7 @@ pip install -e ".[smolvla]"

Example of finetuning the smolvla pretrained model (`smolvla_base`):
```bash
lerobot-train \
python -m lerobot.scripts.train \
  --policy.path=lerobot/smolvla_base \
  --dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \
  --batch_size=64 \
@@ -38,7 +38,7 @@ lerobot-train \
Example of finetuning a SmolVLA. SmolVLA is composed of a pretrained VLM
and an action expert.
```bash
lerobot-train \
python -m lerobot.scripts.train \
  --policy.type=smolvla \
  --dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \
  --batch_size=64 \
@@ -53,21 +53,13 @@ policy = SmolVLAPolicy.from_pretrained("lerobot/smolvla_base")
"""

import math
import os
import re
from collections import deque

import safetensors
import torch
import torch.nn.functional as F  # noqa: N812
from torch import Tensor, nn
from transformers import AutoProcessor

from lerobot.constants import ACTION, OBS_STATE
from lerobot.policies.normalize import (
    Normalize,
    Unnormalize,
)
from lerobot.constants import ACTION, OBS_LANGUAGE, OBS_STATE
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.policies.smolvla.configuration_smolvla import SmolVLAConfig
from lerobot.policies.smolvla.smolvlm_with_expert import SmolVLMWithExpertModel
@@ -76,102 +68,6 @@ from lerobot.policies.utils import (
)
from lerobot.utils.utils import get_safe_dtype

# Matches ".soNNN", optionally followed by "-something", up to the "_buffer_" marker
_VARIANT_RE = re.compile(r"\.so\d+(?:-[\w]+)?_buffer_")


def canonicalise(k: str) -> str:
    """
    Remove dataset-variant markers like '.so100-blue_' or '.so100_' from a
    normalisation-buffer key.
    """
    return _VARIANT_RE.sub(".buffer_", k)


def standardise_state_dict(
    checkpoint: dict[str, torch.Tensor], ref_keys: set[str], *, verbose: bool = True
) -> tuple[dict[str, torch.Tensor], list[str]]:
    """
    • Re-keys `checkpoint` so that every entry matches the *reference* key set.
    • If several variant keys collapse to the same canonical name, we keep the
      first one and log the collision.
    • Returns the new dict + a list of entries that could not be matched.
    """
    out, collisions, unmatched = {}, {}, []

    for k, v in checkpoint.items():
        canon = canonicalise(k)
        if canon in ref_keys:
            if canon in out:  # duplicate after collapsing
                collisions.setdefault(canon, []).append(k)
            else:
                out[canon] = v
        else:
            unmatched.append(k)

    if verbose:
        for canon, variants in collisions.items():
            print(f"[standardise_state_dict] '{canon}' ← {variants}")
        if unmatched:
            print(f"[standardise_state_dict] kept {len(unmatched)} unmatched keys")

    out.update({k: checkpoint[k] for k in unmatched})
    return out, unmatched


def rename_checkpoint_keys(checkpoint: dict, rename_str: str):
    """
    Renames keys in a checkpoint dictionary based on the given rename string.

    Args:
        checkpoint (dict): The checkpoint dictionary.
        rename_str (str): A string specifying key mappings in the format "old1//new1,old2//new2".

    Returns:
        dict: The modified checkpoint with renamed keys.
    """

    rename_dict = dict(pair.split("//") for pair in rename_str.split(","))

    new_checkpoint = {}
    for k, v in checkpoint.items():
        for old_key, new_key in rename_dict.items():
            if old_key in k:
                k = k.replace(old_key, new_key)
        new_checkpoint[k] = v
    return new_checkpoint


def load_smolvla(
    model: torch.nn.Module,
    filename: str | os.PathLike,
    *,
    device: str = "cpu",
    checkpoint_keys_mapping: str = "",
) -> torch.nn.Module:
    state_dict = safetensors.torch.load_file(filename, device=device)

    # Optional user-supplied renames (e.g. "model._orig_mod.//model.")
    if checkpoint_keys_mapping and "//" in checkpoint_keys_mapping:
        state_dict = rename_checkpoint_keys(state_dict, checkpoint_keys_mapping)

    state_dict, _ = standardise_state_dict(state_dict, set(model.state_dict().keys()))

    # HACK(aliberts): to not overwrite normalization parameters as they should come from the dataset
    norm_keys = ("normalize_inputs", "normalize_targets", "unnormalize_outputs")
    state_dict = {k: v for k, v in state_dict.items() if not k.startswith(norm_keys)}

    missing, unexpected = model.load_state_dict(state_dict, strict=False)

    if not all(key.startswith(norm_keys) for key in missing) or unexpected:
        raise RuntimeError(f"SmolVLA: {len(missing)} missing / {len(unexpected)} unexpected keys")

    return model


def create_sinusoidal_pos_embedding(
    time: torch.tensor, dimension: int, min_period: float, max_period: float, device="cpu"
@@ -326,28 +222,17 @@ class SmolVLAPolicy(PreTrainedPolicy):
    def __init__(
        self,
        config: SmolVLAConfig,
        dataset_stats: dict[str, dict[str, Tensor]] | None = None,
    ):
        """
        Args:
            config: Policy configuration class instance or None, in which case the default instantiation of
                the configuration class is used.
            dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
                that they will be passed with a call to `load_state_dict` before the policy is used.
        """

        super().__init__(config)
        config.validate_features()
        self.config = config
        self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
        self.normalize_targets = Normalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )
        self.unnormalize_outputs = Unnormalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )

        self.language_tokenizer = AutoProcessor.from_pretrained(self.config.vlm_model_name).tokenizer
        self.model = VLAFlowMatching(config)
        self.reset()

@@ -357,23 +242,6 @@ class SmolVLAPolicy(PreTrainedPolicy):
            ACTION: deque(maxlen=self.config.n_action_steps),
        }

    # HACK(aliberts, danaaubakirova): we overwrite this classmethod here to fix smolVLA-specific issues
    @classmethod
    def _load_as_safetensor(
        cls,
        model: "SmolVLAPolicy",
        model_file: str,
        map_location: str,
        strict: bool,
    ):
        safetensors.torch.load_model(model, model_file, strict=strict, device=map_location)
        return load_smolvla(
            model,
            model_file,
            device=map_location,
            checkpoint_keys_mapping="model._orig_mod.//model.",
        )

    def get_optim_params(self) -> dict:
        return self.parameters()

@@ -389,7 +257,8 @@ class SmolVLAPolicy(PreTrainedPolicy):

        images, img_masks = self.prepare_images(batch)
        state = self.prepare_state(batch)
        lang_tokens, lang_masks = self.prepare_language(batch)
        lang_tokens = batch[f"{OBS_LANGUAGE}.tokens"]
        lang_masks = batch[f"{OBS_LANGUAGE}.attention_mask"]

        actions = self.model.sample_actions(images, img_masks, lang_tokens, lang_masks, state, noise=noise)

@@ -397,8 +266,6 @@ class SmolVLAPolicy(PreTrainedPolicy):
        original_action_dim = self.config.action_feature.shape[0]
        actions = actions[:, :, :original_action_dim]

        actions = self.unnormalize_outputs({ACTION: actions})[ACTION]

        if self.config.adapt_to_pi_aloha:
            actions = self._pi_aloha_encode_actions(actions)

@@ -408,8 +275,6 @@ class SmolVLAPolicy(PreTrainedPolicy):
        if self.config.adapt_to_pi_aloha:
            batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])

        batch = self.normalize_inputs(batch)

        return batch

    @torch.no_grad()
@@ -450,11 +315,11 @@ class SmolVLAPolicy(PreTrainedPolicy):
        if self.config.adapt_to_pi_aloha:
            batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
            batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION])
        batch = self.normalize_inputs(batch)
        batch = self.normalize_targets(batch)

        images, img_masks = self.prepare_images(batch)
        state = self.prepare_state(batch)
        lang_tokens, lang_masks = self.prepare_language(batch)
        lang_tokens = batch[f"{OBS_LANGUAGE}.tokens"]
        lang_masks = batch[f"{OBS_LANGUAGE}.attention_mask"]
        actions = self.prepare_action(batch)
        actions_is_pad = batch.get("actions_id_pad")
        loss_dict = {}
@@ -518,30 +383,6 @@ class SmolVLAPolicy(PreTrainedPolicy):
            img_masks.append(mask)
        return images, img_masks

    def prepare_language(self, batch) -> tuple[Tensor, Tensor]:
        """Tokenize the text input"""
        device = batch[OBS_STATE].device
        tasks = batch["task"]
        if isinstance(tasks, str):
            tasks = [tasks]

        if len(tasks) == 1:
            tasks = [tasks[0] for _ in range(batch[OBS_STATE].shape[0])]

        tasks = [task if task.endswith("\n") else f"{task}\n" for task in tasks]

        tokenized_prompt = self.language_tokenizer.__call__(
            tasks,
            padding=self.config.pad_language_to,
            padding_side="right",
            max_length=self.config.tokenizer_max_length,
            return_tensors="pt",
        )
        lang_tokens = tokenized_prompt["input_ids"].to(device=device)
        lang_masks = tokenized_prompt["attention_mask"].to(device=device, dtype=torch.bool)

        return lang_tokens, lang_masks

    def _pi_aloha_decode_state(self, state):
        # Flip the joints.
        for motor_idx in [1, 2, 8, 9]:

@@ -0,0 +1,109 @@

#!/usr/bin/env python

# Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any

import torch

from lerobot.configs.types import PolicyFeature
from lerobot.policies.smolvla.configuration_smolvla import SmolVLAConfig
from lerobot.processor import (
    DeviceProcessor,
    NormalizerProcessor,
    RenameProcessor,
    RobotProcessor,
    ToBatchProcessor,
    TokenizerProcessor,
    UnnormalizerProcessor,
)
from lerobot.processor.pipeline import EnvTransition, ProcessorStep, ProcessorStepRegistry, TransitionKey


def make_smolvla_processor(
    config: SmolVLAConfig, dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None
) -> tuple[RobotProcessor, RobotProcessor]:
    input_steps = [
        RenameProcessor(rename_map={}),  # To mimic the same processor as the pretrained one
        NormalizerProcessor(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
        ToBatchProcessor(),
        SmolVLANewLineProcessor(),
        TokenizerProcessor(
            tokenizer_name=config.vlm_model_name,
            padding=config.pad_language_to,
            padding_side="right",
            max_length=config.tokenizer_max_length,
        ),
        DeviceProcessor(device=config.device),
    ]
    output_steps = [
        DeviceProcessor(device="cpu"),
        UnnormalizerProcessor(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
    ]
    return RobotProcessor(steps=input_steps, name="robot_preprocessor"), RobotProcessor(
        steps=output_steps, name="robot_postprocessor"
    )


@ProcessorStepRegistry.register(name="smolvla_new_line_processor")
class SmolVLANewLineProcessor(ProcessorStep):
    """Add a newline to the end of the task if it doesn't have one."""

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        # Check if complementary_data exists
        complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA)
        if complementary_data is None or "task" not in complementary_data:
            return transition

        task = complementary_data["task"]
        if task is None:
            return transition

        # Handle both a string and a list of strings
        if isinstance(task, str):
            # Single string: add newline if not present
            if not task.endswith("\n"):
                complementary_data["task"] = f"{task}\n"
        elif isinstance(task, list) and all(isinstance(t, str) for t in task):
            # List of strings: add newline to each if not present
            complementary_data["task"] = [t if t.endswith("\n") else f"{t}\n" for t in task]
        # If task is neither a string nor a list of strings, leave it unchanged

        return transition

    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        """Adds nothing to the features."""
        return features

    def state_dict(self) -> dict[str, torch.Tensor]:
        """Return state dictionary (empty for this processor)."""
        return {}

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """Load state dictionary (no-op for this processor)."""
        pass

    def reset(self) -> None:
        """Reset processor state (no-op for this processor)."""
        pass

    def get_config(self) -> dict[str, Any]:
        """Return configuration for serialization."""
        return {}

@@ -36,7 +36,6 @@ import torch.nn.functional as F  # noqa: N812
from torch import Tensor

from lerobot.constants import ACTION, OBS_ENV_STATE, OBS_IMAGE, OBS_STATE, REWARD
from lerobot.policies.normalize import Normalize, Unnormalize
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig
from lerobot.policies.utils import get_device_from_parameters, get_output_shape, populate_queues
@@ -63,26 +62,19 @@ class TDMPCPolicy(PreTrainedPolicy):
    config_class = TDMPCConfig
    name = "tdmpc"

    def __init__(self, config: TDMPCConfig, dataset_stats: dict[str, dict[str, Tensor]] | None = None):
    def __init__(
        self,
        config: TDMPCConfig,
    ):
        """
        Args:
            config: Policy configuration class instance or None, in which case the default instantiation of
                the configuration class is used.
            dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
                that they will be passed with a call to `load_state_dict` before the policy is used.
        """
        super().__init__(config)
        config.validate_features()
        self.config = config

        self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
        self.normalize_targets = Normalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )
        self.unnormalize_outputs = Unnormalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )

        self.model = TDMPCTOLD(config)
        self.model_target = deepcopy(self.model)
        for param in self.model_target.parameters():
@@ -137,7 +129,6 @@ class TDMPCPolicy(PreTrainedPolicy):

        actions = torch.clamp(actions, -1, +1)

        actions = self.unnormalize_outputs({ACTION: actions})[ACTION]
        return actions

    @torch.no_grad()
@@ -147,11 +138,12 @@ class TDMPCPolicy(PreTrainedPolicy):
        if ACTION in batch:
            batch.pop(ACTION)

        batch = self.normalize_inputs(batch)

        if self.config.image_features:
            batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
            batch[OBS_IMAGE] = batch[next(iter(self.config.image_features))]
        # NOTE: for offline evaluation, we have action in the batch, so we need to pop it out
        if ACTION in batch:
            batch.pop(ACTION)

        self._queues = populate_queues(self._queues, batch)

@@ -320,11 +312,9 @@ class TDMPCPolicy(PreTrainedPolicy):
        """
        device = get_device_from_parameters(self)

        batch = self.normalize_inputs(batch)
        if self.config.image_features:
            batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
            batch[OBS_IMAGE] = batch[next(iter(self.config.image_features))]
        batch = self.normalize_targets(batch)

        info = {}

@@ -0,0 +1,51 @@

#!/usr/bin/env python

# Copyright 2024 Nicklas Hansen, Xiaolong Wang, Hao Su,
# and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig
from lerobot.processor import (
    DeviceProcessor,
    NormalizerProcessor,
    RenameProcessor,
    RobotProcessor,
    ToBatchProcessor,
    UnnormalizerProcessor,
)


def make_tdmpc_processor(
    config: TDMPCConfig, dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None
) -> tuple[RobotProcessor, RobotProcessor]:
    input_steps = [
        RenameProcessor(rename_map={}),
        NormalizerProcessor(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
        ToBatchProcessor(),
        DeviceProcessor(device=config.device),
    ]
    output_steps = [
        DeviceProcessor(device="cpu"),
        UnnormalizerProcessor(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
    ]
    return RobotProcessor(steps=input_steps, name="robot_preprocessor"), RobotProcessor(
        steps=output_steps, name="robot_postprocessor"
    )

@@ -28,7 +28,6 @@ import torchvision
from torch import Tensor, nn

from lerobot.constants import ACTION, OBS_IMAGES, OBS_STATE
from lerobot.policies.normalize import Normalize, Unnormalize
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.policies.utils import get_device_from_parameters, get_output_shape, populate_queues
from lerobot.policies.vqbet.configuration_vqbet import VQBeTConfig
@@ -48,7 +47,6 @@ class VQBeTPolicy(PreTrainedPolicy):
    def __init__(
        self,
        config: VQBeTConfig | None = None,
        dataset_stats: dict[str, dict[str, Tensor]] | None = None,
    ):
        """
        Args:
@@ -61,14 +59,6 @@ class VQBeTPolicy(PreTrainedPolicy):
        config.validate_features()
        self.config = config

        self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
        self.normalize_targets = Normalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )
        self.unnormalize_outputs = Unnormalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )

        self.vqbet = VQBeTModel(config)

        self.reset()
@@ -128,7 +118,6 @@ class VQBeTPolicy(PreTrainedPolicy):
    def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
        batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues}
        actions = self.vqbet(batch, rollout=True)[:, : self.config.action_chunk_size]
        actions = self.unnormalize_outputs({ACTION: actions})[ACTION]
        return actions

    @torch.no_grad()
@@ -142,10 +131,12 @@ class VQBeTPolicy(PreTrainedPolicy):
        # NOTE: for offline evaluation, we have action in the batch, so we need to pop it out
        if ACTION in batch:
            batch.pop(ACTION)
        batch = self.normalize_inputs(batch)
        batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
        # NOTE: It's important that this happens after stacking the images into a single key.
        batch["observation.images"] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
        # NOTE: for offline evaluation, we have action in the batch, so we need to pop it out
        if ACTION in batch:
            batch.pop(ACTION)

        self._queues = populate_queues(self._queues, batch)

@@ -165,10 +156,8 @@ class VQBeTPolicy(PreTrainedPolicy):

    def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]:
        """Run the batch through the model and compute the loss for training or validation."""
        batch = self.normalize_inputs(batch)
        batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
        batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
        batch = self.normalize_targets(batch)
        # VQ-BeT discretizes action using VQ-VAE before training BeT (please refer to section 3.2 in the VQ-BeT paper https://huggingface.co/papers/2403.03181)
        if not self.vqbet.action_head.vqvae_model.discretized.item():
            # loss: total loss of training RVQ

@@ -0,0 +1,52 @@

#!/usr/bin/env python

# Copyright 2024 Seungjae Lee and Yibin Wang and Haritheja Etukuru
# and H. Jin Kim and Nur Muhammad Mahi Shafiullah and Lerrel Pinto
# and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from lerobot.policies.vqbet.configuration_vqbet import VQBeTConfig
from lerobot.processor import (
    DeviceProcessor,
    NormalizerProcessor,
    RenameProcessor,
    RobotProcessor,
    ToBatchProcessor,
    UnnormalizerProcessor,
)


def make_vqbet_processor(
    config: VQBeTConfig, dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None
) -> tuple[RobotProcessor, RobotProcessor]:
    input_steps = [
        RenameProcessor(rename_map={}),  # Leave the user the possibility to rename keys
        NormalizerProcessor(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
        ToBatchProcessor(),
        DeviceProcessor(device=config.device),
    ]
    output_steps = [
        DeviceProcessor(device="cpu"),
        UnnormalizerProcessor(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
    ]
    return RobotProcessor(steps=input_steps, name="robot_preprocessor"), RobotProcessor(
        steps=output_steps, name="robot_postprocessor"
    )

@@ -14,8 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from .batch_processor import ToBatchProcessor
from .device_processor import DeviceProcessor
from .normalize_processor import NormalizerProcessor, UnnormalizerProcessor
from .normalize_processor import NormalizerProcessor, UnnormalizerProcessor, hotswap_stats
from .observation_processor import VanillaObservationProcessor
from .pipeline import (
    ActionProcessor,
@@ -32,6 +33,7 @@ from .pipeline import (
    TruncatedProcessor,
)
from .rename_processor import RenameProcessor
from .tokenizer_processor import TokenizerProcessor

__all__ = [
    "ActionProcessor",
@@ -42,12 +44,15 @@ __all__ = [
    "InfoProcessor",
    "NormalizerProcessor",
    "UnnormalizerProcessor",
    "hotswap_stats",
    "ObservationProcessor",
    "ProcessorStep",
    "ProcessorStepRegistry",
    "RenameProcessor",
    "RewardProcessor",
    "RobotProcessor",
    "ToBatchProcessor",
    "TokenizerProcessor",
    "TransitionKey",
    "TruncatedProcessor",
    "VanillaObservationProcessor",

@@ -0,0 +1,139 @@

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any

import torch
from torch import Tensor

from lerobot.configs.types import PolicyFeature
from lerobot.constants import OBS_ENV_STATE, OBS_IMAGE, OBS_IMAGES, OBS_STATE
from lerobot.processor.pipeline import EnvTransition, ProcessorStepRegistry, TransitionKey


@dataclass
@ProcessorStepRegistry.register(name="to_batch_processor")
class ToBatchProcessor:
    """Processor that adds batch dimensions to observations and actions when needed.

    This processor ensures that observations and actions have proper batch dimensions for model processing:

    - For state observations (observation.state, observation.environment_state):
      Adds batch dimension (unsqueeze at dim=0) if tensor is 1-dimensional

    - For image observations (observation.image, observation.images.*):
      Adds batch dimension (unsqueeze at dim=0) if tensor is 3-dimensional (H, W, C)

    - For actions:
      Adds batch dimension (unsqueeze at dim=0) if tensor is 1-dimensional

    - For the task field in complementary data:
      Wraps a string task in a list to add a batch dimension
      (task must be a string or a list of strings)

    This is useful when processing single transitions that need to be batched for
    model inference or when converting from unbatched environment outputs to
    batched model inputs.

    The processor only modifies tensors that need batching and leaves already
    batched tensors unchanged.

    Example:
    ```python
    # State: (7,) -> (1, 7)
    # Image: (224, 224, 3) -> (1, 224, 224, 3)
    # Action: (4,) -> (1, 4)
    # Task: "pick_cube" -> ["pick_cube"]
    # Already batched: (1, 7) -> (1, 7) [unchanged]
    ```
    """

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        self._process_observation(transition)
        self._process_action(transition)
        self._process_complementary_data(transition)
        return transition

    def _process_observation(self, transition: EnvTransition) -> None:
        """Process observation component in-place, adding batch dimensions where needed."""
        observation = transition.get(TransitionKey.OBSERVATION)
        if observation is None:
            return

        # Process state observations - add batch dim if 1D
        for state_key in [OBS_STATE, OBS_ENV_STATE]:
            if state_key in observation:
                state_value = observation[state_key]
                if isinstance(state_value, Tensor) and state_value.dim() == 1:
                    observation[state_key] = state_value.unsqueeze(0)

        # Process single image observation - add batch dim if 3D
        if OBS_IMAGE in observation:
            image_value = observation[OBS_IMAGE]
            if isinstance(image_value, Tensor) and image_value.dim() == 3:
                observation[OBS_IMAGE] = image_value.unsqueeze(0)

        # Process multiple image observations - add batch dim if 3D
        for key, value in observation.items():
            if key.startswith(f"{OBS_IMAGES}.") and isinstance(value, Tensor) and value.dim() == 3:
                observation[key] = value.unsqueeze(0)

    def _process_action(self, transition: EnvTransition) -> None:
        """Process action component in-place, adding batch dimension if needed."""
        action = transition.get(TransitionKey.ACTION)
        if action is not None and isinstance(action, Tensor) and action.dim() == 1:
            transition[TransitionKey.ACTION] = action.unsqueeze(0)

    def _process_complementary_data(self, transition: EnvTransition) -> None:
        """Process complementary data in-place, handling task field batching."""
        complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA)
        if complementary_data is None:
            return

        # Process task field - wrap string in list to add batch dimension
        if "task" in complementary_data:
            task_value = complementary_data["task"]
            if isinstance(task_value, str):
                complementary_data["task"] = [task_value]

        # Process index field - add batch dim if 0D
        if "index" in complementary_data:
            index_value = complementary_data["index"]
            if isinstance(index_value, Tensor) and index_value.dim() == 0:
                complementary_data["index"] = index_value.unsqueeze(0)

        # Process task_index field - add batch dim if 0D
        if "task_index" in complementary_data:
            task_index_value = complementary_data["task_index"]
            if isinstance(task_index_value, Tensor) and task_index_value.dim() == 0:
                complementary_data["task_index"] = task_index_value.unsqueeze(0)

    def get_config(self) -> dict[str, Any]:
        """Return configuration for serialization."""
        return {}

    def state_dict(self) -> dict[str, torch.Tensor]:
        """Return state dictionary (empty for this processor)."""
        return {}

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """Load state dictionary (no-op for this processor)."""
        pass

    def reset(self) -> None:
        """Reset processor state (no-op for this processor)."""
        pass

    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features
|
||||
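
# Editor's note (illustrative, not part of the diff): a minimal sketch of
# driving this step on a single unbatched transition.
batch_step = ToBatchProcessor()
transition = {
    TransitionKey.OBSERVATION: {OBS_STATE: torch.zeros(7)},
    TransitionKey.ACTION: torch.zeros(4),
    TransitionKey.COMPLEMENTARY_DATA: {"task": "pick_cube"},
}
batched = batch_step(transition)
# batched[TransitionKey.OBSERVATION][OBS_STATE].shape == (1, 7)
# batched[TransitionKey.ACTION].shape == (1, 4)
# batched[TransitionKey.COMPLEMENTARY_DATA]["task"] == ["pick_cube"]
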
@@ -0,0 +1,225 @@
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from collections.abc import Iterable, Sequence
from copy import deepcopy
from typing import Any

import numpy as np
import torch
from scipy.spatial.transform import Rotation

from .pipeline import EnvTransition, TransitionKey


def _to_tensor(x: torch.Tensor | np.ndarray | Sequence[int | float]):
    if isinstance(x, torch.Tensor):
        return x
    if isinstance(x, np.ndarray):
        # Keep images (uint8 HWC) and python objects as-is
        if x.dtype == np.uint8 or x.dtype == np.object_:
            return x
        # Scalars/arrays to float32 tensor
        return torch.as_tensor(x, dtype=torch.float32)
    # Anything else to float32 tensor
    return torch.as_tensor(x, dtype=torch.float32)


def _from_tensor(x: Any):
    if isinstance(x, torch.Tensor):
        return x.item() if x.numel() == 1 else x.detach().cpu().numpy()
    return x


def _is_image(arr: Any) -> bool:
    return isinstance(arr, np.ndarray) and arr.dtype == np.uint8 and arr.ndim == 3


def _split_obs_to_state_and_images(obs: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
    state, images = {}, {}
    for k, v in obs.items():
        if _is_image(v):
            images[k] = v
        else:
            state[k] = v
    return state, images


def make_obs_act_transition(
    *, obs: dict[str, Any] | None = None, act: dict[str, Any] | None = None
) -> EnvTransition:
    return {
        TransitionKey.OBSERVATION: {} if obs is None else obs,
        TransitionKey.ACTION: {} if act is None else act,
        TransitionKey.INFO: {},
        TransitionKey.COMPLEMENTARY_DATA: {},
        TransitionKey.REWARD: None,
        TransitionKey.DONE: None,
        TransitionKey.TRUNCATED: None,
    }


def to_transition_teleop_action(action: dict[str, Any]) -> EnvTransition:
    """
    Convert a raw teleop action dict into an EnvTransition under the ACTION TransitionKey.
    """
    act_dict: dict[str, Any] = {}
    for k, v in action.items():
        # Check if the value is a type that should not be converted to a tensor.
        if isinstance(v, (Rotation, dict)):
            act_dict[f"action.{k}"] = v
            continue

        arr = np.array(v) if np.isscalar(v) else v
        act_dict[f"action.{k}"] = _to_tensor(arr)

    return make_obs_act_transition(act=act_dict)
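
# Editor's note (illustrative): the key names below are hypothetical; any scalar
# value is wrapped into a float32 tensor under an "action."-prefixed key.
raw = {"shoulder_pan.pos": 0.12, "gripper.pos": 1.0}
tr = to_transition_teleop_action(raw)
# tr[TransitionKey.ACTION] == {"action.shoulder_pan.pos": tensor(0.1200),
#                              "action.gripper.pos": tensor(1.)}
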

# TODO(Adil, Pepijn): Over time we can maybe add these converters to pipeline.py itself
def to_transition_robot_observation(observation: dict[str, Any]) -> EnvTransition:
    """
    Convert a raw robot observation dict into an EnvTransition under the OBSERVATION TransitionKey.
    """
    state, images = _split_obs_to_state_and_images(observation)

    obs_dict: dict[str, Any] = {}
    for k, v in state.items():
        arr = np.array(v) if np.isscalar(v) else v
        obs_dict[f"observation.state.{k}"] = _to_tensor(arr)

    for cam, img in images.items():
        obs_dict[f"observation.images.{cam}"] = img

    return make_obs_act_transition(obs=obs_dict)


def to_output_robot_action(transition: EnvTransition) -> dict[str, Any]:
    """
    Convert an EnvTransition under the ACTION TransitionKey to a dict with keys ending in '.pos' or '.vel' for raw robot actions.
    """
    out: dict[str, Any] = {}
    action_dict = transition.get(TransitionKey.ACTION) or {}

    for k, v in action_dict.items():
        if isinstance(k, str) and k.startswith("action.") and k.endswith((".pos", ".vel")):
            out_key = k[len("action.") :]  # Strip the 'action.' prefix.
            out[out_key] = float(v)

    return out
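
# Editor's note (illustrative): a hypothetical round trip through the two
# robot-side converters; names and shapes are made up.
obs = {"joint_1.pos": 0.3, "wrist_cam": np.zeros((480, 640, 3), dtype=np.uint8)}
tr = to_transition_robot_observation(obs)
# -> {"observation.state.joint_1.pos": tensor(0.3000),
#     "observation.images.wrist_cam": <uint8 HWC array, passed through>}

tr[TransitionKey.ACTION] = {"action.joint_1.pos": torch.tensor(0.5)}
to_output_robot_action(tr)  # -> {"joint_1.pos": 0.5}
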

def to_dataset_frame(
    transitions_or_transition: EnvTransition | Iterable[EnvTransition], features: dict[str, dict]
) -> dict[str, Any]:
    """
    Convert a single EnvTransition or an iterable of them into a flat,
    dataset-friendly dictionary for training or evaluation, according to
    the provided `features` spec.

    Args:
        transitions_or_transition: Either a single EnvTransition dict
            or an iterable of them (which will be merged).
        features (dict[str, dict]):
            A feature specification dictionary:
            - 'action': dict with 'names': list of action feature names
            - 'observation.state': dict with 'names': list of state feature names
            - keys starting with 'observation.images.' are passed through

    Returns:
        batch (dict[str, Any]): Flat dictionary containing:
            - numpy arrays for "observation.state" and "action"
            - any image tensors defined in features
            - next.{reward,done,truncated}
            - *_is_pad flags and task from complementary_data
    """
    action_names = features.get("action", {}).get("names", [])
    obs_state_names = features.get("observation.state", {}).get("names", [])
    image_keys = [k for k in features if k.startswith("observation.images.")]

    def _merge(base: EnvTransition, other: EnvTransition) -> EnvTransition:
        out = deepcopy(base)
        for key in (
            TransitionKey.OBSERVATION,
            TransitionKey.ACTION,
            TransitionKey.INFO,
            TransitionKey.COMPLEMENTARY_DATA,
        ):
            if other.get(key):
                out.setdefault(key, {}).update(deepcopy(other[key]))
        for k in (TransitionKey.REWARD, TransitionKey.DONE, TransitionKey.TRUNCATED):
            if k in other:
                out[k] = other[k]
        return out

    def _ensure_transition(obj) -> EnvTransition:
        # single transition
        if isinstance(obj, dict) and any(isinstance(k, TransitionKey) for k in obj):
            return obj
        # iterable of transitions
        if isinstance(obj, Iterable):
            items = list(obj)
            if not items:
                return {}
            acc = items[0]
            for t in items[1:]:
                acc = _merge(acc, t)
            return acc
        raise TypeError("Expected EnvTransition or iterable of them")

    tr = _ensure_transition(transitions_or_transition)
    obs = tr.get(TransitionKey.OBSERVATION, {}) or {}
    act = tr.get(TransitionKey.ACTION, {}) or {}
    batch: dict[str, Any] = {}

    # Images passthrough
    for k in image_keys:
        if k in obs:
            batch[k] = obs[k]

    # Observation.state vector
    if obs_state_names:
        vals = [_from_tensor(obs.get(f"observation.state.{n}", 0.0)) for n in obs_state_names]
        batch["observation.state"] = np.asarray(vals, dtype=np.float32)

    # Action vector
    if action_names:
        vals = [_from_tensor(act.get(f"action.{n}", 0.0)) for n in action_names]
        batch["action"] = np.asarray(vals, dtype=np.float32)

    # Next.* fields
    if tr.get(TransitionKey.REWARD) is not None:
        batch["next.reward"] = _from_tensor(tr[TransitionKey.REWARD])
    if tr.get(TransitionKey.DONE) is not None:
        batch["next.done"] = _from_tensor(tr[TransitionKey.DONE])
    if tr.get(TransitionKey.TRUNCATED) is not None:
        batch["next.truncated"] = _from_tensor(tr[TransitionKey.TRUNCATED])

    # Complementary data flags and task
    comp = tr.get(TransitionKey.COMPLEMENTARY_DATA) or {}
    if comp:
        # pad flags
        for k, v in comp.items():
            if k.endswith("_is_pad"):
                batch[k] = v
        # task label
        if comp.get("task") is not None:
            batch["task"] = comp["task"]

    return batch
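
# Editor's note (illustrative): a hypothetical features spec and the shape of
# the resulting frame; only keys named in the spec are materialized, and
# missing state/action entries default to 0.0.
features = {
    "action": {"names": ["joint_1", "joint_2"]},
    "observation.state": {"names": ["joint_1", "joint_2"]},
    "observation.images.wrist_cam": {},
}
frame = to_dataset_frame(tr, features)
# frame["observation.state"] and frame["action"] are float32 arrays of shape (2,);
# frame["observation.images.wrist_cam"] is passed through if present.
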
@@ -19,24 +19,63 @@ from typing import Any
import torch

from lerobot.configs.types import PolicyFeature
from lerobot.processor.pipeline import EnvTransition, TransitionKey
from lerobot.processor.pipeline import EnvTransition, ProcessorStepRegistry, TransitionKey
from lerobot.utils.utils import get_safe_torch_device


@ProcessorStepRegistry.register("device_processor")
@dataclass
class DeviceProcessor:
    """Processes transitions by moving tensors to the specified device.
    """Processes transitions by moving tensors to the specified device and optionally converting float dtypes.

    This processor ensures that all tensors in the transition are moved to the
    specified device (CPU or GPU) before they are returned.
    specified device (CPU or GPU) before they are returned. It can also convert
    floating-point tensors to a specified dtype while preserving non-float types
    (int, long, bool, etc.).
    """

    device: torch.device = "cpu"
    device: str = "cpu"
    float_dtype: str | None = None
    _device: torch.device | None = None

    def __post_init__(self):
        self.device = get_safe_torch_device(self.device)
        self._device = get_safe_torch_device(self.device)
        self.device = self._device.type
        self.non_blocking = "cuda" in str(self.device)

        # Validate and convert float_dtype string to torch dtype
        if self.float_dtype is not None:
            dtype_mapping = {
                "float16": torch.float16,
                "float32": torch.float32,
                "float64": torch.float64,
                "bfloat16": torch.bfloat16,
                "half": torch.float16,
                "float": torch.float32,
                "double": torch.float64,
            }

            if self.float_dtype not in dtype_mapping:
                available_dtypes = list(dtype_mapping.keys())
                raise ValueError(
                    f"Invalid float_dtype '{self.float_dtype}'. Available options: {available_dtypes}"
                )

            self._target_float_dtype = dtype_mapping[self.float_dtype]
        else:
            self._target_float_dtype = None

    def _process_tensor(self, tensor: torch.Tensor) -> torch.Tensor:
        """Process a tensor by moving to device and optionally converting float dtype."""
        # Move to device first
        tensor = tensor.to(self.device, non_blocking=self.non_blocking)

        # Convert float dtype if specified and tensor is floating point
        if self._target_float_dtype is not None and tensor.is_floating_point():
            tensor = tensor.to(dtype=self._target_float_dtype)

        return tensor

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        # Create a copy of the transition
        new_transition = transition.copy()
@@ -45,7 +84,7 @@ class DeviceProcessor:
        observation = transition.get(TransitionKey.OBSERVATION)
        if observation is not None:
            new_observation = {
                k: v.to(self.device, non_blocking=self.non_blocking) if isinstance(v, torch.Tensor) else v
                k: self._process_tensor(v) if isinstance(v, torch.Tensor) else v
                for k, v in observation.items()
            }
            new_transition[TransitionKey.OBSERVATION] = new_observation
@@ -53,30 +92,54 @@ class DeviceProcessor:
        # Process action tensor
        action = transition.get(TransitionKey.ACTION)
        if action is not None and isinstance(action, torch.Tensor):
            new_transition[TransitionKey.ACTION] = action.to(self.device, non_blocking=self.non_blocking)
            new_transition[TransitionKey.ACTION] = self._process_tensor(action)

        # Process reward tensor
        reward = transition.get(TransitionKey.REWARD)
        if reward is not None and isinstance(reward, torch.Tensor):
            new_transition[TransitionKey.REWARD] = reward.to(self.device, non_blocking=self.non_blocking)
            new_transition[TransitionKey.REWARD] = self._process_tensor(reward)

        # Process done tensor
        done = transition.get(TransitionKey.DONE)
        if done is not None and isinstance(done, torch.Tensor):
            new_transition[TransitionKey.DONE] = done.to(self.device, non_blocking=self.non_blocking)
            new_transition[TransitionKey.DONE] = self._process_tensor(done)

        # Process truncated tensor
        truncated = transition.get(TransitionKey.TRUNCATED)
        if truncated is not None and isinstance(truncated, torch.Tensor):
            new_transition[TransitionKey.TRUNCATED] = truncated.to(
                self.device, non_blocking=self.non_blocking
            )
            new_transition[TransitionKey.TRUNCATED] = self._process_tensor(truncated)

        # Process complementary data tensors
        complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA)
        if complementary_data is not None:
            new_complementary_data = {}

            # Process all items in complementary_data
            for key, value in complementary_data.items():
                if isinstance(value, torch.Tensor):
                    new_complementary_data[key] = self._process_tensor(value)
                else:
                    new_complementary_data[key] = value

            new_transition[TransitionKey.COMPLEMENTARY_DATA] = new_complementary_data

        return new_transition

    def get_config(self) -> dict[str, Any]:
        """Return configuration for serialization."""
        return {"device": self.device}
        return {"device": self.device, "float_dtype": self.float_dtype}

    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
    def state_dict(self) -> dict[str, torch.Tensor]:
        """Return state dictionary (empty for this processor)."""
        return {}

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """Load state dictionary (no-op for this processor)."""
        pass

    def reset(self) -> None:
        """Reset processor state (no-op for this processor)."""
        pass

    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features
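
# Editor's note (illustrative): exercising the new float_dtype option.
proc = DeviceProcessor(device="cuda", float_dtype="bfloat16")
out = proc({TransitionKey.ACTION: torch.randn(1, 7)})
# The action is now on CUDA with dtype bfloat16; an integer tensor such as
# complementary_data["index"] would keep its dtype (only the device changes).
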
@@ -0,0 +1,502 @@
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Generic script to migrate any policy model with normalization layers to the new pipeline-based system.

This script:
1. Loads an existing pretrained policy model
2. Extracts normalization statistics from the model
3. Creates both preprocessor and postprocessor:
   - Preprocessor: normalizes both inputs (observations) and outputs (actions) for training
   - Postprocessor: unnormalizes outputs (actions) for inference
4. Removes normalization layers from the model state_dict
5. Saves the new model and both processors

Usage:
    python src/lerobot/processor/migrate_policy_normalization.py \
        --pretrained-path lerobot/act_aloha_sim_transfer_cube_human \
        --push-to-hub
"""

import argparse
import importlib
import json
import os
from copy import deepcopy
from pathlib import Path
from typing import Any

import torch
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file as load_safetensors

from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.processor.batch_processor import ToBatchProcessor
from lerobot.processor.device_processor import DeviceProcessor
from lerobot.processor.normalize_processor import NormalizerProcessor, UnnormalizerProcessor
from lerobot.processor.pipeline import RobotProcessor
from lerobot.processor.rename_processor import RenameProcessor

# Policy type to class mapping
POLICY_CLASSES = {
    "act": "lerobot.policies.act.modeling_act.ACTPolicy",
    "diffusion": "lerobot.policies.diffusion.modeling_diffusion.DiffusionPolicy",
    "pi0": "lerobot.policies.pi0.modeling_pi0.PI0Policy",
    "pi0fast": "lerobot.policies.pi0fast.modeling_pi0fast.PI0FASTPolicy",
    "smolvla": "lerobot.policies.smolvla.modeling_smolvla.SmolVLAPolicy",
    "tdmpc": "lerobot.policies.tdmpc.modeling_tdmpc.TDMPCPolicy",
    "vqbet": "lerobot.policies.vqbet.modeling_vqbet.VQBeTPolicy",
    "sac": "lerobot.policies.sac.modeling_sac.SACPolicy",
    "classifier": "lerobot.policies.classifier.modeling_classifier.ClassifierPolicy",
}


def extract_normalization_stats(state_dict: dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]:
    """Extract normalization statistics from model state_dict."""
    stats = {}

    # Define patterns to match and their prefixes to remove
    normalization_patterns = [
        "normalize_inputs.buffer_",
        "unnormalize_outputs.buffer_",
        "normalize_targets.buffer_",
        "normalize.",  # Must come after normalize_* patterns
        "unnormalize.",  # Must come after unnormalize_* patterns
        "input_normalizer.",
        "output_normalizer.",
    ]

    # Process each key in state_dict
    for key, tensor in state_dict.items():
        # Try each pattern
        for pattern in normalization_patterns:
            if key.startswith(pattern):
                # Extract the remaining part after the pattern
                remaining = key[len(pattern) :]
                parts = remaining.split(".")

                # Need at least feature name and stat type
                if len(parts) >= 2:
                    # Last part is the stat type (mean, std, min, max, etc.)
                    stat_type = parts[-1]
                    # Everything else is the feature name
                    feature_name = ".".join(parts[:-1]).replace("_", ".")

                    # Add to stats
                    if feature_name not in stats:
                        stats[feature_name] = {}
                    stats[feature_name][stat_type] = tensor.clone()

                # Only process the first matching pattern
                break

    return stats
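
# Editor's note (illustrative): the buffer-key mapping performed above. Note
# that `.replace("_", ".")` turns "observation_state" into "observation.state".
sd = {"normalize_inputs.buffer_observation_state.mean": torch.zeros(7)}
extract_normalization_stats(sd)
# -> {"observation.state": {"mean": tensor([0., 0., 0., 0., 0., 0., 0.])}}
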

def detect_features_and_norm_modes(
    config: dict[str, Any], stats: dict[str, dict[str, torch.Tensor]]
) -> tuple[dict[str, PolicyFeature], dict[FeatureType, NormalizationMode]]:
    """Detect features and normalization modes from config and stats."""
    features = {}
    norm_modes = {}

    # First, check if there's a normalization_mapping in the config
    if "normalization_mapping" in config:
        print(f"Found normalization_mapping in config: {config['normalization_mapping']}")
        # Extract normalization modes from config
        for feature_name, mode_str in config["normalization_mapping"].items():
            # Convert string to NormalizationMode enum
            if mode_str == "mean_std":
                mode = NormalizationMode.MEAN_STD
            elif mode_str == "min_max":
                mode = NormalizationMode.MIN_MAX
            else:
                print(f"Warning: Unknown normalization mode '{mode_str}' for feature '{feature_name}'")
                continue

            # Determine feature type from feature name
            if "image" in feature_name or "visual" in feature_name:
                feature_type = FeatureType.VISUAL
            elif "state" in feature_name:
                feature_type = FeatureType.STATE
            elif "action" in feature_name:
                feature_type = FeatureType.ACTION
            else:
                feature_type = FeatureType.STATE

            norm_modes[feature_type] = mode

    # Try to extract from config
    if "features" in config:
        for key, feature_config in config["features"].items():
            shape = feature_config.get("shape", feature_config.get("dim"))
            shape = (shape,) if isinstance(shape, int) else tuple(shape)

            # Determine feature type
            if "image" in key or "visual" in key:
                feature_type = FeatureType.VISUAL
            elif "state" in key:
                feature_type = FeatureType.STATE
            elif "action" in key:
                feature_type = FeatureType.ACTION
            else:
                feature_type = FeatureType.STATE  # Default

            features[key] = PolicyFeature(feature_type, shape)

    # If no features in config, infer from stats
    if not features:
        for key, stat_dict in stats.items():
            # Get shape from any stat tensor
            tensor = next(iter(stat_dict.values()))
            shape = tuple(tensor.shape)

            # Determine feature type based on key
            if "image" in key or "visual" in key or "pixels" in key:
                feature_type = FeatureType.VISUAL
            elif "state" in key or "joint" in key or "position" in key:
                feature_type = FeatureType.STATE
            elif "action" in key:
                feature_type = FeatureType.ACTION
            else:
                feature_type = FeatureType.STATE

            features[key] = PolicyFeature(feature_type, shape)

    # If normalization modes weren't in config, determine based on available stats
    if not norm_modes:
        for key, stat_dict in stats.items():
            if key in features:
                if "mean" in stat_dict and "std" in stat_dict:
                    feature_type = features[key].type
                    if feature_type not in norm_modes:
                        norm_modes[feature_type] = NormalizationMode.MEAN_STD
                elif "min" in stat_dict and "max" in stat_dict:
                    feature_type = features[key].type
                    if feature_type not in norm_modes:
                        norm_modes[feature_type] = NormalizationMode.MIN_MAX

    # Default normalization modes if not detected
    if FeatureType.VISUAL not in norm_modes:
        norm_modes[FeatureType.VISUAL] = NormalizationMode.MEAN_STD
    if FeatureType.STATE not in norm_modes:
        norm_modes[FeatureType.STATE] = NormalizationMode.MIN_MAX
    if FeatureType.ACTION not in norm_modes:
        norm_modes[FeatureType.ACTION] = NormalizationMode.MEAN_STD

    return features, norm_modes
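
# Editor's note (illustrative): with an empty config, everything is inferred
# from the extracted stats, then defaults fill the remaining feature types.
stats = {"observation.state": {"min": torch.zeros(7), "max": torch.ones(7)}}
feats, modes = detect_features_and_norm_modes({}, stats)
# feats["observation.state"] == PolicyFeature(FeatureType.STATE, (7,))
# modes[FeatureType.STATE] == NormalizationMode.MIN_MAX
# (VISUAL and ACTION fall back to their defaults above)
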

def remove_normalization_layers(state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    """Remove normalization layers from state_dict."""
    new_state_dict = {}

    # Patterns to remove
    remove_patterns = [
        "normalize_inputs.",
        "unnormalize_outputs.",
        "normalize_targets.",  # Added pattern for target normalization
        "normalize.",
        "unnormalize.",
        "input_normalizer.",
        "output_normalizer.",
        "normalizer.",
    ]

    for key, tensor in state_dict.items():
        should_remove = any(pattern in key for pattern in remove_patterns)
        if not should_remove:
            new_state_dict[key] = tensor

    return new_state_dict


def convert_features_to_policy_features(features_dict: dict[str, dict]) -> dict[str, PolicyFeature]:
    """Convert features from old format to PolicyFeature objects."""
    converted_features = {}

    for key, feature_dict in features_dict.items():
        # Determine feature type based on key
        if "image" in key or "visual" in key:
            feature_type = FeatureType.VISUAL
        elif "state" in key:
            feature_type = FeatureType.STATE
        elif "action" in key:
            feature_type = FeatureType.ACTION
        else:
            feature_type = FeatureType.STATE

        # Get shape from feature dict
        shape = feature_dict.get("shape", feature_dict.get("dim"))
        shape = (shape,) if isinstance(shape, int) else tuple(shape)

        converted_features[key] = PolicyFeature(feature_type, shape)

    return converted_features


def load_model_from_hub(
    repo_id: str, revision: str | None = None
) -> tuple[dict[str, torch.Tensor], dict[str, Any], dict[str, Any]]:
    """Load model state_dict and config from hub."""
    # Download files
    safetensors_path = hf_hub_download(repo_id=repo_id, filename="model.safetensors", revision=revision)

    config_path = hf_hub_download(repo_id=repo_id, filename="config.json", revision=revision)
    train_config_path = hf_hub_download(repo_id=repo_id, filename="train_config.json", revision=revision)

    # Load state_dict
    state_dict = load_safetensors(safetensors_path)

    # Load config
    with open(config_path) as f:
        config = json.load(f)

    with open(train_config_path) as f:
        train_config = json.load(f)

    return state_dict, config, train_config


def main():
    parser = argparse.ArgumentParser(
        description="Migrate policy models with normalization layers to new pipeline system"
    )
    parser.add_argument(
        "--pretrained-path",
        type=str,
        required=True,
        help="Path to pretrained model (hub repo or local directory)",
    )
    parser.add_argument(
        "--output-dir",
        type=str,
        default=None,
        help="Output directory for migrated model (default: same as pretrained-path)",
    )
    parser.add_argument("--push-to-hub", action="store_true", help="Push migrated model to hub")
    parser.add_argument(
        "--hub-repo-id",
        type=str,
        default=None,
        help="Hub repository ID for pushing (default: same as pretrained-path)",
    )
    parser.add_argument("--revision", type=str, default=None, help="Revision of the model to load")
    parser.add_argument("--private", action="store_true", help="Make the hub repository private")

    args = parser.parse_args()

    # Load model and config
    print(f"Loading model from {args.pretrained_path}...")
    if os.path.isdir(args.pretrained_path):
        # Local directory
        state_dict = load_safetensors(os.path.join(args.pretrained_path, "model.safetensors"))
        with open(os.path.join(args.pretrained_path, "config.json")) as f:
            config = json.load(f)
        with open(os.path.join(args.pretrained_path, "train_config.json")) as f:
            train_config = json.load(f)
    else:
        # Hub repository
        state_dict, config, train_config = load_model_from_hub(args.pretrained_path, args.revision)

    # Extract normalization statistics
    print("Extracting normalization statistics...")
    stats = extract_normalization_stats(state_dict)

    print(f"Found normalization statistics for: {list(stats.keys())}")

    # Detect input features and normalization modes
    print("Detecting features and normalization modes...")
    features, norm_map = detect_features_and_norm_modes(config, stats)

    print(f"Detected features: {list(features.keys())}")
    print(f"Normalization modes: {norm_map}")

    # Remove normalization layers from state_dict
    print("Removing normalization layers from model...")
    new_state_dict = remove_normalization_layers(state_dict)

    removed_keys = set(state_dict.keys()) - set(new_state_dict.keys())
    if removed_keys:
        print(f"Removed {len(removed_keys)} normalization layer keys")

    # Determine output path
    if args.output_dir:
        output_dir = Path(args.output_dir)
    else:
        if os.path.isdir(args.pretrained_path):
            output_dir = Path(args.pretrained_path).parent / f"{Path(args.pretrained_path).name}_migrated"
        else:
            output_dir = Path(f"./{args.pretrained_path.replace('/', '_')}_migrated")

    output_dir.mkdir(parents=True, exist_ok=True)

    # Clean up config - remove normalization_mapping field
    cleaned_config = dict(config)
    if "normalization_mapping" in cleaned_config:
        print("Removing 'normalization_mapping' field from config")
        del cleaned_config["normalization_mapping"]
    policy_type = deepcopy(cleaned_config["type"])

    del cleaned_config["type"]

    # Instantiate the policy model with cleaned config and load the cleaned state dict
    print(f"Instantiating {policy_type} policy model...")
    policy_class_path = POLICY_CLASSES[policy_type]
    module_path, class_name = policy_class_path.rsplit(".", 1)

    module = importlib.import_module(module_path)
    policy_class = getattr(module, class_name)

    # Create config class instance
    config_module_path = module_path.replace("modeling", "configuration")
    config_module = importlib.import_module(config_module_path)
    # Handle special cases for config class names
    config_class_names = {
        "act": "ACTConfig",
        "diffusion": "DiffusionConfig",
        "pi0": "PI0Config",
        "pi0fast": "PI0FASTConfig",
        "smolvla": "SmolVLAConfig",
        "tdmpc": "TDMPCConfig",
        "vqbet": "VQBeTConfig",
        "sac": "SACConfig",
        "classifier": "ClassifierConfig",
    }
    config_class_name = config_class_names.get(policy_type, f"{policy_type.upper()}Config")
    config_class = getattr(config_module, config_class_name)

    # Convert input_features and output_features to PolicyFeature objects - these are mandatory
    if "input_features" not in cleaned_config:
        raise ValueError("Missing mandatory 'input_features' in config")
    if "output_features" not in cleaned_config:
        raise ValueError("Missing mandatory 'output_features' in config")

    cleaned_config["input_features"] = convert_features_to_policy_features(cleaned_config["input_features"])
    cleaned_config["output_features"] = convert_features_to_policy_features(cleaned_config["output_features"])

    # Create config instance from cleaned config dict
    policy_config = config_class(**cleaned_config)

    # Create policy instance - some policies expect dataset_stats
    policy = policy_class(policy_config)

    # Load the cleaned state dict
    policy.load_state_dict(new_state_dict, strict=True)
    print("Successfully loaded cleaned state dict into policy model")

    # Now create preprocessor and postprocessor with cleaned_config available
    print("Creating preprocessor and postprocessor...")
    # The pattern from existing processor factories:
    # - Preprocessor has two NormalizerProcessors: one for input_features, one for output_features
    # - Postprocessor has one UnnormalizerProcessor for output_features only

    # Get features from cleaned_config (now they're PolicyFeature objects)
    input_features = cleaned_config.get("input_features", {})
    output_features = cleaned_config.get("output_features", {})

    # Create preprocessor with two normalizers (following the pattern from processor factories)
    preprocessor_steps = [
        RenameProcessor(rename_map={}),
        NormalizerProcessor(
            features={**input_features, **output_features},
            norm_map=norm_map,
            stats=stats,
        ),
        ToBatchProcessor(),
        DeviceProcessor(device=policy_config.device),
    ]
    preprocessor = RobotProcessor(steps=preprocessor_steps, name="robot_preprocessor")

    # Create postprocessor with unnormalizer for outputs only
    postprocessor_steps = [
        DeviceProcessor(device="cpu"),
        UnnormalizerProcessor(features=output_features, norm_map=norm_map, stats=stats),
    ]
    postprocessor = RobotProcessor(steps=postprocessor_steps, name="robot_postprocessor")

    # Determine hub repo ID if pushing to hub
    if args.push_to_hub:
        if args.hub_repo_id:
            hub_repo_id = args.hub_repo_id
        else:
            if not os.path.isdir(args.pretrained_path):
                # Use same repo with "_migrated" suffix
                hub_repo_id = f"{args.pretrained_path}_migrated"
            else:
                raise ValueError("--hub-repo-id must be specified when pushing local model to hub")
    else:
        hub_repo_id = None

    # Save preprocessor and postprocessor to root directory
    print(f"Saving preprocessor to {output_dir}...")
    preprocessor.save_pretrained(output_dir)
    if args.push_to_hub:
        preprocessor.push_to_hub(repo_id=hub_repo_id, private=args.private)

    print(f"Saving postprocessor to {output_dir}...")
    postprocessor.save_pretrained(output_dir)
    if args.push_to_hub:
        postprocessor.push_to_hub(repo_id=hub_repo_id, private=args.private)

    # Save model using the policy's save_pretrained method
    print(f"Saving model to {output_dir}...")
    policy.save_pretrained(
        output_dir, push_to_hub=args.push_to_hub, repo_id=hub_repo_id, private=args.private
    )

    # Generate and save model card
    print("Generating model card...")
    # Get metadata from original config
    dataset_repo_id = train_config.get("repo_id", "unknown")
    license = config.get("license", "apache-2.0")

    tags = config.get("tags", ["robotics", "lerobot", policy_type]) or ["robotics", "lerobot", policy_type]
    tags = set(tags).union({"robotics", "lerobot", policy_type})
    tags = list(tags)

    # Generate model card
    card = policy.generate_model_card(
        dataset_repo_id=dataset_repo_id, model_type=policy_type, license=license, tags=tags
    )

    # Save model card locally
    card.save(str(output_dir / "README.md"))
    print(f"Model card saved to {output_dir / 'README.md'}")
    # Push model card to hub if requested
    if args.push_to_hub:
        from huggingface_hub import HfApi

        api = HfApi()
        api.upload_file(
            path_or_fileobj=str(output_dir / "README.md"),
            path_in_repo="README.md",
            repo_id=hub_repo_id,
            repo_type="model",
            commit_message="Add model card for migrated model",
        )
        print("Model card pushed to hub")

    print("\nMigration complete!")
    print(f"Migrated model saved to: {output_dir}")
    if args.push_to_hub:
        print(f"Successfully pushed to https://huggingface.co/{hub_repo_id}")


if __name__ == "__main__":
    main()
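
# Editor's note (illustrative): reloading the migrated artifacts afterwards.
# The config filenames mirror the names saved above, and the config_filename
# argument is assumed from the ModelHubMixin-based loader shown later in this
# diff; treat this as a sketch, not a verified API.
preprocessor = RobotProcessor.from_pretrained(output_dir, config_filename="robot_preprocessor.json")
postprocessor = RobotProcessor.from_pretrained(output_dir, config_filename="robot_postprocessor.json")
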
@@ -1,6 +1,7 @@
from __future__ import annotations

from collections.abc import Mapping
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any

@@ -10,7 +11,7 @@ from torch import Tensor

from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.processor.pipeline import EnvTransition, ProcessorStepRegistry, TransitionKey
from lerobot.processor.pipeline import EnvTransition, ProcessorStepRegistry, RobotProcessor, TransitionKey


def _convert_stats_to_tensors(stats: dict[str, dict[str, Any]]) -> dict[str, dict[str, Tensor]]:
@@ -115,7 +116,7 @@ class NormalizerProcessor:
        if self.normalize_keys is not None and not isinstance(self.normalize_keys, set):
            self.normalize_keys = set(self.normalize_keys)

    def _normalize_obs(self, observation):
    def _normalize_obs(self, observation, normalized_info):
        if observation is None:
            return None

@@ -128,7 +129,20 @@ class NormalizerProcessor:

        processed = dict(observation)
        for key in keys_to_norm:
            if key not in processed or key not in self._tensor_stats:
            if key not in processed or key not in self.features:
                continue

            # Check the normalization mode for this feature type
            feature = self.features[key]
            norm_mode = self.norm_map.get(feature.type, NormalizationMode.IDENTITY)

            # Skip normalization if mode is IDENTITY
            if norm_mode is NormalizationMode.IDENTITY:
                normalized_info[key] = "IDENTITY"
                continue

            # Skip if no stats available for this key
            if key not in self._tensor_stats:
                continue

            orig_val = processed[key]
@@ -139,16 +153,35 @@ class NormalizerProcessor:
            )
            stats = {k: v.to(tensor.device) for k, v in self._tensor_stats[key].items()}

            if "mean" in stats and "std" in stats:
                mean, std = stats["mean"], stats["std"]
                processed[key] = (tensor - mean) / (std + self.eps)
            elif "min" in stats and "max" in stats:
                min_val, max_val = stats["min"], stats["max"]
                processed[key] = 2 * (tensor - min_val) / (max_val - min_val + self.eps) - 1
            if norm_mode is NormalizationMode.MEAN_STD:
                if "mean" in stats and "std" in stats:
                    mean, std = stats["mean"], stats["std"]
                    processed[key] = (tensor - mean) / (std + self.eps)
                    normalized_info[key] = "MEAN_STD"
            elif norm_mode is NormalizationMode.MIN_MAX:
                if "min" in stats and "max" in stats:
                    min_val, max_val = stats["min"], stats["max"]
                    processed[key] = 2 * (tensor - min_val) / (max_val - min_val + self.eps) - 1
                    normalized_info[key] = "MIN_MAX"
            else:
                raise ValueError(f"Unsupported normalization mode: {norm_mode}")

        return processed

    def _normalize_action(self, action):
        if action is None or "action" not in self._tensor_stats:
    def _normalize_action(self, action, normalized_info):
        if action is None:
            return action

        # Check the normalization mode for actions
        norm_mode = self.norm_map.get(FeatureType.ACTION, NormalizationMode.IDENTITY)

        # Skip normalization if mode is IDENTITY
        if norm_mode is NormalizationMode.IDENTITY:
            normalized_info["action"] = "IDENTITY"
            return action

        # Skip if no stats available for actions
        if "action" not in self._tensor_stats:
            return action

        tensor = (
@@ -157,22 +190,42 @@ class NormalizerProcessor:
            else torch.as_tensor(action, dtype=torch.float32)
        )
        stats = {k: v.to(tensor.device) for k, v in self._tensor_stats["action"].items()}
        if "mean" in stats and "std" in stats:
            mean, std = stats["mean"], stats["std"]
            return (tensor - mean) / (std + self.eps)
        if "min" in stats and "max" in stats:
            min_val, max_val = stats["min"], stats["max"]
            return 2 * (tensor - min_val) / (max_val - min_val + self.eps) - 1
        raise ValueError("Action stats must contain either ('mean','std') or ('min','max')")

        if norm_mode is NormalizationMode.MEAN_STD:
            if "mean" in stats and "std" in stats:
                mean, std = stats["mean"], stats["std"]
                normalized_info["action"] = "MEAN_STD"
                return (tensor - mean) / (std + self.eps)
        elif norm_mode is NormalizationMode.MIN_MAX:
            if "min" in stats and "max" in stats:
                min_val, max_val = stats["min"], stats["max"]
                normalized_info["action"] = "MIN_MAX"
                return 2 * (tensor - min_val) / (max_val - min_val + self.eps) - 1
        else:
            raise ValueError(f"Unsupported normalization mode: {norm_mode}")

        # If we reach here, the required stats for the normalization mode are not available
        raise ValueError(f"Action stats must contain appropriate values for {norm_mode} normalization")

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        observation = self._normalize_obs(transition.get(TransitionKey.OBSERVATION))
        action = self._normalize_action(transition.get(TransitionKey.ACTION))
        # Track what was normalized
        normalized_info = {}

        observation = self._normalize_obs(transition.get(TransitionKey.OBSERVATION), normalized_info)
        action = self._normalize_action(transition.get(TransitionKey.ACTION), normalized_info)

        # Create a new transition with normalized values
        new_transition = transition.copy()
        new_transition[TransitionKey.OBSERVATION] = observation
        new_transition[TransitionKey.ACTION] = action

        # Add normalization info to complementary data
        if normalized_info:
            comp_data = new_transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
            comp_data = {} if comp_data is None else dict(comp_data)
            comp_data["normalized_keys"] = normalized_info
            new_transition[TransitionKey.COMPLEMENTARY_DATA] = comp_data

        return new_transition
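
# Editor's note (illustrative numbers, eps omitted): a quick check of the two
# modes handled above. MEAN_STD maps x -> (x - mean) / std; MIN_MAX maps the
# [min, max] range onto [-1, 1].
x, mean, std = 2.0, 1.0, 0.5
(x - mean) / std  # -> 2.0, i.e. two standard deviations above the mean

x, lo, hi = 7.5, 5.0, 10.0
2 * (x - lo) / (hi - lo) - 1  # -> 0.0, the midpoint of [min, max]
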

    def get_config(self) -> dict[str, Any]:
@@ -204,7 +257,7 @@ class NormalizerProcessor:
    def reset(self):
        pass

    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features


@@ -253,14 +306,28 @@ class UnnormalizerProcessor:
        self.stats = self.stats or {}
        self._tensor_stats = _convert_stats_to_tensors(self.stats)

    def _unnormalize_obs(self, observation):
    def _unnormalize_obs(self, observation, unnormalized_info):
        if observation is None:
            return None
        keys = [k for k, ft in self.features.items() if ft.type is not FeatureType.ACTION]
        processed = dict(observation)
        for key in keys:
            if key not in processed or key not in self._tensor_stats:
            if key not in processed or key not in self.features:
                continue

            # Check the normalization mode for this feature type
            feature = self.features[key]
            norm_mode = self.norm_map.get(feature.type, NormalizationMode.IDENTITY)

            # Skip unnormalization if mode is IDENTITY
            if norm_mode is NormalizationMode.IDENTITY:
                unnormalized_info[key] = "IDENTITY"
                continue

            # Skip if no stats available for this key
            if key not in self._tensor_stats:
                continue

            orig_val = processed[key]
            tensor = (
                orig_val.to(dtype=torch.float32)
@@ -268,39 +335,80 @@ class UnnormalizerProcessor:
                else torch.as_tensor(orig_val, dtype=torch.float32)
            )
            stats = {k: v.to(tensor.device) for k, v in self._tensor_stats[key].items()}
            if "mean" in stats and "std" in stats:
                mean, std = stats["mean"], stats["std"]
                processed[key] = tensor * std + mean
            elif "min" in stats and "max" in stats:
                min_val, max_val = stats["min"], stats["max"]
                processed[key] = (tensor + 1) / 2 * (max_val - min_val) + min_val

            if norm_mode is NormalizationMode.MEAN_STD:
                if "mean" in stats and "std" in stats:
                    mean, std = stats["mean"], stats["std"]
                    processed[key] = tensor * std + mean
                    unnormalized_info[key] = "MEAN_STD"
            elif norm_mode is NormalizationMode.MIN_MAX:
                if "min" in stats and "max" in stats:
                    min_val, max_val = stats["min"], stats["max"]
                    processed[key] = (tensor + 1) / 2 * (max_val - min_val) + min_val
                    unnormalized_info[key] = "MIN_MAX"
            else:
                raise ValueError(f"Unsupported normalization mode: {norm_mode}")

        return processed

    def _unnormalize_action(self, action):
        if action is None or "action" not in self._tensor_stats:
    def _unnormalize_action(self, action, unnormalized_info):
        if action is None:
            return action

        # Check the normalization mode for actions
        norm_mode = self.norm_map.get(FeatureType.ACTION, NormalizationMode.IDENTITY)

        # Skip unnormalization if mode is IDENTITY
        if norm_mode is NormalizationMode.IDENTITY:
            unnormalized_info["action"] = "IDENTITY"
            return action

        # Skip if no stats available for actions
        if "action" not in self._tensor_stats:
            return action

        tensor = (
            action.to(dtype=torch.float32)
            if isinstance(action, torch.Tensor)
            else torch.as_tensor(action, dtype=torch.float32)
        )
        stats = {k: v.to(tensor.device) for k, v in self._tensor_stats["action"].items()}
        if "mean" in stats and "std" in stats:
            mean, std = stats["mean"], stats["std"]
            return tensor * std + mean
        if "min" in stats and "max" in stats:
            min_val, max_val = stats["min"], stats["max"]
            return (tensor + 1) / 2 * (max_val - min_val) + min_val
        raise ValueError("Action stats must contain either ('mean','std') or ('min','max')")

        if norm_mode is NormalizationMode.MEAN_STD:
            if "mean" in stats and "std" in stats:
                mean, std = stats["mean"], stats["std"]
                unnormalized_info["action"] = "MEAN_STD"
                return tensor * std + mean
        elif norm_mode is NormalizationMode.MIN_MAX:
            if "min" in stats and "max" in stats:
                min_val, max_val = stats["min"], stats["max"]
                unnormalized_info["action"] = "MIN_MAX"
                return (tensor + 1) / 2 * (max_val - min_val) + min_val
        else:
            raise ValueError(f"Unsupported normalization mode: {norm_mode}")

        # If we reach here, the required stats for the normalization mode are not available
        raise ValueError(f"Action stats must contain appropriate values for {norm_mode} normalization")

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        observation = self._unnormalize_obs(transition.get(TransitionKey.OBSERVATION))
        action = self._unnormalize_action(transition.get(TransitionKey.ACTION))
        # Track what was unnormalized
        unnormalized_info = {}

        observation = self._unnormalize_obs(transition.get(TransitionKey.OBSERVATION), unnormalized_info)
        action = self._unnormalize_action(transition.get(TransitionKey.ACTION), unnormalized_info)

        # Create a new transition with unnormalized values
        new_transition = transition.copy()
        new_transition[TransitionKey.OBSERVATION] = observation
        new_transition[TransitionKey.ACTION] = action

        # Add unnormalization info to complementary data
        if unnormalized_info:
            comp_data = new_transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
            comp_data = {} if comp_data is None else dict(comp_data)
            comp_data["unnormalized_keys"] = unnormalized_info
            new_transition[TransitionKey.COMPLEMENTARY_DATA] = comp_data

        return new_transition

    def get_config(self) -> dict[str, Any]:
@@ -327,5 +435,41 @@ class UnnormalizerProcessor:
    def reset(self):
        pass

    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features


def hotswap_stats(robot_processor: RobotProcessor, stats: dict[str, dict[str, Any]]) -> RobotProcessor:
    robot_processor = deepcopy(robot_processor)
    for step in robot_processor.steps:
        if isinstance(step, (NormalizerProcessor, UnnormalizerProcessor)):
            step.stats = stats
            step._tensor_stats = _convert_stats_to_tensors(stats)
    return robot_processor


def rename_stats(stats: dict[str, dict[str, Any]], rename_map: dict[str, str]) -> dict[str, dict[str, Any]]:
    """Rename keys in the stats dictionary according to the provided mapping.

    Args:
        stats: The statistics dictionary with structure {feature_key: {stat_name: value}}
        rename_map: Dictionary mapping old key names to new key names

    Returns:
        A new stats dictionary with renamed keys

    Example:
        >>> stats = {"observation.state": {"mean": 0.0, "std": 1.0}, "action": {"mean": 0.5, "std": 0.5}}
        >>> rename_map = {"observation.state": "observation.robot_state"}
        >>> new_stats = rename_stats(stats, rename_map)
        >>> # new_stats will have "observation.robot_state" instead of "observation.state"
    """
    renamed_stats = {}

    for old_key, sub_stats in stats.items():
        # Use the new key if it exists in the rename map, otherwise keep the old key
        new_key = rename_map.get(old_key, old_key)
        renamed_stats[new_key] = deepcopy(sub_stats)

    return renamed_stats
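
# Editor's note (illustrative): adapting a saved processor to a new dataset.
# `dataset.meta.stats` is assumed to carry the usual {feature: {stat: value}}
# layout; the variable names are hypothetical.
new_stats = rename_stats(dataset.meta.stats, {"observation.state": "observation.robot_state"})
preprocessor = hotswap_stats(preprocessor, new_stats)
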
@@ -106,9 +106,8 @@ class VanillaObservationProcessor(ObservationProcessor):
|
||||
def observation(self, observation):
|
||||
return self._process_observation(observation)
|
||||
|
||||
def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
|
||||
def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
|
||||
"""Transforms feature keys to a standardized contract.
|
||||
|
||||
This method handles several renaming patterns:
|
||||
- Exact matches (e.g., 'pixels' -> 'OBS_IMAGE').
|
||||
- Prefixed exact matches (e.g., 'observation.pixels' -> 'OBS_IMAGE').
|
||||
|
||||
@@ -23,7 +23,7 @@ from copy import deepcopy
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any, Protocol, TypedDict
|
||||
from typing import Any, Protocol, TypedDict, runtime_checkable
|
||||
|
||||
import torch
|
||||
from huggingface_hub import ModelHubMixin, hf_hub_download
|
||||
@@ -132,6 +132,7 @@ class ProcessorStepRegistry:
|
||||
cls._registry.clear()
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class ProcessorStep(Protocol):
|
||||
"""Structural typing interface for a single processor step.
|
||||
|
||||
@@ -145,7 +146,6 @@ class ProcessorStep(Protocol):
|
||||
|
||||
**Required**:
|
||||
- ``__call__(transition: EnvTransition) -> EnvTransition``
|
||||
- ``feature_contract(features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]``
|
||||
|
||||
Optional helper protocol:
|
||||
* ``get_config() -> dict[str, Any]`` – User-defined JSON-serializable
|
||||
@@ -158,6 +158,8 @@ class ProcessorStep(Protocol):
|
||||
* ``load_state_dict(state)`` – Inverse of ``state_dict``. Receives a dict
|
||||
containing torch tensors only.
|
||||
* ``reset()`` – Clear internal buffers at episode boundaries.
|
||||
* ``transform_features(features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]``
|
||||
If present, this method will be called to aggregate the dataset features of all steps.
|
||||
|
||||
Example separation:
|
||||
- get_config(): {"name": "my_step", "learning_rate": 0.01, "window_size": 10}
|
||||
@@ -174,7 +176,7 @@ class ProcessorStep(Protocol):
|
||||
|
||||
def reset(self) -> None: ...
|
||||
|
||||
def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]: ...
|
||||
def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]: ...
|
||||
|
||||
|
||||
def _default_batch_to_transition(batch: dict[str, Any]) -> EnvTransition: # noqa: D401
|
||||
@@ -201,10 +203,16 @@ def _default_batch_to_transition(batch: dict[str, Any]) -> EnvTransition: # noq
|
||||
observation_keys = {k: v for k, v in batch.items() if k.startswith("observation.")}
|
||||
observation = observation_keys if observation_keys else None
|
||||
|
||||
# Extract padding and task keys for complementary data
|
||||
# Extract padding, task, index, and task_index keys for complementary data
|
||||
pad_keys = {k: v for k, v in batch.items() if "_is_pad" in k}
|
||||
task_key = {"task": batch["task"]} if "task" in batch else {}
|
||||
complementary_data = {**pad_keys, **task_key} if pad_keys or task_key else {}
|
||||
index_key = {"index": batch["index"]} if "index" in batch else {}
|
||||
task_index_key = {"task_index": batch["task_index"]} if "task_index" in batch else {}
|
||||
complementary_data = (
|
||||
{**pad_keys, **task_key, **index_key, **task_index_key}
|
||||
if pad_keys or task_key or index_key or task_index_key
|
||||
else {}
|
||||
)
|
||||
|
||||
transition: EnvTransition = {
|
||||
TransitionKey.OBSERVATION: observation,
|
||||
@@ -231,7 +239,7 @@ def _default_transition_to_batch(transition: EnvTransition) -> dict[str, Any]:
|
||||
"info": transition.get(TransitionKey.INFO, {}),
|
||||
}
|
||||
|
||||
# Add padding and task data from complementary_data
|
||||
# Add padding, task, index, and task_index data from complementary_data
|
||||
complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA)
|
||||
if complementary_data:
|
||||
pad_data = {k: v for k, v in complementary_data.items() if "_is_pad" in k}
|
||||
@@ -240,6 +248,12 @@ def _default_transition_to_batch(transition: EnvTransition) -> dict[str, Any]:
|
||||
if "task" in complementary_data:
|
||||
batch["task"] = complementary_data["task"]
|
||||
|
||||
if "index" in complementary_data:
|
||||
batch["index"] = complementary_data["index"]
|
||||
|
||||
if "task_index" in complementary_data:
|
||||
batch["task_index"] = complementary_data["task_index"]
|
||||
|
||||
# Handle observation - flatten dict to observation.* keys if it's a dict
|
||||
observation = transition.get(TransitionKey.OBSERVATION)
|
||||
if isinstance(observation, dict):
|
||||
@@ -342,7 +356,10 @@ class RobotProcessor(ModelHubMixin):
            hook(idx, current_transition)

        # Convert back to original format if needed
-        return self.to_output(current_transition) if called_with_batch else current_transition
+        if called_with_batch or self.to_output is not _default_transition_to_batch:
+            return self.to_output(current_transition)
+        else:
+            return current_transition

    def _prepare_transition(self, data: EnvTransition | dict[str, Any]) -> tuple[EnvTransition, bool]:
        """Prepare and validate transition data for processing.
@@ -575,10 +592,9 @@ class RobotProcessor(ModelHubMixin):
        if config_filename is None:
            # Try common config names
            common_names = [
-                "processor.json",
-                "preprocessor.json",
-                "postprocessor.json",
-                "robotprocessor.json",
+                "robot_processor.json",
+                "robot_preprocessor.json",
+                "robot_postprocessor.json",
            ]
            config_path = None
            for name in common_names:
@@ -808,23 +824,15 @@ class RobotProcessor(ModelHubMixin):
                    f"Step {i} ({type(step).__name__}) must define __call__(transition) -> EnvTransition"
                )

-            fc = getattr(step, "feature_contract", None)
-            if not callable(fc):
-                raise TypeError(
-                    f"Step {i} ({type(step).__name__}) must define feature_contract(features) -> dict[str, Any]"
-                )
-
-    def feature_contract(self, initial_features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
+    def transform_features(self, initial_features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        """
-        Apply ALL steps in order. Each step must implement
-        feature_contract(features) and return a dict (full or incremental schema).
+        Apply ALL steps in order. A step's features hook is called only if the step defines one;
+        we aggregate the dataset features across all steps.
        """
        features: dict[str, PolicyFeature] = deepcopy(initial_features)

        for _, step in enumerate(self.steps):
-            out = step.feature_contract(features)
-            if not isinstance(out, dict):
-                raise TypeError(f"{step.__class__.__name__}.feature_contract must return dict[str, Any]")
+            out = step.transform_features(features)
            features = out
        return features
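With the rename, a pipeline step only needs a `transform_features(features)` method for its schema changes to be aggregated. A minimal sketch of a custom step under that interface; the feature key and shape are illustrative, not part of this diff:

```python
from copy import deepcopy
from dataclasses import dataclass

from lerobot.configs.types import FeatureType, PolicyFeature


@dataclass
class AddVelocityFeature:
    """Hypothetical step: declares one extra feature, passes transitions through."""

    def __call__(self, transition):
        return transition  # no runtime change in this sketch

    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        features = deepcopy(features)
        # Illustrative key/shape only: advertise a 6-dim velocity in the schema.
        features["observation.state.velocity"] = PolicyFeature(type=FeatureType.STATE, shape=(6,))
        return features
```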
@@ -884,7 +892,7 @@ class ObservationProcessor:
    def reset(self) -> None:
        pass

-    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
+    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features


@@ -944,7 +952,7 @@ class ActionProcessor:
    def reset(self) -> None:
        pass

-    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
+    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features


@@ -1003,7 +1011,7 @@ class RewardProcessor:
    def reset(self) -> None:
        pass

-    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
+    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features


@@ -1067,7 +1075,7 @@ class DoneProcessor:
    def reset(self) -> None:
        pass

-    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
+    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features


@@ -1127,7 +1135,7 @@ class TruncatedProcessor:
    def reset(self) -> None:
        pass

-    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
+    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features


@@ -1192,7 +1200,7 @@ class InfoProcessor:
    def reset(self) -> None:
        pass

-    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
+    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features


@@ -1238,7 +1246,7 @@ class ComplementaryDataProcessor:
    def reset(self) -> None:
        pass

-    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
+    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features


@@ -1260,5 +1268,5 @@ class IdentityProcessor:
    def reset(self) -> None:
        pass

-    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
+    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features
@@ -43,7 +43,7 @@ class RenameProcessor(ObservationProcessor):
    def get_config(self) -> dict[str, Any]:
        return {"rename_map": self.rename_map}

-    def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
+    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        """Transforms:
        - Each key in the observation that appears in `rename_map` is renamed to its value.
        - Keys not in `rename_map` remain unchanged.
@@ -0,0 +1,229 @@
"""
Tokenizer processor for handling text tokenization in robot transitions.
"""

from __future__ import annotations

from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any

import torch

from lerobot.configs.types import FeatureType, PolicyFeature
from lerobot.constants import OBS_LANGUAGE
from lerobot.processor.pipeline import EnvTransition, ProcessorStepRegistry, TransitionKey
from lerobot.utils.import_utils import _transformers_available

if TYPE_CHECKING or _transformers_available:
    from transformers import AutoTokenizer
else:
    AutoTokenizer = None


@dataclass
@ProcessorStepRegistry.register(name="tokenizer_processor")
class TokenizerProcessor:
    """Tokenizes text tasks in complementary data using a Hugging Face tokenizer.

    This processor handles tokenization of task strings found in the complementary_data
    using a specified pretrained tokenizer from Hugging Face. It adds tokenized versions
    to the observation data for model processing while preserving the original task string.

    The processor supports both single strings and lists of strings as task inputs.

    Args:
        tokenizer_name: Name of the pretrained tokenizer to load from the Hugging Face Hub
            (e.g., "bert-base-uncased", "microsoft/DialoGPT-medium"). This will be used
            with AutoTokenizer.from_pretrained(). If tokenizer is provided, this is ignored.
        tokenizer: A tokenizer object (e.g., from the transformers library) that implements
            the __call__ method. If provided, tokenizer_name is ignored. This parameter
            is not serialized and must be provided via overrides when loading.
        max_length: Maximum sequence length for tokenization. Defaults to 512.
        task_key: Key in complementary_data containing the task text. Defaults to "task".
        padding: Padding strategy for tokenization. Defaults to "max_length".
        truncation: Whether to truncate sequences longer than max_length. Defaults to True.

    Examples:
        Using a tokenizer name (auto-loaded):
        ```python
        processor = TokenizerProcessor(tokenizer_name="bert-base-uncased", max_length=128)
        ```

        Using a custom tokenizer object:
        ```python
        from transformers import AutoTokenizer

        custom_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        processor = TokenizerProcessor(tokenizer=custom_tokenizer, max_length=128)
        ```
    """

    tokenizer_name: str | None = None
    tokenizer: Any | None = None  # Typed as Any so transformers stays out of the core dependencies
    max_length: int = 512
    task_key: str = "task"
    padding_side: str = "right"
    padding: str = "max_length"
    truncation: bool = True

    # Internal tokenizer instance (not serialized)
    _tokenizer: Any = field(default=None, init=False, repr=False)

    def __post_init__(self):
        """Initialize the tokenizer from the provided tokenizer or tokenizer name."""
        if not _transformers_available:
            raise ImportError(
                "The 'transformers' library is not installed. "
                "Please install it with `pip install 'lerobot[transformers-dep]'` to use TokenizerProcessor."
            )

        if self.tokenizer is not None:
            # Use the provided tokenizer object directly
            self._tokenizer = self.tokenizer
        elif self.tokenizer_name is not None:
            if AutoTokenizer is None:
                raise ImportError("AutoTokenizer is not available")
            self._tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)
        else:
            raise ValueError(
                "Either 'tokenizer' or 'tokenizer_name' must be provided. "
                "Pass a tokenizer object directly or a tokenizer name to auto-load."
            )

    def get_task(self, transition: EnvTransition) -> list[str] | None:
        """Extract and normalize the task from complementary data.

        Args:
            transition: Input transition containing complementary_data.

        Returns:
            List of task strings if a task is present, None otherwise.
        """
        complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA)
        if complementary_data is None:
            return None

        if self.task_key not in complementary_data:
            return None

        task = complementary_data[self.task_key]
        if task is None:
            return None

        # Convert to a list of strings
        if isinstance(task, str):
            return [task]
        elif isinstance(task, list) and all(isinstance(t, str) for t in task):
            return task

        return None

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Process the transition by tokenizing the task text.

        Args:
            transition: Input transition containing complementary_data with task text.

        Returns:
            Modified transition with the tokenized task added to the observation.

        Raises:
            ValueError: If tokenizer initialization failed.
        """
        task = self.get_task(transition)
        if task is None:
            return transition

        # Tokenize the task
        tokenized_prompt = self._tokenize_text(task)

        # Get or create the observation dict
        observation = transition.get(TransitionKey.OBSERVATION)
        if observation is None:
            observation = {}
        else:
            observation = dict(observation)  # Make a copy

        # Add tokenized data to the observation
        input_ids = tokenized_prompt["input_ids"]
        attention_mask = tokenized_prompt.get("attention_mask")
        if attention_mask is None:
            # Some tokenizers (e.g., SigLIP text) may not return attention_mask; default to ones
            attention_mask = torch.ones_like(input_ids)
        observation[f"{OBS_LANGUAGE}.tokens"] = input_ids
        observation[f"{OBS_LANGUAGE}.attention_mask"] = attention_mask.to(dtype=torch.bool)

        transition[TransitionKey.OBSERVATION.value] = observation  # type: ignore[misc]
        return transition

    def _tokenize_text(self, text: str | list[str]) -> dict[str, torch.Tensor]:
        """Tokenize text using the configured tokenizer.

        Args:
            text: Text string or list of strings to tokenize.

        Returns:
            Dictionary containing tokenized output with keys like 'input_ids' and 'attention_mask'.
        """
        return self._tokenizer(
            text,
            max_length=self.max_length,
            truncation=self.truncation,
            padding=self.padding,
            padding_side=self.padding_side,
            return_tensors="pt",
        )

    def get_config(self) -> dict[str, Any]:
        """Return the configuration for serialization.

        Note: Only tokenizer_name is saved, not the tokenizer object itself.
        When loading, provide the tokenizer via overrides if needed.
        """
        config = {
            "max_length": self.max_length,
            "task_key": self.task_key,
            "padding_side": self.padding_side,
            "padding": self.padding,
            "truncation": self.truncation,
        }

        # Only include tokenizer_name if it was used (not when a tokenizer object was provided)
        if self.tokenizer_name is not None:
            config["tokenizer_name"] = self.tokenizer_name

        return config

    def state_dict(self) -> dict[str, torch.Tensor]:
        """Return the state dictionary (empty for this processor)."""
        return {}

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """Load a state dictionary (no-op for this processor)."""
        pass

    def reset(self) -> None:
        """Reset processor state (no-op for this processor)."""
        pass

    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        """Add tokenized task features to the feature contract.

        Args:
            features: Input feature dictionary.

        Returns:
            Updated feature dictionary with the tokenized task features added.
        """
        # Add features for the tokenized output if they don't exist.
        # Standard tokenizer output includes tokens and an attention mask.
        tokens_key = f"{OBS_LANGUAGE}.tokens"
        attention_mask_key = f"{OBS_LANGUAGE}.attention_mask"

        if tokens_key not in features:
            features[tokens_key] = PolicyFeature(type=FeatureType.LANGUAGE, shape=(self.max_length,))

        if attention_mask_key not in features:
            features[attention_mask_key] = PolicyFeature(type=FeatureType.LANGUAGE, shape=(self.max_length,))

        return features
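A hedged usage sketch for the new processor: calling the step on a transition dict keyed by `TransitionKey` and reading back the keys it adds. This assumes `TransitionKey` behaves as a string-valued enum so that enum and string indexing agree, as the `.value` assignment above suggests, and that `transformers` is installed:

```python
from lerobot.constants import OBS_LANGUAGE
from lerobot.processor.pipeline import TransitionKey

step = TokenizerProcessor(tokenizer_name="bert-base-uncased", max_length=32)
transition = {
    TransitionKey.OBSERVATION: {},
    TransitionKey.COMPLEMENTARY_DATA: {"task": "pick up the red cube"},
}
out = step(transition)
tokens = out[TransitionKey.OBSERVATION][f"{OBS_LANGUAGE}.tokens"]
mask = out[TransitionKey.OBSERVATION][f"{OBS_LANGUAGE}.attention_mask"]
print(tokens.shape, mask.shape)  # (1, 32) each with these settings
```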
+151 -38
@@ -18,7 +18,7 @@ Records a dataset. Actions for the robot can be either generated by teleoperatio
Example:

```shell
-lerobot-record \
+python -m lerobot.record \
    --robot.type=so100_follower \
    --robot.port=/dev/tty.usbmodem58760431541 \
    --robot.cameras="{laptop: {type: opencv, camera_index: 0, width: 640, height: 480}}" \
@@ -36,7 +36,7 @@ lerobot-record \

Example recording with bimanual so100:
```shell
-lerobot-record \
+python -m lerobot.record \
    --robot.type=bi_so100_follower \
    --robot.left_arm_port=/dev/tty.usbmodem5A460851411 \
    --robot.right_arm_port=/dev/tty.usbmodem5A460812391 \
@@ -59,7 +59,7 @@ lerobot-record \

import logging
import time
-from dataclasses import asdict, dataclass
+from dataclasses import asdict, dataclass, field
from pathlib import Path
from pprint import pformat

@@ -72,10 +72,19 @@ from lerobot.configs import parser
from lerobot.configs.policies import PreTrainedConfig
from lerobot.datasets.image_writer import safe_stop_image_writer
from lerobot.datasets.lerobot_dataset import LeRobotDataset
-from lerobot.datasets.utils import build_dataset_frame, hw_to_dataset_features
+from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.datasets.video_utils import VideoEncodingManager
-from lerobot.policies.factory import make_policy
+from lerobot.policies.factory import make_policy, make_processor
from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.processor import RobotProcessor
+from lerobot.processor.converters import (
+    to_dataset_frame,
+    to_output_robot_action,
+    to_transition_robot_observation,
+    to_transition_teleop_action,
+)
+from lerobot.processor.normalize_processor import rename_stats
+from lerobot.processor.pipeline import IdentityProcessor, TransitionKey
from lerobot.robots import (  # noqa: F401
    Robot,
    RobotConfig,
@@ -149,6 +158,8 @@ class DatasetRecordConfig:
    # Number of episodes to record before batch encoding videos
    # Set to 1 for immediate encoding (default behavior), or higher for batched encoding
    video_encoding_batch_size: int = 1
+    # Rename map for the observation to override the image and state keys
+    rename_map: dict[str, str] = field(default_factory=dict)

    def __post_init__(self):
        if self.single_task is None:
@@ -187,6 +198,36 @@ class RecordConfig:
        return ["policy"]

""" --------------- record_loop() data flow --------------------------
|
||||
[ Robot ]
|
||||
V
|
||||
[ robot.get_observation() ] ---> raw_obs
|
||||
V
|
||||
[ robot_observation_processor ] ---> obs_transition
|
||||
V
|
||||
.-----( ACTION LOGIC )------------------.
|
||||
V V
|
||||
[ From Teleoperator ] [ From Policy ]
|
||||
| |
|
||||
| [teleop.get_action] -> raw_action | [predict_action]
|
||||
| | | |
|
||||
| V | V
|
||||
| [teleop_action_processor] | |
|
||||
| | | |
|
||||
'---> teleop_transition '---> policy_transition
|
||||
| |
|
||||
'-------------------------.-------------'
|
||||
V
|
||||
[ robot_action_processor ] --> robot_action_to_send
|
||||
V
|
||||
[ robot.send_action() ] -- (Robot Executes)
|
||||
V
|
||||
( Transitions are merged & added to Dataset )
|
||||
V
|
||||
( Rerun Log / Loop Wait )
|
||||
"""
|
||||
|
||||
|
||||
@safe_stop_image_writer
def record_loop(
    robot: Robot,
@@ -195,15 +236,30 @@ def record_loop(
    dataset: LeRobotDataset | None = None,
    teleop: Teleoperator | list[Teleoperator] | None = None,
    policy: PreTrainedPolicy | None = None,
+    preprocessor: RobotProcessor | None = None,
+    postprocessor: RobotProcessor | None = None,
    control_time_s: int | None = None,
+    teleop_action_processor: RobotProcessor | None = None,  # runs after teleop
+    robot_action_processor: RobotProcessor | None = None,  # runs before robot
+    robot_observation_processor: RobotProcessor | None = None,  # runs after robot
    single_task: str | None = None,
    display_data: bool = False,
):
+    teleop_action_processor = teleop_action_processor or RobotProcessor(
+        steps=[IdentityProcessor()], to_transition=to_transition_teleop_action, to_output=lambda tr: tr
+    )
+    robot_action_processor = robot_action_processor or RobotProcessor(
+        steps=[IdentityProcessor()], to_transition=lambda tr: tr, to_output=to_output_robot_action
+    )
+    robot_observation_processor = robot_observation_processor or RobotProcessor(
+        steps=[IdentityProcessor()], to_transition=to_transition_robot_observation, to_output=lambda tr: tr
+    )
+
    if dataset is not None and dataset.fps != fps:
        raise ValueError(f"The dataset fps should be equal to requested fps ({dataset.fps} != {fps}).")

    teleop_arm = teleop_keyboard = None
-    if isinstance(teleop, list):
+    if isinstance(teleop, list):  # For LeKiwi
        teleop_keyboard = next((t for t in teleop if isinstance(t, KeyboardTeleop)), None)
        teleop_arm = next(
            (
@@ -219,9 +275,20 @@ def record_loop(
            "For multi-teleop, the list must contain exactly one KeyboardTeleop and one arm teleoperator. Currently only supported for LeKiwi robot."
        )

-    # if policy is given it needs cleaning up
-    if policy is not None:
+    # Reset the policy and processors if they are provided
+    if policy is not None and preprocessor is not None and postprocessor is not None:
        policy.reset()
+        preprocessor.reset()
+        postprocessor.reset()
+
+    # Reset custom pipelines
+    teleop_action_processor.reset()
+    robot_action_processor.reset()
+    robot_observation_processor.reset()
+
+    policy_transition = None
+    teleop_transition = None
+    obs_transition = None

    timestamp = 0
    start_episode_t = time.perf_counter()
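Any of the three pipelines can be overridden instead of falling back to the identity defaults above. A sketch of a custom observation pipeline; it assumes `RenameProcessor` (shown earlier in this diff) is importable from `lerobot.processor`, which may not match the actual module path, and the key names are illustrative:

```python
from lerobot.processor import RobotProcessor  # RenameProcessor path is an assumption
from lerobot.processor.converters import to_transition_robot_observation

custom_obs_processor = RobotProcessor(
    steps=[RenameProcessor(rename_map={"laptop": "observation.images.top"})],
    to_transition=to_transition_robot_observation,
    to_output=lambda tr: tr,  # keep the transition form, as the default does
)
# record_loop(robot, events, fps=30, robot_observation_processor=custom_obs_processor, ...)
```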
@@ -232,51 +299,87 @@ def record_loop(
            events["exit_early"] = False
            break

-        observation = robot.get_observation()
+        # Get robot observation
+        obs = robot.get_observation()

-        if policy is not None or dataset is not None:
-            observation_frame = build_dataset_frame(dataset.features, observation, prefix="observation")
+        # Apply a pipeline to the raw robot observation; the default is an IdentityProcessor
+        obs_transition = robot_observation_processor(obs)

+        # Get the action from either the policy or teleop
+        if policy is not None and preprocessor is not None and postprocessor is not None:
+            if dataset is not None:
+                observation_frame = to_dataset_frame(
+                    obs_transition, dataset.features
+                )  # Convert the observation to the dataset format

-        if policy is not None:
            action_values = predict_action(
-                observation_frame,
-                policy,
-                get_safe_torch_device(policy.config.device),
-                policy.config.use_amp,
+                observation=observation_frame,
+                policy=policy,
+                device=get_safe_torch_device(policy.config.device),
+                preprocessor=preprocessor,
+                postprocessor=postprocessor,
+                use_amp=policy.config.use_amp,
                task=single_task,
                robot_type=robot.robot_type,
            )
-            action = {key: action_values[i].item() for i, key in enumerate(robot.action_features)}
-        elif policy is None and isinstance(teleop, Teleoperator):
-            action = teleop.get_action()
-        elif policy is None and isinstance(teleop, list):
-            # TODO(pepijn, steven): clean the record loop for use of multiple robots (possibly with pipeline)

+            action_names = dataset.features["action"]["names"]
+            policy_action = {f"action.{name}": float(action_values[i]) for i, name in enumerate(action_names)}
+            policy_transition = {
+                TransitionKey.ACTION: policy_action,
+                TransitionKey.COMPLEMENTARY_DATA: {},
+            }
+
+        elif isinstance(teleop, Teleoperator):
+            act = teleop.get_action()
+
+            # Apply a pipeline to the raw teleop action; the default is an IdentityProcessor
+            teleop_transition = teleop_action_processor(act)
+
+        elif isinstance(teleop, list):
            arm_action = teleop_arm.get_action()
            arm_action = {f"arm_{k}": v for k, v in arm_action.items()}

            keyboard_action = teleop_keyboard.get_action()
            base_action = robot._from_keyboard_to_base_action(keyboard_action)

-            action = {**arm_action, **base_action} if len(base_action) > 0 else arm_action
+            act = {**arm_action, **base_action} if len(base_action) > 0 else arm_action
+            teleop_transition = teleop_action_processor(act)
        else:
            logging.info(
-                "No policy or teleoperator provided, skipping action generation."
-                "This is likely to happen when resetting the environment without a teleop device."
-                "The robot won't be at its rest position at the start of the next episode."
+                "No policy or teleoperator provided, skipping action generation. "
+                "This is likely to happen during environment reset."
            )
            continue  # Still continue to the next loop iteration to respect timing

+        # Apply a pipeline to the action; the default is an IdentityProcessor.
+        # IMPORTANT: action_pipeline.to_output must return a dict suitable for robot.send_action()
+        if policy_transition is not None:
+            robot_action_to_send = robot_action_processor(policy_transition)
+        else:
+            robot_action_to_send = robot_action_processor(teleop_transition)

        # Send action to robot
        # Action can eventually be clipped using `max_relative_target`,
-        # so the action actually sent is saved in the dataset.
-        sent_action = robot.send_action(action)
+        # so the action actually sent is saved in the dataset.
+        # TODO(pepijn, adil): we should use a pipeline step to clip the action, so the sent action is the action that we input to the robot.
+        _ = robot.send_action(robot_action_to_send)

        # Write to dataset
        if dataset is not None:
-            action_frame = build_dataset_frame(dataset.features, sent_action, prefix="action")
-            frame = {**observation_frame, **action_frame}
+            # Use to_dataset_frame to merge the transitions.
+            merged = []
+            if obs_transition is not None:  # The observation from the robot
+                merged.append(obs_transition)
+            if teleop_transition is not None:  # The action from teleop
+                merged.append(teleop_transition)
+            if policy_transition is not None:  # The action from the policy
+                merged.append(policy_transition)
+            frame = to_dataset_frame(
+                merged if len(merged) > 1 else merged[0], dataset.features
+            )  # Convert the merged transitions to the dataset format
            dataset.add_frame(frame, task=single_task)

        if display_data:
-            log_rerun_data(observation, action)
+            log_rerun_data([obs_transition, teleop_transition or policy_transition])

        dt_s = time.perf_counter() - start_loop_t
        busy_wait(1 / fps - dt_s)
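The loop tail keeps a fixed control rate: each iteration consumes part of the `1/fps` budget and waits out the remainder. A standalone sketch of that timing arithmetic (`busy_wait` in lerobot spin-waits for better precision than `time.sleep`):

```python
import time

fps = 30
start_loop_t = time.perf_counter()
# ... one iteration of observation / action / dataset work ...
dt_s = time.perf_counter() - start_loop_t
# Sleep for whatever remains of the 33.3 ms budget; never a negative duration.
time.sleep(max(1 / fps - dt_s, 0.0))
```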
@@ -328,6 +431,18 @@ def record(cfg: RecordConfig) -> LeRobotDataset:

    # Load pretrained policy
    policy = None if cfg.policy is None else make_policy(cfg.policy, ds_meta=dataset.meta)
+    preprocessor = None
+    postprocessor = None
+    if cfg.policy is not None:
+        preprocessor, postprocessor = make_processor(
+            policy_cfg=cfg.policy,
+            pretrained_path=cfg.policy.pretrained_path,
+            dataset_stats=rename_stats(dataset.meta.stats, cfg.dataset.rename_map),
+            preprocessor_overrides={
+                "device_processor": {"device": cfg.policy.device},
+                "rename_processor": {"rename_map": cfg.dataset.rename_map},
+            },
+        )

    robot.connect()
    if teleop is not None:
@@ -345,6 +460,8 @@ def record(cfg: RecordConfig) -> LeRobotDataset:
        fps=cfg.dataset.fps,
        teleop=teleop,
        policy=policy,
+        preprocessor=preprocessor,
+        postprocessor=postprocessor,
        dataset=dataset,
        control_time_s=cfg.dataset.episode_time_s,
        single_task=cfg.dataset.single_task,
@@ -393,9 +510,5 @@ def record(cfg: RecordConfig) -> LeRobotDataset:
    return dataset


-def main():
-    record()
-
-
if __name__ == "__main__":
-    main()
+    record()

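A hedged sketch of how the new `rename_map` field might be set when building the record config; every field name besides `rename_map` is an assumption about `DatasetRecordConfig`, and the key names are illustrative:

```python
# Hypothetical construction; repo_id/single_task are assumed field names.
cfg = DatasetRecordConfig(
    repo_id="user/dataset",
    single_task="pick the cube",
    # Remap a recorded camera key before stats and the rename_processor override are built.
    rename_map={"observation.images.laptop": "observation.images.top"},
)
```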
@@ -18,7 +18,7 @@ Replays the actions of an episode from a dataset on a robot.
Examples:

```shell
-lerobot-replay \
+python -m lerobot.replay \
    --robot.type=so100_follower \
    --robot.port=/dev/tty.usbmodem58760431541 \
    --robot.id=black \
@@ -28,7 +28,7 @@ lerobot-replay \

Example replay with bimanual so100:
```shell
-lerobot-replay \
+python -m lerobot.replay \
    --robot.type=bi_so100_follower \
    --robot.left_arm_port=/dev/tty.usbmodem5A460851411 \
    --robot.right_arm_port=/dev/tty.usbmodem5A460812391 \

@@ -14,6 +14,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-from .config_so100_follower import SO100FollowerConfig, SO100FollowerEndEffectorConfig
+from .config_so100_follower import SO100FollowerConfig
from .so100_follower import SO100Follower
-from .so100_follower_end_effector import SO100FollowerEndEffector

@@ -39,35 +39,3 @@ class SO100FollowerConfig(RobotConfig):

    # Set to `True` for backward compatibility with previous policies/dataset
    use_degrees: bool = False
-
-
-@RobotConfig.register_subclass("so100_follower_end_effector")
-@dataclass
-class SO100FollowerEndEffectorConfig(SO100FollowerConfig):
-    """Configuration for the SO100FollowerEndEffector robot."""
-
-    # Path to URDF file for kinematics
-    # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo:
-    # https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
-    urdf_path: str | None = None
-
-    # End-effector frame name in the URDF
-    target_frame_name: str = "gripper_frame_link"
-
-    # Default bounds for the end-effector position (in meters)
-    end_effector_bounds: dict[str, list[float]] = field(
-        default_factory=lambda: {
-            "min": [-1.0, -1.0, -1.0],  # min x, y, z
-            "max": [1.0, 1.0, 1.0],  # max x, y, z
-        }
-    )
-
-    max_gripper_pos: float = 50
-
-    end_effector_step_sizes: dict[str, float] = field(
-        default_factory=lambda: {
-            "x": 0.02,
-            "y": 0.02,
-            "z": 0.02,
-        }
-    )

@@ -0,0 +1,447 @@
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field

import numpy as np
from scipy.spatial.transform import Rotation

from lerobot.configs.types import PolicyFeature
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor.pipeline import (
    ActionProcessor,
    ComplementaryDataProcessor,
    EnvTransition,
    ObservationProcessor,
    ProcessorStepRegistry,
    TransitionKey,
)
from lerobot.robots.robot import Robot


@ProcessorStepRegistry.register("ee_reference_and_delta")
@dataclass
class EEReferenceAndDelta:
    """
    Compute the desired end-effector pose from the target pose and the current pose.

    Input ACTION keys:
        {
            "action.enabled": bool,
            "action.target_{x,y,z,wx,wy,wz}": float,
            "complementary_data.raw_joint_positions": dict,
        }

    Output ACTION keys:
        {
            "action.ee.{x,y,z,wx,wy,wz}": float,
        }
    """

    kinematics: RobotKinematics
    end_effector_step_sizes: dict
    motor_names: list[str]

    reference_ee_pose: np.ndarray | None = field(default=None, init=False, repr=False)
    _prev_enabled: bool = field(default=False, init=False, repr=False)
    _command_when_disabled: np.ndarray | None = field(default=None, init=False, repr=False)

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        act = transition.get(TransitionKey.ACTION) or {}
        comp = transition.get(TransitionKey.COMPLEMENTARY_DATA) or {}

        # Get joint positions from complementary data
        raw = comp.get("raw_joint_positions", None)
        if raw is None:
            raise ValueError(
                "raw_joint_positions is not in complementary data and is required for EEReferenceAndDelta"
            )

        q = np.array([float(raw[n]) for n in self.motor_names], dtype=float)

        # Current pose from FK on measured joints
        t_curr = self.kinematics.forward_kinematics(q)

        enabled = bool(act.pop("action.enabled", 0))
        tx = float(act.pop("action.target_x", 0.0))
        ty = float(act.pop("action.target_y", 0.0))
        tz = float(act.pop("action.target_z", 0.0))
        wx = float(act.pop("action.target_wx", 0.0))
        wy = float(act.pop("action.target_wy", 0.0))
        wz = float(act.pop("action.target_wz", 0.0))

        desired = None

        if enabled:
            # Latch a reference at the rising edge; also be defensive if None
            if not self._prev_enabled or self.reference_ee_pose is None:
                self.reference_ee_pose = t_curr.copy()

            ref = self.reference_ee_pose if self.reference_ee_pose is not None else t_curr

            delta_p = np.array(
                [
                    tx * self.end_effector_step_sizes["x"],
                    ty * self.end_effector_step_sizes["y"],
                    tz * self.end_effector_step_sizes["z"],
                ],
                dtype=float,
            )
            r_abs = Rotation.from_rotvec([wx, wy, wz]).as_matrix()

            desired = np.eye(4, dtype=float)
            desired[:3, :3] = ref[:3, :3] @ r_abs
            desired[:3, 3] = ref[:3, 3] + delta_p

            self._command_when_disabled = desired.copy()
        else:
            # While disabled, keep sending the same command to avoid drift.
            if self._command_when_disabled is None:
                # If we've never had an enabled command yet, freeze the current FK pose once.
                self._command_when_disabled = t_curr.copy()
            desired = self._command_when_disabled.copy()

        # Write action fields
        pos = desired[:3, 3]
        tw = Rotation.from_matrix(desired[:3, :3]).as_rotvec()
        act.update(
            {
                "action.ee.x": float(pos[0]),
                "action.ee.y": float(pos[1]),
                "action.ee.z": float(pos[2]),
                "action.ee.wx": float(tw[0]),
                "action.ee.wy": float(tw[1]),
                "action.ee.wz": float(tw[2]),
            }
        )

        self._prev_enabled = enabled
        transition[TransitionKey.ACTION] = act
        return transition

    def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
        return features

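The core of `EEReferenceAndDelta` is the pose composition: an absolute rotation-vector target applied on top of the latched reference orientation, plus a scaled positional delta. A standalone numeric sketch of that composition:

```python
import numpy as np
from scipy.spatial.transform import Rotation

ref = np.eye(4)                       # latched reference pose (4x4 homogeneous)
delta_p = np.array([0.02, 0.0, 0.0])  # already scaled by end_effector_step_sizes
r_abs = Rotation.from_rotvec([0.0, 0.0, np.pi / 2]).as_matrix()

desired = np.eye(4)
desired[:3, :3] = ref[:3, :3] @ r_abs  # rotate relative to the reference frame
desired[:3, 3] = ref[:3, 3] + delta_p  # translate from the reference position
```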
@ProcessorStepRegistry.register("ee_bounds_and_safety")
|
||||
@dataclass
|
||||
class EEBoundsAndSafety(ActionProcessor):
|
||||
"""
|
||||
Clip the end-effector pose to the bounds and check for jumps.
|
||||
|
||||
Input ACTION keys:
|
||||
{
|
||||
"action.ee.{x,y,z,wx,wy,wz}" : float
|
||||
}
|
||||
|
||||
Output ACTION keys:
|
||||
{
|
||||
"action.ee.{x,y,z,wx,wy,wz}" : float
|
||||
}
|
||||
"""
|
||||
|
||||
end_effector_bounds: dict
|
||||
max_ee_step_m: float = 0.05
|
||||
max_ee_twist_step_rad: float = 0.20
|
||||
_last_pos: np.ndarray | None = field(default=None, init=False, repr=False)
|
||||
|
||||
def action(self, act: dict | None) -> dict:
|
||||
x = act.pop("action.ee.x", None)
|
||||
y = act.pop("action.ee.y", None)
|
||||
z = act.pop("action.ee.z", None)
|
||||
wx = act.pop("action.ee.wx", None)
|
||||
wy = act.pop("action.ee.wy", None)
|
||||
wz = act.pop("action.ee.wz", None)
|
||||
|
||||
if None in (x, y, z, wx, wy, wz):
|
||||
return act
|
||||
|
||||
pos = np.array([x, y, z], dtype=float)
|
||||
twist = np.array([wx, wy, wz], dtype=float)
|
||||
|
||||
# Clip position
|
||||
pos = np.clip(pos, self.end_effector_bounds["min"], self.end_effector_bounds["max"])
|
||||
|
||||
# Check for jumps in position
|
||||
if self._last_pos is not None:
|
||||
dpos = pos - self._last_pos
|
||||
n = float(np.linalg.norm(dpos))
|
||||
if n > self.max_ee_step_m and n > 0:
|
||||
pos = self._last_pos + dpos * (self.max_ee_step_m / n)
|
||||
raise ValueError(f"EE jump {n:.3f}m > {self.max_ee_step_m}m")
|
||||
|
||||
self._last_pos = pos
|
||||
self._last_twist = twist
|
||||
|
||||
act.update(
|
||||
{
|
||||
"action.ee.x": float(pos[0]),
|
||||
"action.ee.y": float(pos[1]),
|
||||
"action.ee.z": float(pos[2]),
|
||||
"action.ee.wx": float(twist[0]),
|
||||
"action.ee.wy": float(twist[1]),
|
||||
"action.ee.wz": float(twist[2]),
|
||||
}
|
||||
)
|
||||
return act
|
||||
|
||||
def reset(self):
|
||||
self._last_pos = None
|
||||
|
||||
def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
|
||||
# Because this is last step we specify the dataset features of this step that we want to be stored in the dataset
|
||||
features["action.ee.x"] = float
|
||||
features["action.ee.y"] = float
|
||||
features["action.ee.z"] = float
|
||||
features["action.ee.wx"] = float
|
||||
features["action.ee.wy"] = float
|
||||
features["action.ee.wz"] = float
|
||||
return features
|
||||
|
||||
|
||||
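The jump check rescales an over-long step onto the allowed radius before raising. A standalone numeric sketch of that clamp with illustrative positions:

```python
import numpy as np

max_ee_step_m = 0.05
last_pos = np.array([0.10, 0.00, 0.20])
pos = np.array([0.10, 0.12, 0.20])  # a 12 cm jump in y

dpos = pos - last_pos
n = float(np.linalg.norm(dpos))      # 0.12 > 0.05, so the step is too large
clamped = last_pos + dpos * (max_ee_step_m / n)
print(clamped)                       # [0.10, 0.05, 0.20]
```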
@ProcessorStepRegistry.register("inverse_kinematics_ee_to_joints")
|
||||
@dataclass
|
||||
class InverseKinematicsEEToJoints:
|
||||
"""
|
||||
Compute the desired joint positions from the desired end-effector pose.
|
||||
|
||||
Input ACTION keys:
|
||||
{
|
||||
"action.ee.{x,y,z,wx,wy,wz}" : float
|
||||
"complementary_data.raw_joint_positions": dict,
|
||||
}
|
||||
|
||||
Output ACTION keys:
|
||||
{
|
||||
"action.joint_name_1.pos": float,
|
||||
"action.joint_name_2.pos": float,
|
||||
...
|
||||
"action.joint_name_n.pos": float,
|
||||
}
|
||||
"""
|
||||
|
||||
kinematics: RobotKinematics
|
||||
motor_names: list[str]
|
||||
q_curr: np.ndarray | None = field(default=None, init=False, repr=False)
|
||||
initial_guess_current_joints: bool = True
|
||||
|
||||
def __call__(self, transition: EnvTransition) -> EnvTransition:
|
||||
act = transition.get(TransitionKey.ACTION) or {}
|
||||
comp = transition.get(TransitionKey.COMPLEMENTARY_DATA) or {}
|
||||
|
||||
x = act.get("action.ee.x", None)
|
||||
y = act.get("action.ee.y", None)
|
||||
z = act.get("action.ee.z", None)
|
||||
wx = act.get("action.ee.wx", None)
|
||||
wy = act.get("action.ee.wy", None)
|
||||
wz = act.get("action.ee.wz", None)
|
||||
|
||||
if None in (x, y, z, wx, wy, wz):
|
||||
# Nothing to do; restore what we popped and return
|
||||
act.update(
|
||||
{
|
||||
"action.ee.x": x,
|
||||
"action.ee.y": y,
|
||||
"action.ee.z": z,
|
||||
"action.ee.wx": wx,
|
||||
"action.ee.wy": wy,
|
||||
"action.ee.wz": wz,
|
||||
}
|
||||
)
|
||||
transition[TransitionKey.ACTION] = act
|
||||
return transition
|
||||
|
||||
# Get joint positions from complimentary data
|
||||
raw = comp.get("raw_joint_positions", None)
|
||||
if raw is None:
|
||||
raise ValueError(
|
||||
"raw_joint_positions is not in complementary data and is required for EEReferenceAndDelta"
|
||||
)
|
||||
|
||||
if self.initial_guess_current_joints: # Use current joints as initial guess
|
||||
self.q_curr = np.array([float(raw[n]) for n in self.motor_names], dtype=float)
|
||||
else: # Use previous ik solution as initial guess
|
||||
if self.q_curr is None:
|
||||
self.q_curr = np.array([float(raw[n]) for n in self.motor_names], dtype=float)
|
||||
|
||||
# Build desired 4x4 transform from pos + rotvec (twist)
|
||||
t_des = np.eye(4, dtype=float)
|
||||
t_des[:3, :3] = Rotation.from_rotvec([wx, wy, wz]).as_matrix()
|
||||
t_des[:3, 3] = [x, y, z]
|
||||
|
||||
# Compute inverse kinematics
|
||||
q_target = self.kinematics.inverse_kinematics(self.q_curr, t_des)
|
||||
self.q_curr = q_target
|
||||
|
||||
new_act = dict(act)
|
||||
for i, name in enumerate(self.motor_names):
|
||||
if name == "gripper":
|
||||
new_act["observation.state.gripper.pos"] = float(raw["gripper"])
|
||||
else:
|
||||
new_act[f"action.{name}.pos"] = float(q_target[i])
|
||||
transition[TransitionKey.ACTION] = new_act
|
||||
return transition
|
||||
|
||||
def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
|
||||
# We specify the dataset features of this step that we want to be stored in the dataset
|
||||
features["action.ee.x"] = float
|
||||
features["action.ee.y"] = float
|
||||
features["action.ee.z"] = float
|
||||
features["action.ee.wx"] = float
|
||||
features["action.ee.wy"] = float
|
||||
features["action.ee.wz"] = float
|
||||
|
||||
features["observation.state.gripper.pos"] = float
|
||||
features["action.gripper.pos"] = float
|
||||
return features
|
||||
|
||||
def reset(self):
|
||||
self.q_curr = None
|
||||
|
||||
|
||||
@ProcessorStepRegistry.register("gripper_velocity_to_joint")
|
||||
@dataclass
|
||||
class GripperVelocityToJoint:
|
||||
"""
|
||||
Convert the gripper velocity to a joint velocity.
|
||||
|
||||
Input ACTION keys:
|
||||
{
|
||||
"action.gripper": float,
|
||||
}
|
||||
|
||||
Output ACTION keys:
|
||||
{
|
||||
"action.gripper.pos": float,
|
||||
}
|
||||
"""
|
||||
|
||||
motor_names: list[str]
|
||||
speed_factor: float = 20.0
|
||||
clip_min: float = 0.0
|
||||
clip_max: float = 100.0
|
||||
|
||||
def __call__(self, transition: EnvTransition) -> EnvTransition:
|
||||
obs = transition.get(TransitionKey.OBSERVATION) or {}
|
||||
act = transition.get(TransitionKey.ACTION) or {}
|
||||
comp = transition.get(TransitionKey.COMPLEMENTARY_DATA) or {}
|
||||
|
||||
if "action.gripper" not in act:
|
||||
return transition
|
||||
|
||||
if "gripper" not in self.motor_names:
|
||||
new_act = dict(act)
|
||||
new_act.pop("action.gripper", None)
|
||||
transition[TransitionKey.ACTION] = new_act
|
||||
return transition
|
||||
|
||||
# Get current gripper position from complementary data
|
||||
raw = comp.get("raw_joint_positions") or {}
|
||||
curr_pos = float(raw.get("gripper"))
|
||||
|
||||
# Compute desired gripper velocity
|
||||
u = float(act.get("action.gripper", 0.0))
|
||||
delta = u * float(self.speed_factor)
|
||||
gripper_pos = float(np.clip(curr_pos + delta, self.clip_min, self.clip_max))
|
||||
|
||||
new_act = dict(act)
|
||||
new_act["action.gripper.pos"] = gripper_pos
|
||||
new_act.pop("action.gripper", None)
|
||||
transition[TransitionKey.ACTION] = new_act
|
||||
|
||||
obs.update({"observation.state.gripper.pos": curr_pos})
|
||||
transition[TransitionKey.OBSERVATION] = obs
|
||||
return transition
|
||||
|
||||
def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
|
||||
# We specify the dataset features of this step that we want to be stored in the dataset
|
||||
features["observation.state.gripper.pos"] = float
|
||||
features["action.gripper.pos"] = float
|
||||
return features
|
||||
|
||||
|
||||
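The gripper step integrates a velocity-style command into a position target. A standalone sketch of the arithmetic, assuming the command `u` is a normalized velocity in roughly [-1, 1] (the processor itself does not enforce that range):

```python
import numpy as np

speed_factor, clip_min, clip_max = 20.0, 0.0, 100.0
curr_pos, u = 80.0, 0.5  # half-speed closing command from the current 80% position

# One tick of integration: position moves by u * speed_factor, clipped to range.
gripper_pos = float(np.clip(curr_pos + u * speed_factor, clip_min, clip_max))
print(gripper_pos)  # 90.0
```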
@ProcessorStepRegistry.register("forward_kinematics_joints_to_ee")
|
||||
@dataclass
|
||||
class ForwardKinematicsJointsToEE(ObservationProcessor):
|
||||
"""
|
||||
Compute the end-effector pose from the joint positions.
|
||||
|
||||
Input OBSERVATION keys:
|
||||
{
|
||||
"observation.state.{joint_name_1,joint_name_2,...,joint_name_n}.pos": float,
|
||||
}
|
||||
|
||||
Output OBSERVATION keys:
|
||||
{
|
||||
"observation.state.ee.{x,y,z,wx,wy,wz}" : float
|
||||
}
|
||||
"""
|
||||
|
||||
kinematics: RobotKinematics
|
||||
motor_names: list[str]
|
||||
|
||||
def observation(self, obs: dict | None) -> dict:
|
||||
if not all(f"observation.state.{n}.pos" in obs for n in self.motor_names):
|
||||
return obs
|
||||
|
||||
q = np.array([obs[f"observation.state.{n}.pos"] for n in self.motor_names], dtype=float)
|
||||
t = self.kinematics.forward_kinematics(q)
|
||||
pos = t[:3, 3]
|
||||
tw = Rotation.from_matrix(t[:3, :3]).as_rotvec()
|
||||
|
||||
obs.update(
|
||||
{
|
||||
"observation.state.ee.x": float(pos[0]),
|
||||
"observation.state.ee.y": float(pos[1]),
|
||||
"observation.state.ee.z": float(pos[2]),
|
||||
"observation.state.ee.wx": float(tw[0]),
|
||||
"observation.state.ee.wy": float(tw[1]),
|
||||
"observation.state.ee.wz": float(tw[2]),
|
||||
}
|
||||
)
|
||||
return obs
|
||||
|
||||
def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
|
||||
# We specify the dataset features of this step that we want to be stored in the dataset
|
||||
for k in ["x", "y", "z", "wx", "wy", "wz"]:
|
||||
features[f"observation.state.ee.{k}"] = float
|
||||
return features
|
||||
|
||||
|
||||
@ProcessorStepRegistry.register("add_robot_observation")
|
||||
@dataclass
|
||||
class AddRobotObservationAsComplimentaryData(ComplementaryDataProcessor):
|
||||
"""
|
||||
Read the robot's current observation and insert it into the transition as complementary data.
|
||||
|
||||
- Joint positions are added under complementary_data["raw_joint_positions"] as a dict:
|
||||
{ "<motor_name>": <float position>, ... }
|
||||
"""
|
||||
|
||||
robot: Robot
|
||||
|
||||
def complementary_data(self, comp: dict | None) -> dict:
|
||||
comp = {} if comp is None else dict(comp)
|
||||
obs = self.robot.get_observation()
|
||||
|
||||
comp["raw_joint_positions"] = {
|
||||
k.removesuffix(".pos"): float(v)
|
||||
for k, v in obs.items()
|
||||
if isinstance(k, str) and k.endswith(".pos")
|
||||
}
|
||||
return comp
|
||||
|
||||
def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]:
|
||||
return features
|
||||
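A hedged sketch of chaining the steps from this file into one action pipeline. Constructor arguments are illustrative; `robot`, the URDF path, and the `RobotProcessor` import are assumed to be set up as elsewhere in this diff:

```python
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotProcessor

kinematics = RobotKinematics(
    urdf_path="so101_new_calib.urdf",  # illustrative path
    target_frame_name="gripper_frame_link",
)
motors = ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"]

ee_action_pipeline = RobotProcessor(
    steps=[
        AddRobotObservationAsComplimentaryData(robot=robot),  # `robot` created elsewhere
        EEReferenceAndDelta(
            kinematics=kinematics,
            end_effector_step_sizes={"x": 0.02, "y": 0.02, "z": 0.02},
            motor_names=motors,
        ),
        EEBoundsAndSafety(end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}),
        GripperVelocityToJoint(motor_names=motors),
        InverseKinematicsEEToJoints(kinematics=kinematics, motor_names=motors),
    ]
)
```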
@@ -161,11 +161,6 @@ class SO100Follower(Robot):
            self.bus.write("I_Coefficient", motor, 0)
            self.bus.write("D_Coefficient", motor, 32)

-            if motor == "gripper":
-                self.bus.write("Max_Torque_Limit", motor, 500)  # 50% of max torque to avoid burnout
-                self.bus.write("Protection_Current", motor, 250)  # 50% of max current to avoid burnout
-                self.bus.write("Overload_Torque", motor, 25)  # 25% torque when overloaded
-
    def setup_motors(self) -> None:
        for motor in reversed(self.bus.motors):
            input(f"Connect the controller board to the '{motor}' motor only and press enter.")

@@ -1,200 +0,0 @@
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import time
from typing import Any

import numpy as np

from lerobot.cameras import make_cameras_from_configs
from lerobot.errors import DeviceNotConnectedError
from lerobot.model.kinematics import RobotKinematics
from lerobot.motors import Motor, MotorNormMode
from lerobot.motors.feetech import FeetechMotorsBus

from . import SO100Follower
from .config_so100_follower import SO100FollowerEndEffectorConfig

logger = logging.getLogger(__name__)


class SO100FollowerEndEffector(SO100Follower):
    """
    SO100Follower robot with end-effector space control.

    This robot inherits from SO100Follower but transforms actions from
    end-effector space to joint space before sending them to the motors.
    """

    config_class = SO100FollowerEndEffectorConfig
    name = "so100_follower_end_effector"

    def __init__(self, config: SO100FollowerEndEffectorConfig):
        super().__init__(config)
        self.bus = FeetechMotorsBus(
            port=self.config.port,
            motors={
                "shoulder_pan": Motor(1, "sts3215", MotorNormMode.DEGREES),
                "shoulder_lift": Motor(2, "sts3215", MotorNormMode.DEGREES),
                "elbow_flex": Motor(3, "sts3215", MotorNormMode.DEGREES),
                "wrist_flex": Motor(4, "sts3215", MotorNormMode.DEGREES),
                "wrist_roll": Motor(5, "sts3215", MotorNormMode.DEGREES),
                "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100),
            },
            calibration=self.calibration,
        )

        self.cameras = make_cameras_from_configs(config.cameras)

        self.config = config

        # Initialize the kinematics module for the so100 robot
        if self.config.urdf_path is None:
            raise ValueError(
                "urdf_path must be provided in the configuration for end-effector control. "
                "Please set urdf_path in your SO100FollowerEndEffectorConfig."
            )

        self.kinematics = RobotKinematics(
            urdf_path=self.config.urdf_path,
            target_frame_name=self.config.target_frame_name,
        )

        # Store the bounds for the end-effector position
        self.end_effector_bounds = self.config.end_effector_bounds

        self.current_ee_pos = None
        self.current_joint_pos = None

    @property
    def action_features(self) -> dict[str, Any]:
        """
        Define action features for end-effector control.
        Returns a dictionary with dtype, shape, and names.
        """
        return {
            "dtype": "float32",
            "shape": (4,),
            "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2, "gripper": 3},
        }

    def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
        """
        Transform an action from end-effector space to joint space and send it to the motors.

        Args:
            action: Dictionary with keys 'delta_x', 'delta_y', 'delta_z' for end-effector control,
                or a numpy array with [delta_x, delta_y, delta_z]

        Returns:
            The joint-space action that was sent to the motors
        """
        if not self.is_connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        # Convert the action to a numpy array if not already
        if isinstance(action, dict):
            if all(k in action for k in ["delta_x", "delta_y", "delta_z"]):
                delta_ee = np.array(
                    [
                        action["delta_x"] * self.config.end_effector_step_sizes["x"],
                        action["delta_y"] * self.config.end_effector_step_sizes["y"],
                        action["delta_z"] * self.config.end_effector_step_sizes["z"],
                    ],
                    dtype=np.float32,
                )
                if "gripper" not in action:
                    action["gripper"] = [1.0]
                action = np.append(delta_ee, action["gripper"])
            else:
                logger.warning(
                    f"Expected action keys 'delta_x', 'delta_y', 'delta_z', got {list(action.keys())}"
                )
                action = np.zeros(4, dtype=np.float32)

        if self.current_joint_pos is None:
            # Read current joint positions
            current_joint_pos = self.bus.sync_read("Present_Position")
            self.current_joint_pos = np.array([current_joint_pos[name] for name in self.bus.motors])

        # Calculate the current end-effector position using forward kinematics
        if self.current_ee_pos is None:
            self.current_ee_pos = self.kinematics.forward_kinematics(self.current_joint_pos)

        # Set the desired end-effector position by adding the delta
        desired_ee_pos = np.eye(4)
        desired_ee_pos[:3, :3] = self.current_ee_pos[:3, :3]  # Keep orientation

        # Add the delta to the position and clip to bounds
        desired_ee_pos[:3, 3] = self.current_ee_pos[:3, 3] + action[:3]
        if self.end_effector_bounds is not None:
            desired_ee_pos[:3, 3] = np.clip(
                desired_ee_pos[:3, 3],
                self.end_effector_bounds["min"],
                self.end_effector_bounds["max"],
            )

        # Compute inverse kinematics to get joint positions
        target_joint_values_in_degrees = self.kinematics.inverse_kinematics(
            self.current_joint_pos, desired_ee_pos
        )

        # Create the joint-space action dictionary
        joint_action = {
            f"{key}.pos": target_joint_values_in_degrees[i] for i, key in enumerate(self.bus.motors.keys())
        }

        # Handle the gripper separately if included in the action.
        # The gripper delta action is in the range 0 - 2; we shift it to the range -1, 1
        # so that we can expand it to -max_gripper_pos, max_gripper_pos.
        joint_action["gripper.pos"] = np.clip(
            self.current_joint_pos[-1] + (action[-1] - 1) * self.config.max_gripper_pos,
            5,
            self.config.max_gripper_pos,
        )

        self.current_ee_pos = desired_ee_pos.copy()
        self.current_joint_pos = target_joint_values_in_degrees.copy()
        self.current_joint_pos[-1] = joint_action["gripper.pos"]

        # Send the joint-space action to the parent class
        return super().send_action(joint_action)

    def get_observation(self) -> dict[str, Any]:
        if not self.is_connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        # Read arm position
        start = time.perf_counter()
        obs_dict = self.bus.sync_read("Present_Position")
        obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
        dt_ms = (time.perf_counter() - start) * 1e3
        logger.debug(f"{self} read state: {dt_ms:.1f}ms")

        # Capture images from cameras
        for cam_key, cam in self.cameras.items():
            start = time.perf_counter()
            obs_dict[cam_key] = cam.async_read()
            dt_ms = (time.perf_counter() - start) * 1e3
            logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")

        return obs_dict

    def reset(self):
        self.current_ee_pos = None
        self.current_joint_pos = None
@@ -157,13 +157,6 @@ class SO101Follower(Robot):
            self.bus.write("I_Coefficient", motor, 0)
            self.bus.write("D_Coefficient", motor, 32)

-            if motor == "gripper":
-                self.bus.write(
-                    "Max_Torque_Limit", motor, 500
-                )  # 50% of the max torque limit to avoid burnout
-                self.bus.write("Protection_Current", motor, 250)  # 50% of max current to avoid burnout
-                self.bus.write("Overload_Torque", motor, 25)  # 25% torque when overloaded
-
    def setup_motors(self) -> None:
        for motor in reversed(self.bus.motors):
            input(f"Connect the controller board to the '{motor}' motor only and press enter.")

@@ -69,6 +69,7 @@ def make_robot_from_config(config: RobotConfig) -> Robot:
    raise ValueError(config.type)


+# TODO(pepijn): Move this to a pipeline step so we don't have to do it in the robot code and the action sent to the robot stays clean for use in the dataset
def ensure_safe_goal_position(
    goal_present_pos: dict[str, tuple[float, float]], max_relative_target: float | dict[float]
) -> dict[str, float]:

@@ -141,10 +141,10 @@ python lerobot/scripts/control_robot.py \

## Train a policy

-To train a policy to control your robot, use the [`lerobot-train`](../src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
+To train a policy to control your robot, use the [`python -m lerobot.scripts.train`](../src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:

```bash
-lerobot-train \
+python -m lerobot.scripts.train \
    --dataset.repo_id=${HF_USER}/aloha_test \
    --policy.type=act \
    --output_dir=outputs/train/act_aloha_test \

@@ -21,7 +21,7 @@ You want to evaluate a model from the hub (eg: https://huggingface.co/lerobot/di
for 10 episodes.

```
-lerobot-eval \
+python -m lerobot.scripts.eval \
    --policy.path=lerobot/diffusion_pusht \
    --env.type=pusht \
    --eval.batch_size=10 \
@@ -32,7 +32,7 @@ lerobot-eval \

OR, you want to evaluate a model checkpoint from the LeRobot training script for 10 episodes.
```
-lerobot-eval \
+python -m lerobot.scripts.eval \
    --policy.path=outputs/train/diffusion_pusht/checkpoints/005000/pretrained_model \
    --env.type=pusht \
    --eval.batch_size=10 \
@@ -243,11 +243,7 @@ def eval_policy(
    if max_episodes_rendered > 0 and not videos_dir:
        raise ValueError("If max_episodes_rendered > 0, videos_dir must be provided.")

-    # Handle accelerate-wrapped models by unwrapping them
-    if hasattr(policy, 'module') and isinstance(policy.module, PreTrainedPolicy):
-        # This is likely an accelerate-wrapped model (DistributedDataParallel)
-        policy = policy.module
-    elif not isinstance(policy, PreTrainedPolicy):
+    if not isinstance(policy, PreTrainedPolicy):
        raise ValueError(
            f"Policy of type 'PreTrainedPolicy' is expected, but type '{type(policy)}' was provided."
        )

@@ -1048,10 +1048,8 @@ def get_observation_features(
        return None, None

    with torch.no_grad():
-        observation_features = policy.actor.encoder.get_cached_image_features(observations, normalize=True)
-        next_observation_features = policy.actor.encoder.get_cached_image_features(
-            next_observations, normalize=True
-        )
+        observation_features = policy.actor.encoder.get_cached_image_features(observations)
+        next_observation_features = policy.actor.encoder.get_cached_image_features(next_observations)

    return observation_features, next_observation_features


@@ -302,6 +302,11 @@ class RobotClient:

        self.logger.debug(f"Current latest action: {latest_action}")

+        # Get queue state before changes
+        old_size, old_timesteps = self._inspect_action_queue()
+        if not old_timesteps:
+            old_timesteps = [latest_action]  # queue was empty
+
        # Get queue state before changes
        old_size, old_timesteps = self._inspect_action_queue()
        if not old_timesteps:

+210
-101
@@ -14,18 +14,20 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from contextlib import nullcontext
|
||||
from functools import partial
|
||||
from pprint import pformat
|
||||
from typing import Any
|
||||
|
||||
import torch
|
||||
|
||||
# Fix tokenizer parallelism conflicts with multiprocessing
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
||||
|
||||
from termcolor import colored
|
||||
from torch.amp import GradScaler
|
||||
from torch.optim import Optimizer
|
||||
import os
|
||||
from datetime import timedelta
|
||||
|
||||
from lerobot.configs import parser
|
||||
from lerobot.configs.train import TrainPipelineConfig
|
||||
@@ -34,7 +36,7 @@ from lerobot.datasets.sampler import EpisodeAwareSampler
 from lerobot.datasets.utils import cycle
 from lerobot.envs.factory import make_env
 from lerobot.optim.factory import make_optimizer_and_scheduler
-from lerobot.policies.factory import make_policy
+from lerobot.policies.factory import make_policy, make_processor
 from lerobot.policies.pretrained import PreTrainedPolicy
 from lerobot.policies.utils import get_device_from_parameters
 from lerobot.scripts.eval import eval_policy
@@ -55,8 +57,6 @@ from lerobot.utils.utils import (
 )
 from lerobot.utils.wandb_utils import WandBLogger

-def is_launched_with_accelerate() -> bool:
-    return "ACCELERATE_MIXED_PRECISION" in os.environ

 def update_policy(
     train_metrics: MetricsTracker,
@@ -64,65 +64,47 @@ def update_policy(
     batch: Any,
     optimizer: Optimizer,
     grad_clip_norm: float,
-    grad_scaler: GradScaler | None,
+    grad_scaler: GradScaler,
     lr_scheduler=None,
    use_amp: bool = False,
     lock=None,
-    accelerator=None,
 ) -> tuple[MetricsTracker, dict]:
     start_time = time.perf_counter()
     device = get_device_from_parameters(policy)
     policy.train()

-    grad_norm = 0.0  # Initialize grad_norm to avoid undefined variable
-
-    if accelerator:
-        with accelerator.accumulate(policy):
-            with torch.autocast(device_type=device.type) if use_amp else nullcontext():
-                loss, output_dict = policy.forward(batch)
-                # TODO(rcadene): policy.unnormalize_outputs(out_dict)
-            accelerator.backward(loss)
-            if accelerator.sync_gradients:
-                grad_norm = torch.nn.utils.clip_grad_norm_(
-                    policy.parameters(),
-                    grad_clip_norm,
-                    error_if_nonfinite=False,
-                )
-            optimizer.step()
-            optimizer.zero_grad()
-    else:
-        # Standard training loop without accelerate
-        with torch.autocast(device_type=device.type) if use_amp else nullcontext():
-            loss, output_dict = policy.forward(batch)
+    # Forward pass timing
+    forward_start = time.perf_counter()
+    with torch.autocast(device_type=device.type) if use_amp else nullcontext():
+        loss, output_dict = policy.forward(batch)
         # TODO(rcadene): policy.unnormalize_outputs(out_dict)

-        if grad_scaler is not None:
-            grad_scaler.scale(loss).backward()
-            # Unscale the gradient of the optimizer's assigned params in-place **prior to gradient clipping**.
-            grad_scaler.unscale_(optimizer)
-            grad_norm = torch.nn.utils.clip_grad_norm_(
-                policy.parameters(),
-                grad_clip_norm,
-                error_if_nonfinite=False,
-            )
-            # Optimizer's gradients are already unscaled, so scaler.step does not unscale them,
-            # although it still skips optimizer.step() if the gradients contain infs or NaNs.
-            with lock if lock is not None else nullcontext():
-                grad_scaler.step(optimizer)
-            # Updates the scale for next iteration.
-            grad_scaler.update()
-        else:
-            # Without GradScaler (fallback)
-            loss.backward()
-            grad_norm = torch.nn.utils.clip_grad_norm_(
-                policy.parameters(),
-                grad_clip_norm,
-                error_if_nonfinite=False,
-            )
-            with lock if lock is not None else nullcontext():
-                optimizer.step()
+    forward_time = time.perf_counter() - forward_start
+
+    # Backward pass timing
+    backward_start = time.perf_counter()
+    grad_scaler.scale(loss).backward()
+    backward_time = time.perf_counter() - backward_start
+
-        optimizer.zero_grad()
+    # Unscale the gradient of the optimizer's assigned params in-place **prior to gradient clipping**.
+    grad_scaler.unscale_(optimizer)
+
+    grad_norm = torch.nn.utils.clip_grad_norm_(
+        policy.parameters(),
+        grad_clip_norm,
+        error_if_nonfinite=False,
+    )
+
+    # Optimizer step timing
+    optim_start = time.perf_counter()
+
+    # Optimizer's gradients are already unscaled, so scaler.step does not unscale them,
+    # although it still skips optimizer.step() if the gradients contain infs or NaNs.
+    with lock if lock is not None else nullcontext():
+        grad_scaler.step(optimizer)
+    # Updates the scale for next iteration.
+    grad_scaler.update()
+
+    optimizer.zero_grad()

     # Step through pytorch scheduler at every batch instead of epoch
     if lr_scheduler is not None:
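The rewritten body follows the canonical torch.amp recipe: scale the loss, backpropagate, unscale before clipping, then step and update the scaler. A self-contained sketch of that order of operations (assuming `model(batch)` returns a scalar loss):

```python
import torch
from torch.amp import GradScaler

def amp_update(model, batch, optimizer, scaler: GradScaler, grad_clip_norm: float):
    device_type = next(model.parameters()).device.type
    with torch.autocast(device_type=device_type):
        loss = model(batch)
    scaler.scale(loss).backward()
    # Gradients must be unscaled in-place before clipping, otherwise the
    # clip threshold would be compared against scaled values.
    scaler.unscale_(optimizer)
    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip_norm)
    # step() skips the parameter update when gradients contain inf/NaN;
    # update() then adjusts the loss scale for the next iteration.
    scaler.step(optimizer)
    scaler.update()
    optimizer.zero_grad()
    return loss.detach(), grad_norm
```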
@@ -131,9 +113,50 @@ def update_policy(
     if has_method(policy, "update"):
         # To possibly update an internal buffer (for instance an Exponential Moving Average like in TDMPC).
         policy.update()

+    optim_time = time.perf_counter() - optim_start
+    total_time = time.perf_counter() - start_time
+
+    # Collect timing statistics for RLearN policy (averaged reporting every minute)
+    if getattr(policy, "name", None) == "rlearn":
+        # Initialize timing accumulator if not exists
+        if not hasattr(policy, '_train_timing_stats'):
+            policy._train_timing_stats = {
+                'forward_times': [],
+                'backward_times': [],
+                'optim_times': [],
+                'total_times': [],
+                'last_print_time': time.perf_counter()
+            }
+
+        # Accumulate current step's timings
+        stats = policy._train_timing_stats
+        stats['forward_times'].append(forward_time * 1000)
+        stats['backward_times'].append(backward_time * 1000)
+        stats['optim_times'].append(optim_time * 1000)
+        stats['total_times'].append(total_time * 1000)
+
+        # Print averaged stats every minute (60 seconds)
+        current_time = time.perf_counter()
+        if current_time - stats['last_print_time'] >= 60.0:
+            n_samples = len(stats['forward_times'])
+            if n_samples > 0:
+                print(f"\nTraining Step Average Timing (last {n_samples} steps):")
+                print(f" Forward pass: {sum(stats['forward_times'])/n_samples:.2f} ms")
+                print(f" Backward pass: {sum(stats['backward_times'])/n_samples:.2f} ms")
+                print(f" Optimizer step: {sum(stats['optim_times'])/n_samples:.2f} ms")
+                print(f" Total update: {sum(stats['total_times'])/n_samples:.2f} ms")
+                print(f" Avg steps/sec: {1000.0/(sum(stats['total_times'])/n_samples):.2f}")
+                print("-" * 50)
+
+            # Reset stats for next minute
+            for key in stats:
+                if key != 'last_print_time':
+                    stats[key] = []
+            stats['last_print_time'] = current_time
+
     train_metrics.loss = loss.item()
-    train_metrics.grad_norm = grad_norm.item() if isinstance(grad_norm, torch.Tensor) else grad_norm
+    train_metrics.grad_norm = grad_norm.item()
     train_metrics.lr = optimizer.param_groups[0]["lr"]
     train_metrics.update_s = time.perf_counter() - start_time
     return train_metrics, output_dict
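The RLearN timing block added above stores its accumulator on the policy object and prints averages once per minute. The same periodic-averaging pattern can be factored into a small reusable helper, sketched here (not code from the repo):

```python
import time
from collections import defaultdict

class PeriodicTimer:
    """Accumulate per-step durations and print their averages every `period_s` seconds."""

    def __init__(self, period_s: float = 60.0):
        self.period_s = period_s
        self.samples: dict[str, list[float]] = defaultdict(list)
        self.last_print = time.perf_counter()

    def add(self, **durations_s: float) -> None:
        for name, seconds in durations_s.items():
            self.samples[name].append(seconds * 1000.0)  # store milliseconds
        now = time.perf_counter()
        if now - self.last_print >= self.period_s:
            for name, values in sorted(self.samples.items()):
                print(f"{name}: {sum(values) / len(values):.2f} ms avg over {len(values)} steps")
            self.samples.clear()
            self.last_print = now

# Usage inside a training loop:
# timer = PeriodicTimer()
# timer.add(forward=forward_time, backward=backward_time, optim=optim_time)
```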
@@ -142,33 +165,8 @@ def update_policy(
 @parser.wrap()
 def train(cfg: TrainPipelineConfig):
     cfg.validate()

-    accelerator = None
-    if is_launched_with_accelerate():
-        import accelerate
-
-        # For example pi0 has unused params (last llm block)
-        from accelerate import DistributedDataParallelKwargs
-        ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
-        # accelerator = accelerate.Accelerator(step_scheduler_with_optimizer=False, kwargs_handlers=[ddp_kwargs])
-        from accelerate import InitProcessGroupKwargs
-        # Set NCCL timeout (default 30 minutes = 1800 seconds)
-        nccl_timeout = getattr(cfg, 'nccl_timeout', 1800)
-        ddp_init_kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=nccl_timeout))  # FIXME(mshukor): allow user to set timeout. This should be longer than the evaluation time
-        # Set gradient accumulation steps (default 1)
-        gradient_accumulation_steps = getattr(cfg, 'gradient_accumulation_steps', 1)
-        accelerator = accelerate.Accelerator(step_scheduler_with_optimizer=False, gradient_accumulation_steps=gradient_accumulation_steps, kwargs_handlers=[ddp_init_kwargs, ddp_kwargs])
-    if accelerator is not None and not accelerator.is_main_process:
-        # Disable duplicate logging on non-main processes
-        logging.info(f"Setting logging level on non-main process {accelerator.process_index} to WARNING.")
-        logging.getLogger().setLevel(logging.WARNING)
-
     logging.info(pformat(cfg.to_dict()))

-    if accelerator and not accelerator.is_main_process:
-        # Disable logging on non-main processes.
-        cfg.wandb.enable = False
-
     if cfg.wandb.enable and cfg.wandb.project:
         wandb_logger = WandBLogger(cfg)
     else:
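For reference, the removed setup combined two accelerate kwargs handlers: one to tolerate unused parameters under DDP, one to lengthen the NCCL timeout so in-training evaluation does not trip the distributed watchdog. A condensed sketch of that initialization (defaults mirror the removed code):

```python
from datetime import timedelta

from accelerate import Accelerator, DistributedDataParallelKwargs, InitProcessGroupKwargs

def make_accelerator(nccl_timeout_s: int = 1800, gradient_accumulation_steps: int = 1) -> Accelerator:
    # find_unused_parameters=True is required when some parameters receive no
    # gradient (e.g. pi0's last LLM block is unused in the forward pass).
    ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    # The NCCL timeout must exceed the longest in-training eval, or idle
    # ranks will be killed while the main process renders rollouts.
    init_kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=nccl_timeout_s))
    return Accelerator(
        step_scheduler_with_optimizer=False,
        gradient_accumulation_steps=gradient_accumulation_steps,
        kwargs_handlers=[init_kwargs, ddp_kwargs],
    )
```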
@@ -184,6 +182,18 @@ def train(cfg: TrainPipelineConfig):
     torch.backends.cuda.matmul.allow_tf32 = True

     logging.info("Creating dataset")
+
+    # Force PyAV backend for RLearN (proven to be fastest)
+    if getattr(cfg.policy, "type", None) == "rlearn":
+        # Override video backend to use PyAV
+        if hasattr(cfg.dataset, 'video_backend'):
+            original_backend = cfg.dataset.video_backend
+            cfg.dataset.video_backend = 'pyav'
+            logging.info(f"RLearN: Forcing video_backend from '{original_backend}' to 'pyav' for better performance")
+        else:
+            cfg.dataset.video_backend = 'pyav'
+            logging.info("RLearN: Setting video_backend to 'pyav' for better performance")
+
     dataset = make_dataset(cfg)

     # Create environment used for evaluating checkpoints during training on simulation data.
@@ -195,15 +205,23 @@ def train(cfg: TrainPipelineConfig):
         eval_env = make_env(cfg.env, n_envs=cfg.eval.batch_size, use_async_envs=cfg.eval.use_async_envs)

     logging.info("Creating policy")
+    # Pass episode_data_index for RLearN to calculate proper progress
+    episode_data_index = dataset.episode_data_index if hasattr(dataset, "episode_data_index") else None
     policy = make_policy(
         cfg=cfg.policy,
         ds_meta=dataset.meta,
+        episode_data_index=episode_data_index,
     )

+
+    preprocessor, postprocessor = make_processor(
+        policy_cfg=cfg.policy, pretrained_path=cfg.policy.pretrained_path, dataset_stats=dataset.meta.stats
+    )
+
     logging.info("Creating optimizer and scheduler")
     optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy)
-    # Only use GradScaler when not using accelerate (accelerate handles mixed precision internally)
-    grad_scaler = None if accelerator else GradScaler(device.type, enabled=cfg.policy.use_amp)
+    grad_scaler = GradScaler(device.type, enabled=cfg.policy.use_amp)

     step = 0  # number of policy updates (forward + backward + optim)

@@ -230,6 +248,15 @@ def train(cfg: TrainPipelineConfig):
             drop_n_last_frames=cfg.policy.drop_n_last_frames,
             shuffle=True,
         )
+    elif cfg.policy.type == "rlearn":
+        # For RLearN, drop first 15 frames to avoid padding issues with temporal windows
+        shuffle = False
+        sampler = EpisodeAwareSampler(
+            dataset.episode_data_index,
+            drop_n_first_frames=15,  # Skip frames that would need padding
+            drop_n_last_frames=0,
+            shuffle=True,
+        )
     else:
         shuffle = True
         sampler = None
@@ -242,14 +269,12 @@ def train(cfg: TrainPipelineConfig):
         sampler=sampler,
         pin_memory=device.type == "cuda",
         drop_last=False,
+        persistent_workers=cfg.num_workers > 0,  # Keep workers alive between epochs
+        prefetch_factor=3,  # Prefetch for video pipeline
+        timeout=30,  # Prevent hanging on video decode errors
     )
     dl_iter = cycle(dataloader)

-    # Prepare models for accelerate if using multi-GPU
-    if accelerator:
-        policy, optimizer, dataloader = accelerator.prepare(policy, optimizer, dataloader)
-        dl_iter = cycle(dataloader)
-
     policy.train()

     train_metrics = {
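One caveat about the loader flags added above: PyTorch rejects `prefetch_factor` and `persistent_workers` when `num_workers == 0`, so single-process debugging runs need a guard along these lines (a sketch, not the repo's code):

```python
import torch
from torch.utils.data import DataLoader

def make_dataloader(dataset, batch_size: int, num_workers: int, device: torch.device) -> DataLoader:
    worker_kwargs = {}
    if num_workers > 0:
        # persistent_workers and prefetch_factor raise a ValueError when
        # num_workers == 0; timeout only applies to worker processes anyway.
        worker_kwargs = {"persistent_workers": True, "prefetch_factor": 3, "timeout": 30}
    return DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=device.type == "cuda",
        drop_last=False,
        **worker_kwargs,
    )
```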
@@ -259,6 +284,12 @@ def train(cfg: TrainPipelineConfig):
         "update_s": AverageMeter("updt_s", ":.3f"),
         "dataloading_s": AverageMeter("data_s", ":.3f"),
     }
+    # RLearN-only: pixels per second throughput
+    try:
+        if getattr(policy, "name", None) == "rlearn":
+            train_metrics["pix_s"] = AverageMeter("pix/s", ":.1f")
+    except Exception:
+        pass

     train_tracker = MetricsTracker(
         cfg.batch_size, dataset.num_frames, dataset.num_episodes, train_metrics, initial_step=step
@@ -266,14 +297,21 @@ def train(cfg: TrainPipelineConfig):

     logging.info("Start offline training on a fixed dataset")
     for _ in range(step, cfg.steps):
-        start_time = time.perf_counter()
+        # Data loading timing
+        data_start = time.perf_counter()
         batch = next(dl_iter)
-        train_tracker.dataloading_s = time.perf_counter() - start_time
+        data_loading_time = time.perf_counter() - data_start
+
+        # Preprocessing timing
+        preprocess_start = time.perf_counter()
+        batch = preprocessor(batch)
+        preprocess_time = time.perf_counter() - preprocess_start
+
+        train_tracker.dataloading_s = data_loading_time + preprocess_time

-        if not accelerator:
-            for key in batch:
-                if isinstance(batch[key], torch.Tensor):
-                    batch[key] = batch[key].to(device, non_blocking=device.type == "cuda")
+        for key in batch:
+            if isinstance(batch[key], torch.Tensor):
+                batch[key] = batch[key].to(device, non_blocking=device.type == "cuda")

         train_tracker, output_dict = update_policy(
             train_tracker,
@@ -284,9 +322,75 @@ def train(cfg: TrainPipelineConfig):
             grad_scaler=grad_scaler,
             lr_scheduler=lr_scheduler,
             use_amp=cfg.policy.use_amp,
-            accelerator=accelerator,
         )

+        # RLearN-only: compute pixel throughput (pixels per second)
+        if getattr(policy, "name", None) == "rlearn":
+            def _count_pixels(x: torch.Tensor) -> int:
+                # Expect shapes: (B,T,C,H,W) or (B,C,H,W)
+                if x.dim() == 5:
+                    b, t, _, h, w = x.shape
+                    return int(b * t * h * w)
+                if x.dim() == 4:
+                    b, _, h, w = x.shape
+                    return int(b * h * w)
+                return 0
+
+            total_pixels = 0
+            for k, v in batch.items():
+                if "image" not in k.lower():
+                    continue
+                if isinstance(v, torch.Tensor):
+                    total_pixels += _count_pixels(v)
+                elif isinstance(v, list) and len(v) > 0 and isinstance(v[0], torch.Tensor):
+                    # list of T tensors shaped (B,C,H,W)
+                    total_pixels += sum(_count_pixels(t) for t in v)
+
+            # Avoid div-by-zero
+            meter = train_tracker.update_s
+            upd_s = meter.val if isinstance(meter, AverageMeter) else float(meter)
+            upd_s = max(upd_s, 1e-8)
+            pix_per_s = float(total_pixels) / upd_s
+            try:
+                train_tracker.pix_s = pix_per_s
+            except Exception:
+                pass
+
+        # Collect data pipeline timing for RLearN (averaged reporting every minute)
+        if getattr(policy, "name", None) == "rlearn":
+            # Initialize data timing accumulator if not exists
+            if not hasattr(policy, '_data_timing_stats'):
+                policy._data_timing_stats = {
+                    'data_loading_times': [],
+                    'preprocess_times': [],
+                    'last_print_time': time.perf_counter()
+                }
+
+            # Accumulate current step's data timings
+            data_stats = policy._data_timing_stats
+            data_stats['data_loading_times'].append(data_loading_time * 1000)
+            data_stats['preprocess_times'].append(preprocess_time * 1000)
+
+            # Print averaged stats every minute (60 seconds)
+            current_time = time.perf_counter()
+            if current_time - data_stats['last_print_time'] >= 60.0:
+                n_samples = len(data_stats['data_loading_times'])
+                if n_samples > 0:
+                    avg_data_loading = sum(data_stats['data_loading_times']) / n_samples
+                    avg_preprocessing = sum(data_stats['preprocess_times']) / n_samples
+
+                    print(f"\nData Pipeline Average Timing (last {n_samples} steps):")
+                    print(f" Data loading: {avg_data_loading:.2f} ms")
+                    print(f" Preprocessing: {avg_preprocessing:.2f} ms")
+                    print(f" Total data pipeline: {avg_data_loading + avg_preprocessing:.2f} ms")
+                    print("-" * 50)
+
+                # Reset stats for next minute
+                for key in data_stats:
+                    if key != 'last_print_time':
+                        data_stats[key] = []
+                data_stats['last_print_time'] = current_time
+
         # Note: eval and checkpoint happens *after* the `step`th training update has completed, so we
         # increment `step` here.
         step += 1
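The `_count_pixels` helper above deliberately excludes the channel dimension, so "pixels per second" counts spatio-temporal positions rather than raw tensor elements. A quick sanity check of that accounting (shapes are arbitrary):

```python
import torch

def count_pixels(x: torch.Tensor) -> int:
    # Same accounting as the diff's helper: channels are not counted.
    if x.dim() == 5:  # (B, T, C, H, W)
        b, t, _, h, w = x.shape
        return b * t * h * w
    if x.dim() == 4:  # (B, C, H, W)
        b, _, h, w = x.shape
        return b * h * w
    return 0

assert count_pixels(torch.zeros(2, 8, 3, 224, 224)) == 2 * 8 * 224 * 224
assert count_pixels(torch.zeros(4, 3, 96, 96)) == 4 * 96 * 96
```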
@@ -295,6 +399,7 @@ def train(cfg: TrainPipelineConfig):
         is_saving_step = step % cfg.save_freq == 0 or step == cfg.steps
         is_eval_step = cfg.eval_freq > 0 and step % cfg.eval_freq == 0

+
         if is_log_step:
             logging.info(train_tracker)
             if wandb_logger:
@@ -304,17 +409,15 @@ def train(cfg: TrainPipelineConfig):
                 wandb_logger.log_dict(wandb_log_dict, step)
             train_tracker.reset_averages()

-        if cfg.save_checkpoint and is_saving_step and (not accelerator or accelerator.is_main_process):
+        if cfg.save_checkpoint and is_saving_step:
             logging.info(f"Checkpoint policy after step {step}")
             checkpoint_dir = get_step_checkpoint_dir(cfg.output_dir, cfg.steps, step)
-            # Unwrap model for accelerate
-            policy_to_save = accelerator.unwrap_model(policy) if accelerator else policy
-            save_checkpoint(checkpoint_dir, step, cfg, policy_to_save, optimizer, lr_scheduler)
+            save_checkpoint(checkpoint_dir, step, cfg, policy, optimizer, lr_scheduler, preprocessor)
             update_last_checkpoint(checkpoint_dir)
             if wandb_logger:
                 wandb_logger.log_policy(checkpoint_dir)

-        if cfg.env and is_eval_step and (not accelerator or accelerator.is_main_process):
+        if cfg.env and is_eval_step:
            step_id = get_step_identifier(step, cfg.steps)
             logging.info(f"Eval policy at step {step}")
             with (
@@ -323,7 +426,7 @@ def train(cfg: TrainPipelineConfig):
             ):
                 eval_info = eval_policy(
                     eval_env,
-                    accelerator.unwrap_model(policy) if accelerator else policy,
+                    policy,
                     cfg.eval.n_episodes,
                     videos_dir=cfg.output_dir / "eval" / f"videos_step_{step_id}",
                     max_episodes_rendered=4,
@@ -347,12 +450,18 @@ def train(cfg: TrainPipelineConfig):
                 wandb_logger.log_dict(wandb_log_dict, step, mode="eval")
                 wandb_logger.log_video(eval_info["video_paths"][0], step, mode="eval")

+
+
     if eval_env:
         eval_env.close()
     logging.info("End of training")

     if cfg.policy.push_to_hub:
         policy.push_model_to_hub(cfg)
+        if preprocessor:
+            preprocessor.push_to_hub(cfg.policy.repo_id)
+        if postprocessor:
+            postprocessor.push_to_hub(cfg.policy.repo_id)


 def main():
@@ -18,7 +18,7 @@ Helper to set motor ids and baudrate.
 Example:

 ```shell
-lerobot-setup-motors \
+python -m lerobot.setup_motors \
     --teleop.type=so100_leader \
     --teleop.port=/dev/tty.usbmodem575E0031751
 ```

@@ -18,7 +18,7 @@ Simple script to control a robot from teleoperation.
 Example:

 ```shell
-lerobot-teleoperate \
+python -m lerobot.teleoperate \
     --robot.type=so101_follower \
     --robot.port=/dev/tty.usbmodem58760431541 \
     --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
@@ -32,7 +32,7 @@ lerobot-teleoperate \
 Example teleoperation with bimanual so100:

 ```shell
-lerobot-teleoperate \
+python -m lerobot.teleoperate \
     --robot.type=bi_so100_follower \
     --robot.left_arm_port=/dev/tty.usbmodem5A460851411 \
     --robot.right_arm_port=/dev/tty.usbmodem5A460812391 \
@@ -109,7 +109,7 @@ def teleop_loop(
         action = teleop.get_action()
         if display_data:
             observation = robot.get_observation()
-            log_rerun_data(observation, action)
+            log_rerun_data(observation=observation, action=action)

         robot.send_action(action)
         dt_s = time.perf_counter() - loop_start
Some files were not shown because too many files have changed in this diff.