Compare commits

..

26 Commits

Author SHA1 Message Date
Jade Choghari 995a46b302 make it work 2025-12-29 17:34:18 +01:00
Jade Choghari 23d4846423 more quick fixes 2025-12-28 07:33:24 +00:00
Jade Choghari 7d897daeb2 add more changges 2025-12-27 21:15:30 +00:00
Jade Choghari 7556c7fd70 more changes 2025-12-27 20:26:23 +00:00
Jade Choghari 4434c863b4 fix training 2025-12-27 10:43:00 +00:00
Jade Choghari 4b40153c32 align fast more 2025-12-26 17:24:39 +00:00
Jade Choghari f0923e5c86 remove brkpt 2025-12-26 06:46:27 +00:00
Jade Choghari 8edd544bbe detoknize action at policy level 2025-12-26 06:45:38 +00:00
Jade Choghari e682ef05f9 make fast work 2025-12-25 20:59:32 +00:00
Jade Choghari 9b5ac4387c add more changes 2025-12-23 13:11:18 +00:00
Jade Choghari 5781754c30 add pifast 2025-12-22 11:36:53 +01:00
Jade Choghari 18ddc67714 add more changes 2025-12-17 18:23:23 +00:00
Pepijn b229e7df28 Add voice example 2025-12-17 16:31:25 +01:00
Jade Choghari 8e05dc9a7a add fast tokenizer support 2025-12-16 11:28:27 +00:00
Jade Choghari fddd044306 add eos token in tokenizer, working 2025-12-14 14:54:07 +00:00
Jade Choghari 522396a15a more 2025-12-13 21:02:36 +00:00
Jade Choghari 7e232fb114 more changes 2025-12-13 21:02:07 +00:00
Jade Choghari dc452f37e0 add training 2025-12-12 10:27:28 +00:00
Jade Choghari 3c11946755 allow loading high level tasks 2025-12-10 16:22:54 +00:00
Jade Choghari 8edbd5b55e working step 2 2025-12-10 09:53:29 +00:00
Jade Choghari 025c2b2831 make step 2 work 2025-12-09 16:53:01 +00:00
Jade Choghari c8eee4ea16 add step2 2025-12-09 12:28:46 +00:00
Jade Choghari 9091b68d86 make it work 2025-12-08 14:19:15 +00:00
Jade Choghari 3568df8a35 woking on qwen 2025-12-08 14:03:47 +00:00
Jade Choghari a811945336 add 2025-12-08 12:21:41 +01:00
Jade Choghari 0a10d377b5 add Dlabel script 2025-12-08 12:21:01 +01:00
134 changed files with 14369 additions and 13731 deletions
@@ -31,8 +31,7 @@ jobs:
name: Upload Preview and Comment
if: >
github.event.workflow_run.event == 'pull_request' &&
github.event.workflow_run.conclusion == 'success' &&
github.repository == 'huggingface/lerobot'
github.event.workflow_run.conclusion == 'success'
uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
with:
package_name: lerobot
+2 -4
View File
@@ -42,9 +42,7 @@ jobs:
# This job builds and deploys the official documentation.
build_main_docs:
name: Build Main Docs
if: >
(github.event_name == 'push' || github.event_name == 'workflow_dispatch') &&
github.repository == 'huggingface/lerobot'
if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
permissions:
contents: read
uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
@@ -60,7 +58,7 @@ jobs:
# The result of this job triggers the 'Upload PR Documentation' workflow.
build_pr_docs:
name: Build PR Docs
if: github.event_name == 'pull_request' && github.repository == 'huggingface/lerobot'
if: github.event_name == 'pull_request'
permissions:
contents: read
pull-requests: write
+1
View File
@@ -45,6 +45,7 @@ permissions:
env:
UV_VERSION: "0.8.0"
PYTHON_VERSION: "3.10"
DOCKER_IMAGE_NAME: huggingface/lerobot-gpu
# Ensures that only the latest commit for a PR or branch is built, canceling older runs.
concurrency:
-2
View File
@@ -43,7 +43,6 @@ jobs:
name: Build CPU Docker for Nightly
runs-on:
group: aws-general-8-plus
if: github.repository == 'huggingface/lerobot'
outputs:
image_tag: ${{ env.DOCKER_IMAGE_NAME_CPU }}
steps:
@@ -78,7 +77,6 @@ jobs:
name: Build GPU Docker for Nightly
runs-on:
group: aws-general-8-plus
if: github.repository == 'huggingface/lerobot'
outputs:
image_tag: ${{ env.DOCKER_IMAGE_NAME_GPU }}
steps:
-1
View File
@@ -29,7 +29,6 @@ jobs:
build-and-publish:
name: Build and publish Python distributions
runs-on: ubuntu-latest
if: github.repository == 'huggingface/lerobot'
outputs:
version: ${{ steps.extract_info.outputs.tag_version }}
permissions:
-1
View File
@@ -45,7 +45,6 @@ jobs:
stale:
name: Close Stale Issues and PRs
runs-on: ubuntu-latest
if: github.repository == 'huggingface/lerobot'
permissions:
actions: write
contents: write # only for delete-branch option
-1
View File
@@ -43,7 +43,6 @@ jobs:
full-tests:
name: Full Unbound Tests
runs-on: ubuntu-latest
if: github.repository == 'huggingface/lerobot'
env:
MUJOCO_GL: egl
HF_HOME: /mnt/cache/.cache/huggingface
BIN
View File
Binary file not shown (new image, 51 KiB).

+5 -22
View File
@@ -201,8 +201,7 @@ from lerobot.teleoperators.so100_leader.so100_leader import SO100Leader
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun
from lerobot.scripts.lerobot_record import record_loop
from lerobot.processor import make_default_processors
from lerobot.record import record_loop
NUM_EPISODES = 5
FPS = 30
@@ -210,19 +209,12 @@ EPISODE_TIME_SEC = 60
RESET_TIME_SEC = 10
TASK_DESCRIPTION = "My task description"
# Create robot configuration
# Create the robot and teleoperator configurations
camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
robot_config = SO100FollowerConfig(
id="my_awesome_follower_arm",
cameras={
"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS) # Optional: fourcc="MJPG" for troubleshooting OpenCV async error.
},
port="/dev/tty.usbmodem58760434471",
)
teleop_config = SO100LeaderConfig(
id="my_awesome_leader_arm",
port="/dev/tty.usbmodem585A0077581",
port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm", cameras=camera_config
)
teleop_config = SO100LeaderConfig(port="/dev/tty.usbmodem585A0077581", id="my_awesome_leader_arm")
# Initialize the robot and teleoperator
robot = SO100Follower(robot_config)
@@ -251,9 +243,6 @@ init_rerun(session_name="recording")
robot.connect()
teleop.connect()
# Create the required processors
teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors()
episode_idx = 0
while episode_idx < NUM_EPISODES and not events["stop_recording"]:
log_say(f"Recording episode {episode_idx + 1} of {NUM_EPISODES}")
@@ -262,9 +251,6 @@ while episode_idx < NUM_EPISODES and not events["stop_recording"]:
robot=robot,
events=events,
fps=FPS,
teleop_action_processor=teleop_action_processor,
robot_action_processor=robot_action_processor,
robot_observation_processor=robot_observation_processor,
teleop=teleop,
dataset=dataset,
control_time_s=EPISODE_TIME_SEC,
@@ -279,9 +265,6 @@ while episode_idx < NUM_EPISODES and not events["stop_recording"]:
robot=robot,
events=events,
fps=FPS,
teleop_action_processor=teleop_action_processor,
robot_action_processor=robot_action_processor,
robot_observation_processor=robot_observation_processor,
teleop=teleop,
control_time_s=RESET_TIME_SEC,
single_task=TASK_DESCRIPTION,
-328
View File
@@ -1,328 +0,0 @@
# OpenArms Robot
OpenArms is a 7 DOF robotic arm with a gripper, designed by [Enactic, Inc.](https://www.enactic.com/). It uses Damiao motors controlled via CAN bus communication and MIT control mode for smooth, precise motion.
## Hardware Overview
- **7 DOF per arm** (14 DOF total for dual arm setup)
- **1 gripper per arm** (2 grippers total)
- **Damiao motors** with 4 different types:
- **DM8009** (DM-J8009P-2EC) for shoulders (J1, J2) - high torque
- **DM4340** for shoulder rotation and elbow (J3, J4)
- **DM4310** (DM-J4310-2EC V1.1) for wrist (J5, J6, J7) and gripper (J8)
- **24V power supply** required
- **CAN interface device**:
- **Linux**: Any SocketCAN-compatible adapter
- **macOS**: CANable, PEAK PCAN-USB, or Kvaser USBcan
- Proper CAN wiring (CANH, CANL, 120Ω termination)
## Motor Configuration
Each arm has the following motor configuration based on the [OpenArm setup guide](https://docs.openarm.dev/software/setup/):
| Joint | Motor | Motor Type | Sender CAN ID | Receiver ID | Description |
|-------|-------|------------|---------------|-------------|-------------|
| J1 | joint_1 | DM8009 | 0x01 | 0x11 | Shoulder pan |
| J2 | joint_2 | DM8009 | 0x02 | 0x12 | Shoulder lift |
| J3 | joint_3 | DM4340 | 0x03 | 0x13 | Shoulder rotation |
| J4 | joint_4 | DM4340 | 0x04 | 0x14 | Elbow flex |
| J5 | joint_5 | DM4310 | 0x05 | 0x15 | Wrist roll |
| J6 | joint_6 | DM4310 | 0x06 | 0x16 | Wrist pitch |
| J7 | joint_7 | DM4310 | 0x07 | 0x17 | Wrist rotation |
| J8 | gripper | DM4310 | 0x08 | 0x18 | Gripper |
For dual arm setups, the left arm uses IDs 0x09-0x10 for joints 1-8 with the same motor types.
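For reference, the same mapping can be written as a plain Python dictionary (illustrative only; the actual configuration lives in the OpenArms robot config classes). In a dual-arm setup the left arm reuses the same motor types with sender IDs shifted into the 0x09-0x10 range:
```python
# Right-arm motor map from the table above: joint -> (motor type, sender CAN ID, receiver ID).
# Purely illustrative, not a LeRobot API.
RIGHT_ARM_MOTORS = {
    "joint_1": ("DM8009", 0x01, 0x11),  # shoulder pan
    "joint_2": ("DM8009", 0x02, 0x12),  # shoulder lift
    "joint_3": ("DM4340", 0x03, 0x13),  # shoulder rotation
    "joint_4": ("DM4340", 0x04, 0x14),  # elbow flex
    "joint_5": ("DM4310", 0x05, 0x15),  # wrist roll
    "joint_6": ("DM4310", 0x06, 0x16),  # wrist pitch
    "joint_7": ("DM4310", 0x07, 0x17),  # wrist rotation
    "gripper": ("DM4310", 0x08, 0x18),  # gripper
}
```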
## Quick Start
```bash
# Install system dependencies
sudo apt install can-utils iproute2
# Install LeRobot with OpenArms support
pip install -e ".[openarms]"
```
## Setup Guide
### Step 1: Motor ID Configuration
**IMPORTANT**: Before using the robot, motors must be configured with the correct CAN IDs.
Refer to the [OpenArm Motor ID Configuration Guide](https://docs.openarm.dev/software/setup/motor-id) for detailed instructions using the Damiao Debugging Tools on Windows.
Key points:
- Each motor needs a unique **Sender CAN ID** (0x01-0x08)
- Each motor needs a unique **Receiver/Master ID** (0x11-0x18)
- Use the Damiao Debugging Tools to set these IDs
### Step 2: Setup CAN Interface
Configure your CAN interface as described in the [OpenArm CAN Setup Guide](https://docs.openarm.dev/software/setup/can-setup):
#### Linux (SocketCAN)
```bash
# Find your CAN interface
ip link show
# Configure can0, 1, 2, 3
sudo ip link set can0 down
sudo ip link set can0 type can bitrate 1000000
sudo ip link set can0 up
sudo ip link set can1 down
sudo ip link set can1 type can bitrate 1000000
sudo ip link set can1 up
sudo ip link set can2 down
sudo ip link set can2 type can bitrate 1000000
sudo ip link set can2 up
sudo ip link set can3 down
sudo ip link set can3 type can bitrate 1000000
sudo ip link set can3 up
# Verify configuration
ip link show can0
```
or run:
`examples/openarms/setup_can.sh`
### Testing the CAN bus and motor connections
Run this script to check that all motors can be found and to determine your CAN-FD speed: `python examples/openarms/debug_can_communication.py`
## Usage
### Basic Setup
```python
from lerobot.robots.openarms import OpenArmsFollower
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
# Configure for dual arm setup
config = OpenArmsFollowerConfig(
port="can0",
can_interface="socketcan", # Or "auto" for auto-detection
id="openarms_dual",
is_dual_arm=True,
)
robot = OpenArmsFollower(config)
robot.connect()
```
### Calibration
On first use, you'll need to calibrate the robot:
```python
robot.calibrate()
```
The calibration process will:
1. Disable torque on all motors
2. Ask you to position arms in **hanging position with grippers closed**
3. Set this as the zero position
4. Ask you to move each joint through its full range
5. Record min/max positions for each joint
6. Save calibration to file
### Reading Observations
The robot provides comprehensive state information:
```python
observation = robot.get_observation()
# Observation includes for each motor:
# - {motor_name}.pos: Position in degrees
# - {motor_name}.vel: Velocity in degrees/second
# - {motor_name}.torque: Motor torque
# - {camera_name}: Camera images (if configured)
print(f"Right arm joint 1 position: {observation['right_joint_1.pos']:.1f}°")
print(f"Right arm joint 1 velocity: {observation['right_joint_1.vel']:.1f}°/s")
print(f"Right arm joint 1 torque: {observation['right_joint_1.torque']:.3f} N·m")
```
### Sending Actions
```python
# Send target positions (in degrees)
action = {
"right_joint_1.pos": 45.0,
"right_joint_2.pos": -30.0,
# ... all joints
"right_gripper.pos": 45.0, # Half-closed
}
actual_action = robot.send_action(action)
```
### Gripper Control
```python
# Open gripper
robot.open_gripper(arm="right")
# Close gripper
robot.close_gripper(arm="right")
```
## Safety Features
### 1. Maximum Relative Target
Limits how far a joint can move in a single command to prevent sudden movements:
```python
config = OpenArmsFollowerConfig(
port="can0",
# Limit all joints to 10 degrees per command
max_relative_target=10.0,
# Or set per-motor limits
max_relative_target={
"right_joint_1": 15.0, # Slower moving joint
"right_joint_2": 10.0,
"right_gripper": 5.0, # Very slow gripper
}
)
```
**How it works**: If current position is 50° and you command 80°, with `max_relative_target=10.0`, the robot will only move to 60° in that step.
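The clamping logic is simple enough to sketch (illustrative only, not the actual LeRobot implementation):
```python
# Minimal sketch of the max_relative_target idea: clamp each commanded move to a per-step limit.
def clamp_relative_target(current_deg: float, goal_deg: float, max_step_deg: float) -> float:
    delta = max(-max_step_deg, min(max_step_deg, goal_deg - current_deg))
    return current_deg + delta

print(clamp_relative_target(50.0, 80.0, 10.0))  # 60.0 -> matches the example above
```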
### 2. Torque Limits
Control maximum torque output, especially important for grippers and teleoperation:
```python
config = OpenArmsFollowerConfig(
port="can0",
# Gripper torque limit (fraction of motor's max torque)
gripper_torque_limit=0.5, # 50% of max torque
)
```
Lower torque limits prevent damage when gripping delicate objects.
### 3. MIT Control Gains
Control responsiveness and stability via PID-like gains:
```python
config = OpenArmsFollowerConfig(
port="can0",
position_kp=10.0, # Position gain (higher = more responsive)
position_kd=0.5, # Velocity damping (higher = more damped)
)
```
**Guidelines**:
- **For following (robot)**: Higher gains for responsiveness
- `position_kp=10.0`, `position_kd=0.5`
- **For teleoperation (leader)**: Lower gains or disable torque for manual movement
- `manual_control=True` (torque disabled)
### 4. Velocity Limits
Velocity limits are enforced by the Damiao motors based on motor type. For DM4310:
- Max velocity: 30 rad/s ≈ 1718°/s
The motors will automatically limit velocity to safe values.
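As a quick check of the conversion above:
```python
import math

# 30 rad/s expressed in degrees per second
print(math.degrees(30.0))  # ~1718.9 deg/s
```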
## Teleoperation
### Leader Arm Setup
The leader arm is moved manually (torque disabled) to generate commands:
```python
from lerobot.teleoperators.openarms import OpenArmsLeader
from lerobot.teleoperators.openarms.config_openarms_leader import OpenArmsLeaderConfig
config = OpenArmsLeaderConfig(
port="can1", # Separate CAN interface for leader
id="openarms_leader",
manual_control=True, # Torque disabled for manual movement
is_dual_arm=True,
)
leader = OpenArmsLeader(config)
leader.connect()
# Read current position as action
action = leader.get_action()
# action contains positions for all joints in degrees
```
### Safety Considerations for Teleoperation
1. **Use separate CAN interfaces** for leader and follower to avoid conflicts
2. **Enable max_relative_target** on follower to smooth abrupt movements
3. **Lower torque limits** on follower to prevent damage from tracking errors
4. **Test with one arm** before enabling dual arm teleoperation
5. **Have emergency stop** ready (power switch or CAN disable)
```python
# Recommended follower config for teleoperation
follower_config = OpenArmsFollowerConfig(
port="can0",
max_relative_target=5.0, # Small steps for smooth following
gripper_torque_limit=0.3, # Low torque for safety
position_kp=5.0, # Lower gains for gentler following
position_kd=0.3,
)
```
## Troubleshooting
### Motor Shaking/Unstable
- **Lower control gains**: Reduce `position_kp` and `position_kd`
- **Check calibration**: Re-run calibration procedure
- **Verify power**: Insufficient current can cause instability
- **Check mechanical**: Loose connections, binding, or damaged components
### CAN Bus Errors
```bash
# Check for errors
ip -s link show can0
# Reset CAN interface
sudo ip link set can0 down
sudo ip link set can0 up
```
### Control Mode
OpenArms uses **MIT control mode** which allows simultaneous control of:
- Position (degrees)
- Velocity (degrees/second)
- Torque (N·m)
- Position gain (Kp)
- Velocity damping (Kd)
### Communication
- **Protocol**: CAN 2.0 at 1 Mbps (or CAN-FD at 5 Mbps)
- **Frame format**: Standard 11-bit IDs
- **Update rate**: Typically 50-100 Hz depending on motor count
- **Latency**: ~10-20ms per motor command
## References
- [OpenArm Official Documentation](https://docs.openarm.dev/)
- [OpenArm Setup Guide](https://docs.openarm.dev/software/setup/)
- [Motor ID Configuration](https://docs.openarm.dev/software/setup/motor-id)
- [CAN Interface Setup](https://docs.openarm.dev/software/setup/can-setup)
- [Motor Communication Test](https://docs.openarm.dev/software/setup/configure-test)
- [Damiao Motor Documentation](https://wiki.seeedstudio.com/damiao_series/)
- [Enactic GitHub](https://github.com/enactic/openarm_can)
+1 -6
View File
@@ -4,12 +4,11 @@ This guide covers the complete setup process for the Unitree G1 humanoid, from i
## About the Unitree G1
We offer support for both 29 and 23 DOF G1. We introduce:
We offer support for both 29 and 23 DOF G1. In this first PR we introduce:
- **`unitree g1` robot class, handling low level communication with the humanoid**
- **ZMQ socket bridge** for remote communication over WiFi, allowing one to deploy policies remotely instead of over ethernet or directly on the Orin
- **GR00T locomotion policy** for bipedal walking and balance
- **MuJoCo simulation mode** for testing policies without the physical robot
---
@@ -192,10 +191,6 @@ Press `Ctrl+C` to stop the policy.
---
## Extra: Running in Simulation Mode (MuJoCo)
You can now test and develop policies without a physical robot using MuJoCo. To do so, set `is_simulation=True` in the config.
## Additional Resources
- [Unitree SDK Documentation](https://github.com/unitreerobotics/unitree_sdk2_python)
+3 -104
View File
@@ -11,14 +11,13 @@ LeRobot provides several utilities for manipulating datasets:
3. **Merge Datasets** - Combine multiple datasets into one. The datasets must have identical features, and episodes are concatenated in the order specified in `repo_ids`
4. **Add Features** - Add new features to a dataset
5. **Remove Features** - Remove features from a dataset
6. **Convert to Video** - Convert image-based datasets to video format for efficient storage
The core implementation is in `lerobot.datasets.dataset_tools`.
An example script detailing how to use the tools API is available in `examples/dataset/use_dataset_tools.py`.
## Command-Line Tool: lerobot-edit-dataset
`lerobot-edit-dataset` is a command-line script for editing datasets. It can be used to delete episodes, split datasets, merge datasets, add features, remove features, and convert image datasets to video format.
`lerobot-edit-dataset` is a command-line script for editing datasets. It can be used to delete episodes, split datasets, merge datasets, add features, and remove features.
Run `lerobot-edit-dataset --help` for more information on the configuration of each operation.
@@ -87,71 +86,9 @@ lerobot-edit-dataset \
--operation.feature_names "['observation.images.top']"
```
#### Convert to Video
Convert an image-based dataset to video format, creating a new LeRobotDataset where images are stored as videos. This is useful for reducing storage requirements and improving data loading performance. The new dataset will have the exact same structure as the original, but with images encoded as MP4 videos in the proper LeRobot format.
```bash
# Local-only: Save to a custom output directory (no hub push)
lerobot-edit-dataset \
--repo_id lerobot/pusht_image \
--operation.type convert_to_video \
--operation.output_dir /path/to/output/pusht_video
# Save with new repo_id (local storage)
lerobot-edit-dataset \
--repo_id lerobot/pusht_image \
--new_repo_id lerobot/pusht_video \
--operation.type convert_to_video
# Convert and push to Hugging Face Hub
lerobot-edit-dataset \
--repo_id lerobot/pusht_image \
--new_repo_id lerobot/pusht_video \
--operation.type convert_to_video \
--push_to_hub true
# Convert with custom video codec and quality settings
lerobot-edit-dataset \
--repo_id lerobot/pusht_image \
--operation.type convert_to_video \
--operation.output_dir outputs/pusht_video \
--operation.vcodec libsvtav1 \
--operation.pix_fmt yuv420p \
--operation.g 2 \
--operation.crf 30
# Convert only specific episodes
lerobot-edit-dataset \
--repo_id lerobot/pusht_image \
--operation.type convert_to_video \
--operation.output_dir outputs/pusht_video \
--operation.episode_indices "[0, 1, 2, 5, 10]"
# Convert with multiple workers for parallel processing
lerobot-edit-dataset \
--repo_id lerobot/pusht_image \
--operation.type convert_to_video \
--operation.output_dir outputs/pusht_video \
--operation.num_workers 8
```
**Parameters:**
- `output_dir`: Custom output directory (optional - by default uses `new_repo_id` or `{repo_id}_video`)
- `vcodec`: Video codec to use - options: `h264`, `hevc`, `libsvtav1` (default: `libsvtav1`)
- `pix_fmt`: Pixel format - options: `yuv420p`, `yuv444p` (default: `yuv420p`)
- `g`: Group of pictures (GOP) size - lower values give better quality but larger files (default: 2)
- `crf`: Constant rate factor - lower values give better quality but larger files, 0 is lossless (default: 30)
- `fast_decode`: Fast decode tuning option (default: 0)
- `episode_indices`: List of specific episodes to convert (default: all episodes)
- `num_workers`: Number of parallel workers for processing (default: 4)
**Note:** The resulting dataset will be a proper LeRobotDataset with all cameras encoded as videos in the `videos/` directory, with parquet files containing only metadata (no raw image data). All episodes, stats, and tasks are preserved.
### Push to Hub
Add the `--push_to_hub true` flag to any command to automatically upload the resulting dataset to the Hugging Face Hub:
Add the `--push_to_hub` flag to any command to automatically upload the resulting dataset to the Hugging Face Hub:
```bash
lerobot-edit-dataset \
@@ -159,45 +96,7 @@ lerobot-edit-dataset \
--new_repo_id lerobot/pusht_after_deletion \
--operation.type delete_episodes \
--operation.episode_indices "[0, 2, 5]" \
--push_to_hub true
--push_to_hub
```
There is also a tool for adding features to a dataset that is not yet covered in `lerobot-edit-dataset`.
# Dataset Visualization
## Online Visualization
When you record a dataset using `lerobot`, it automatically uploads to the Hugging Face Hub unless you specify otherwise. To view the dataset online, use our **LeRobot Dataset Visualizer**, available at:
https://huggingface.co/spaces/lerobot/visualize_dataset
## Local Visualization
You can also visualize episodes from a dataset locally using our command-line tool.
**From the Hugging Face Hub:**
```bash
lerobot-dataset-viz \
--repo-id lerobot/pusht \
--episode-index 0
```
**From a local folder:**
Add the `--root` option and set `--mode local`. For example, to search in `./my_local_data_dir/lerobot/pusht`:
```bash
lerobot-dataset-viz \
--repo-id lerobot/pusht \
--root ./my_local_data_dir \
--mode local \
--episode-index 0
```
Once executed, the tool opens `rerun.io` and displays the camera streams, robot states, and actions for the selected episode.
For advanced usage—including visualizing datasets stored on a remote server—run:
```bash
lerobot-dataset-viz --help
```
+84 -42
View File
@@ -24,7 +24,7 @@ Built from pure Transformer encoders, X-VLA scales naturally with model size and
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/xvla-architecture2.png"
alt="XVLA Architecture 2"
style="width: 60%; height: auto;"
style="width: 32%; max-width: 450px; height: auto;"
/>
</p>
@@ -120,7 +120,7 @@ Adapted for Google Robot platforms.
### Recommended Training Configuration
When fine-tuning X-VLA for a new embodiment or task, we recommend not freezing the VLM, and also setting the `policy.dtype=bfloat16` to not hit OOM errors.
When fine-tuning X-VLA for a new embodiment or task, we recommend the following freezing strategy:
```bash
lerobot-train \
@@ -129,26 +129,25 @@ lerobot-train \
--job_name=xvla_training \
--policy.path="lerobot/xvla-base" \
--policy.repo_id="HF_USER/xvla-your-robot" \
--policy.dtype=bfloat16 \
--policy.action_mode=auto \
--steps=20000 \
--steps=3000 \
--policy.device=cuda \
--policy.freeze_vision_encoder=false \
--policy.freeze_language_encoder=false \
--policy.train_policy_transformer=true \
--policy.train_soft_prompts=true \
--policy.freeze_vision_encoder=True \
--policy.freeze_language_encoder=True \
--policy.train_policy_transformer=True \
--policy.train_soft_prompts=True \
--policy.action_mode=YOUR_ACTION_MODE
```
### Training Parameters Explained
| Parameter | Default | Description |
| -------------------------- | ------- | ---------------------------------------------- |
| `freeze_vision_encoder` | `false` | Do not freeze the VLM vision encoder weights |
| `freeze_language_encoder` | `false` | Do not freeze the VLM language encoder weights |
| `train_policy_transformer` | `true` | Allow policy transformer layers to train |
| `train_soft_prompts` | `true` | Allow soft prompts to train |
| Parameter | Default | Description |
| -------------------------- | ------- | ---------------------------------------- |
| `freeze_vision_encoder` | `True` | Freeze the VLM vision encoder weights |
| `freeze_language_encoder` | `True` | Freeze the VLM language encoder weights |
| `train_policy_transformer` | `True` | Allow policy transformer layers to train |
| `train_soft_prompts` | `True` | Allow soft prompts to train |
**💡 Best Practice**: For Phase II adaptation to new embodiments, do not freeze the VLM encoders and also train the policy transformer and soft prompts.
**💡 Best Practice**: For Phase II adaptation to new embodiments, freeze the VLM encoders and only train the policy transformer and soft prompts. This provides excellent sample efficiency with minimal compute.
### Example: Training on Bimanual Robot
@@ -158,15 +157,14 @@ lerobot-train \
--output_dir=./outputs/xvla_bimanual \
--job_name=xvla_so101_training \
--policy.path="lerobot/xvla-base" \
--policy.dtype=bfloat16 \
--policy.repo_id="YOUR_USERNAME/xvla-biso101" \
--steps=3000 \
--policy.device=cuda \
--policy.action_mode=so101_bimanual \
--policy.freeze_vision_encoder=false \
--policy.freeze_language_encoder=false \
--policy.train_policy_transformer=true \
--policy.train_soft_prompts=true
--policy.freeze_vision_encoder=True \
--policy.freeze_language_encoder=True \
--policy.train_policy_transformer=True \
--policy.train_soft_prompts=True
```
💡 **Best Performance:** If you have sufficient computational resources and want to achieve the best X-VLA finetuning performance, you should follow the official finetuning strategy:
@@ -174,7 +172,71 @@ lerobot-train \
**🔥 Full-finetune all components with a custom learning-rate scheme**
To ensure stable optimization, the Vision-Language Model (VLM) must be trained with only 1/10 of the base learning rate, while all other components use the full LR.
This LR ratio is crucial for achieving strong and stable finetuning performance. This is already done for you by default.
This LR ratio is crucial for achieving strong and stable finetuning performance.
To enable this behavior, you must:
1. Implement a custom optimizer and register it in your training config
```python
from dataclasses import asdict, dataclass

import torch

from lerobot.optim.optimizers import OptimizerConfig


@OptimizerConfig.register_subclass("xvla-adamw")
@dataclass
class XVLAAdamW(OptimizerConfig):
    lr: float = 1e-4
    betas: tuple[float, float] = (0.9, 0.99)
    eps: float = 1e-8
    weight_decay: float = 0.0
    grad_clip_norm: float = 10.0

    def build(self, params: dict) -> torch.optim.Optimizer:
        """
        Expect `named_parameters()` as input.
        Apply lr = lr / 10 for all VLM-related parameters.
        """
        assert isinstance(params, dict), "Custom LR optimizer requires `named_parameters()` as inputs."
        kwargs = asdict(self)
        kwargs.pop("grad_clip_norm")

        vlm_group, other_group = [], []
        for name, p in params.items():
            if not p.requires_grad:
                continue
            if "vlm" in name.lower():
                vlm_group.append(p)
            else:
                other_group.append(p)

        param_groups = [
            {"params": vlm_group, "lr": self.lr * 0.1, "weight_decay": self.weight_decay * 0.1},
            {"params": other_group, "lr": self.lr, "weight_decay": self.weight_decay},
        ]
        return torch.optim.AdamW(param_groups, **kwargs)
```
2. Modify X-VLA's `get_optim_params` to return named parameters
Replace:
```python
def get_optim_params(self) -> dict:
    """Return only trainable parameters for optimization."""
    return filter(lambda p: p.requires_grad, self.parameters())
```
with:
```python
def get_optim_params(self):
    """Return trainable named parameters as a dict."""
    return {name: p for name, p in self.named_parameters() if p.requires_grad}
```
This ensures the optimizer receives a dict of named parameters, allowing it to correctly detect VLM modules and apply the 1/10 LR rule.
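As an illustrative sanity check (the toy module below is made up, and the snippet assumes the `XVLAAdamW` class from step 1 is defined in scope), you can verify that parameters whose names contain "vlm" land in the reduced-LR group:
```python
import torch.nn as nn

class ToyPolicy(nn.Module):
    def __init__(self):
        super().__init__()
        self.vlm_proj = nn.Linear(4, 4)      # name contains "vlm" -> 1/10 LR group
        self.action_head = nn.Linear(4, 2)   # full LR group

model = ToyPolicy()
optimizer = XVLAAdamW(lr=1e-4).build(dict(model.named_parameters()))
print([group["lr"] for group in optimizer.param_groups])  # reduced LR first, full LR second
```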
❕Note
Completely matching the official reported performance may require an additional warm-up LR schedule for soft-prompts, which can bring minor improvements.
@@ -264,26 +326,6 @@ domain_id = 3
The domain_id is automatically added to observations by the `XVLAAddDomainIdProcessorStep` in the preprocessing pipeline.
The `lerobot/xvla-base` model has been trained on the following domain IDs. It is recommended to choose one that most resembles your robot/configuration:
#### Fine-tuning Datasets
| Dataset Name | Domain ID |
| ---------------- | --------- |
| Bridge | 0 |
| RT1 | 1 |
| Calvin | 2 |
| libero | 3 |
| widowx-air | 4 |
| AIR-AGILEX-HQ | 5 |
| robotwin2_abs_ee | 6 |
| robotwin2_clean | 6 |
| robocasa-human | 7 |
| VLABench | 8 |
| AGIBOT-challenge | 9 |
| AIR-AGILEX | 10 |
| AIRBOT | 18 |
### 3. Processor Steps
X-VLA requires specific preprocessing and postprocessing steps for proper operation.
+243
View File
@@ -0,0 +1,243 @@
# Synthetic Data Generation Script - Summary
## ✅ What Was Created
### Main Script: `annotate_pgen.py` (717 lines)
A production-ready script implementing the Hi-Robot synthetic data generation pipeline.
**Key Features:**
- ✅ Loads LeRobot datasets with skill annotations
- ✅ Generates synthetic user prompts and robot utterances using Qwen VLM
- ✅ **Temporal sampling** - generates dialogue every N seconds (default: 1s)
- ✅ Adds `task_index_high_level` feature to dataset parquets
- ✅ Saves high-level tasks to `meta/tasks_high_level.parquet`
- ✅ Exports debug JSONL for quality analysis
- ✅ Supports both Qwen2-VL and Qwen3-VL models
- ✅ Multi-view camera support
- ✅ Episode-aware processing with automatic first-frame sampling
- ✅ Modular architecture for easy extension
### Supporting Files Created
1. **`run_pgen.sh`** - Convenience script with sensible defaults
2. **`README_PGEN.md`** - Comprehensive documentation with examples
3. **`example_pgen_usage.md`** - Practical examples and performance estimates
4. **`SAMPLING_DIAGRAM.md`** - Visual explanation of temporal sampling strategy
5. **`PGEN_SUMMARY.md`** - This file
## 🚀 Key Innovation: Temporal Sampling
The script processes **ALL episodes** in the dataset efficiently via `--sample-interval`:
```bash
# Instead of calling VLM for every frame (expensive):
# 15,000 frames × VLM call = ~5 hours
# Generate dialogue every 1 second (efficient):
python annotate_pgen.py --repo-id dataset --model qwen --sample-interval 1.0
# 15,000 frames processed, only ~500 VLM calls (30x speedup!)
```
**How it works:**
- Process ALL frames in ALL episodes (complete coverage)
- Generate dialogue at sampled timepoints (e.g., every 1 second)
- Propagate task indices to intermediate frames
- Always sample first frame of each episode
- All frames get labeled, but VLM is only called for samples
- No dummy values or skipped episodes
**Benefits:**
- 30-100x speedup depending on interval
- Maintains temporal coherence
- Reduces cost without losing quality
- Configurable based on skill duration
## 📊 Efficiency Comparison
For a typical 15,000 frame dataset at 30 fps:
| Method | VLM Calls | Time | Cost |
|--------|-----------|------|------|
| Every frame | 15,000 | ~5 hours | $$$$ |
| Every 0.5s | 1,000 | ~20 min | $$$ |
| **Every 1s** (default) | **500** | **~10 min** | **$$** |
| Every 2s | 250 | ~5 min | $ |
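These numbers follow from a simple back-of-the-envelope estimate:
```python
# VLM calls ~= total_frames / fps / sample_interval (rough estimate behind the table above)
total_frames, fps = 15_000, 30
for interval in (0.5, 1.0, 2.0):
    print(f"{interval}s -> ~{round(total_frames / fps / interval)} VLM calls")  # 1000, 500, 250
```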
## 🎯 Usage
### Quick Test (5s sampling for fast iteration)
```bash
python examples/dataset/annotate_pgen.py \
--data-dir /fsx/jade_choghari/.cache/huggingface/lerobot/lerobot/svla_so101_pickplace \
--model Qwen/Qwen2-VL-7B-Instruct \
--sample-interval 5.0 \
--output-dir ./outputs/test_quick
```
### Production Run (Recommended Settings)
```bash
python examples/dataset/annotate_pgen.py \
--data-dir /fsx/jade_choghari/.cache/huggingface/lerobot/lerobot/svla_so101_pickplace \
--model Qwen/Qwen2-VL-7B-Instruct \
--sample-interval 1.0 \
--output-dir ./outputs/full_pgen
```
### High-Quality with Qwen3
```bash
python examples/dataset/annotate_pgen.py \
--data-dir /fsx/jade_choghari/.cache/huggingface/lerobot/lerobot/svla_so101_pickplace \
--model Qwen/Qwen3-VL-30B-A3B-Instruct \
--sample-interval 0.5 \
--temperature 0.6 \
--output-dir ./outputs/high_quality
```
## 📦 Output Structure
After running, you'll have:
```
dataset_root/
├── meta/
│ ├── tasks_high_level.parquet # High-level tasks with prompts/utterances
│ └── syn_annotations.jsonl # Debug: full context for each sample
└── data/
└── chunk-000/
└── file-000.parquet # Updated with task_index_high_level
```
**New feature added to all parquet files:**
- `task_index_high_level` (int64): Links to tasks_high_level.parquet
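A minimal sketch of how the link can be resolved (paths follow the layout above; adapt to your dataset root):
```python
import pandas as pd

# Resolve the high-level task for one frame via task_index_high_level.
frames = pd.read_parquet("dataset_root/data/chunk-000/file-000.parquet")
tasks = pd.read_parquet("dataset_root/meta/tasks_high_level.parquet")

row = frames.iloc[0]
task = tasks.iloc[int(row["task_index_high_level"])]
print(task["user_prompt"], "->", task["robot_utterance"])
```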
## 🔧 All Parameters
| Parameter | Default | Description |
|-----------|---------|-------------|
| `--repo-id` / `--data-dir` | - | Dataset source |
| `--model` | Qwen/Qwen2-VL-7B-Instruct | VLM model |
| `--device` | cuda | Device to use |
| `--dtype` | bfloat16 | Model precision |
| `--temperature` | 0.7 | Sampling temperature |
| **`--sample-interval`** | **1.0** | **Generate every N seconds (all episodes processed)** |
| `--num-image-views-per-sample` | 1 | Number of cameras |
| `--batch-size` | 1 | Batch size (currently unused) |
| `--output-dir` | None | Output directory |
| `--push-to-hub` | False | Push to HuggingFace |
## 🎨 Generated Data Format
Each sampled frame produces:
```json
{
"scenario_type": "specific_object",
"response_type": "confirmation",
"user_prompt": "Can you pick up the pink brick?",
"robot_utterance": "Sure, I'll grab the pink lego brick.",
"skill": "robot arm picks up pink lego brick",
"episode_id": 0,
"frame_index": 45,
"timestamp": 1.5,
"skill_history": ["robot arm moves towards pink lego brick"],
"task_description": "pink lego brick into the transparent box"
}
```
**Scenario Types:**
- specific_object, negative_task, situated_correction, implicit_request, constraint_based
**Response Types:**
- confirmation, clarification, acknowledgment, constraint_acknowledgment
## 🔬 Code Architecture
```python
# Main components (modular design)
class QwenPgen:
"""VLM wrapper supporting Qwen2/3"""
def call_qwen(images, prompt) -> dict
def construct_prompt(task, history, skill) -> str:
"""Build contextual prompt with history"""
def annotate_sample(pgen, images, ...) -> dict:
"""Generate dialogue for one sample"""
def generate_synthetic_data(dataset, pgen, ...) -> tuple:
"""Process entire dataset with temporal sampling"""
# Core sampling logic:
# - Track last_sample_timestamp per episode
# - Sample if time_elapsed >= sample_interval
# - Always sample first frame of episodes
# - Propagate task_index to intermediate frames
def main():
"""CLI entrypoint with argparse"""
```
## ✨ Next Steps
1. **Quick test with large interval:**
```bash
# Fast iteration - samples every 5 seconds
python examples/dataset/annotate_pgen.py \
--data-dir /path/to/dataset \
--model Qwen/Qwen2-VL-7B-Instruct \
--sample-interval 5.0 \
--output-dir ./outputs/quick_test
```
2. **Verify output quality:**
```bash
head outputs/quick_test/meta/syn_annotations.jsonl
```
3. **Production run:**
```bash
# Standard 1 second sampling for production
bash examples/dataset/run_pgen.sh
```
4. **Use in training:**
```python
from lerobot.datasets.lerobot_dataset import LeRobotDataset
ds = LeRobotDataset(repo_id="...", root="outputs/pgen_annotations")
# Access high-level task for each frame
frame = ds[100]
task_idx = frame["task_index_high_level"].item()
```
## 📚 Documentation Files
- **`README_PGEN.md`**: Full API reference and troubleshooting
- **`example_pgen_usage.md`**: Practical examples with performance estimates
- **`SAMPLING_DIAGRAM.md`**: Visual explanation of temporal sampling
- **`PGEN_SUMMARY.md`**: This overview document
## 🎯 Success Criteria
✅ Script generates synthetic dialogue using Qwen VLM
✅ Adds `task_index_high_level` feature to dataset
✅ Saves tasks to `tasks_high_level.parquet`
✅ Implements efficient temporal sampling (30-100x speedup)
✅ Handles episode boundaries correctly
✅ Produces diverse interaction types (scenarios + responses)
✅ Maintains temporal coherence within episodes
✅ Includes comprehensive documentation and examples
✅ Ready for production use on real datasets
## 💡 Key Takeaway
**The script processes ALL episodes with intelligent sampling:**
- `--sample-interval` controls how often VLM is called (default: 1.0s)
- ALL frames in ALL episodes get labeled (complete coverage)
- Intermediate frames inherit from most recent sample (temporal coherence)
- Achieves 30-100x speedup while maintaining quality
- Adjust interval based on use case: 5.0s for testing, 1.0s for production, 0.5s for fine detail
This makes the synthetic data generation **practical, scalable, and complete** for real-world datasets!
+243
View File
@@ -0,0 +1,243 @@
# Synthetic Data Generation for Hierarchical Robot Policies
This directory contains `annotate_pgen.py`, a script for generating synthetic user prompts and robot utterances for hierarchical policy training using Vision-Language Models (VLMs).
## Overview
The script implements the synthetic data generation pipeline described in the Hi-Robot paper:
1. **Load** a LeRobot dataset with skill annotations (from `annotate.py`)
2. **Generate** synthetic dialogue using Qwen VLM:
- User prompts (ℓ_t): Natural requests that lead to specific skills
- Robot utterances (u_t): Acknowledgments and clarifications
3. **Save** results as a new dataset feature `task_index_high_level`
## Prerequisites
1. First, annotate your dataset with skills using `annotate.py`:
```bash
python examples/dataset/annotate.py \
--repo-id lerobot/svla_so101_pickplace \
--video-key observation.images.base \
--model Qwen/Qwen2-VL-7B-Instruct
```
This creates `meta/skills.json` with skill segmentation for each episode.
## Usage
### Basic Usage
```bash
python examples/dataset/annotate_pgen.py \
--repo-id lerobot/svla_so101_pickplace \
--model Qwen/Qwen2-VL-7B-Instruct \
--sample-interval 1.0 \
--output-dir ./outputs/pgen_dataset
```
**Note**: The script processes **all episodes** in the dataset. It generates dialogue every 1 second (`--sample-interval 1.0`) using temporal sampling. Frames between samples reuse the last generated dialogue. This makes the process efficient while ensuring complete dataset coverage.
### Advanced Options
```bash
python examples/dataset/annotate_pgen.py \
--repo-id lerobot/svla_so101_pickplace \
--model Qwen/Qwen3-VL-30B-A3B-Instruct \
--temperature 0.8 \
--sample-interval 0.5 \
--num-image-views-per-sample 2 \
--output-dir ./outputs/pgen_dataset \
--push-to-hub
```
This example uses a more powerful model and samples every 0.5 seconds for finer granularity.
### Fast Testing (larger interval)
```bash
python examples/dataset/annotate_pgen.py \
--repo-id lerobot/svla_so101_pickplace \
--model Qwen/Qwen2-VL-7B-Instruct \
--sample-interval 5.0 \
--output-dir ./outputs/pgen_quick_test
```
Use a larger interval (5.0 seconds) for rapid iteration during development. All episodes are still processed.
### Using Local Dataset
```bash
python examples/dataset/annotate_pgen.py \
--data-dir /fsx/jade_choghari/.cache/huggingface/lerobot/lerobot/svla_so101_pickplace \
--model Qwen/Qwen2-VL-7B-Instruct \
--output-dir ./outputs/pgen_dataset
```
## Output Files
The script produces several outputs:
1. **`meta/tasks_high_level.parquet`**: High-level tasks with user prompts and robot utterances
- Columns: task_index, user_prompt, robot_utterance, skill, scenario_type, response_type
2. **`meta/syn_annotations.jsonl`**: Debug file with all generated dialogues
- One JSON object per line with full context for each frame
3. **Modified dataset**: New dataset with `task_index_high_level` feature added to all parquet files
## Scenario and Response Types
The generator produces diverse interaction types:
### Scenario Types
- **specific_object**: Direct specification of objects/actions
- **negative_task**: Instructions about what NOT to do
- **situated_correction**: Adjustments based on current state
- **implicit_request**: Implied needs without direct commands
- **constraint_based**: Specific constraints or preferences
### Response Types
- **confirmation**: Simple acknowledgment ("OK, I'll do X")
- **clarification**: Seeking confirmation ("Just to confirm...")
- **acknowledgment**: Action acknowledgment ("Got it, doing X")
- **constraint_acknowledgment**: Acknowledging constraints ("Sure, I'll X while Y")
## Example Generated Data
```json
{
"episode_id": 0,
"frame_index": 45,
"timestamp": 2.5,
"skill_current": "robot arm picks up pink lego brick",
"skill_history": ["robot arm moves towards pink lego brick"],
"task_description": "pink lego brick into the transparent box",
"scenario_type": "specific_object",
"response_type": "confirmation",
"user_prompt": "Can you grab the pink brick?",
"robot_utterance": "Sure, I'll pick up the pink lego brick."
}
```
## Accessing the Data
After running the script, access the synthetic data in your code:
```python
from lerobot.datasets.lerobot_dataset import LeRobotDataset
import pandas as pd
# Load modified dataset
dataset = LeRobotDataset(repo_id="lerobot/svla_so101_pickplace_with_high_level_tasks")
# Access frame with high-level task
frame = dataset[100]
high_level_task_idx = frame["task_index_high_level"].item()
# Load high-level tasks
tasks_df = pd.read_parquet(dataset.root / "meta" / "tasks_high_level.parquet")
task_info = tasks_df.iloc[high_level_task_idx]
print(f"User prompt: {task_info['user_prompt']}")
print(f"Robot utterance: {task_info['robot_utterance']}")
print(f"Skill: {task_info['skill']}")
```
## Architecture
The script is modular and extensible:
```python
# Core components
class QwenPgen:
"""VLM wrapper for generation"""
def call_qwen(images, prompt) -> dict
def construct_prompt(task, history, skill) -> str
"""Build prompt for VLM"""
def annotate_sample(pgen, images, ...) -> dict
"""Generate dialogue for one sample"""
def generate_synthetic_data(dataset, pgen, ...) -> tuple
"""Process entire dataset"""
```
## Parameters
| Parameter | Default | Description |
|-----------|---------|-------------|
| `--repo-id` | - | HuggingFace dataset ID |
| `--data-dir` | - | Local dataset path |
| `--model` | Qwen/Qwen2-VL-7B-Instruct | VLM model name |
| `--device` | cuda | Device (cuda/cpu) |
| `--dtype` | bfloat16 | Model precision |
| `--temperature` | 0.7 | Sampling temperature |
| `--sample-interval` | 1.0 | Generate dialogue every N seconds (all episodes processed) |
| `--num-image-views-per-sample` | 1 | Number of cameras |
| `--output-dir` | None | Output directory |
| `--push-to-hub` | False | Push to HuggingFace Hub |
## Sampling Strategy
The script uses **temporal sampling** to efficiently generate dialogue:
- **Default**: Generate dialogue every 1 second (`--sample-interval 1.0`)
- **Efficiency**: If a dataset runs at 30fps, this samples ~3% of frames
- **Propagation**: Frames between samples reuse the last generated task_index
- **Episode-aware**: Always samples the first frame of each episode
### Example with 30 fps dataset:
```bash
# Sample every 1 second (every 30 frames)
--sample-interval 1.0 # ~3,000 generations for a 100 episode dataset (30 sec/episode)
# Sample every 0.5 seconds (every 15 frames)
--sample-interval 0.5 # ~6,000 generations (more granular)
# Sample every 2 seconds (every 60 frames)
--sample-interval 2.0 # ~1,500 generations (more efficient)
```
### Why sampling works:
- Skills typically last 1-3 seconds
- Dialogue doesn't need to change every frame
- Reduces computational cost by 30-100x
- Still provides good coverage for training
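A minimal sketch of the sampling-and-propagation loop described above (variable names are illustrative, not the actual `annotate_pgen.py` code):
```python
# Assign a high-level task index to every frame while only "calling the VLM" at sampled
# timestamps; intermediate frames reuse the most recent index.
def assign_task_indices(frames, sample_interval=1.0):
    """`frames` is an iterable of dicts with 'episode_index' and 'timestamp' (seconds)."""
    last_sample_ts = {}   # per-episode timestamp of the last sampled frame
    current_task = {}     # per-episode task index being propagated
    next_task_idx = 0
    out = []
    for frame in frames:
        ep, ts = frame["episode_index"], frame["timestamp"]
        if ep not in last_sample_ts or ts - last_sample_ts[ep] >= sample_interval:
            # A real run would call the VLM here to generate the dialogue for this sample.
            current_task[ep] = next_task_idx
            next_task_idx += 1
            last_sample_ts[ep] = ts
        out.append(current_task[ep])
    return out
```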
## Tips
1. **Quick testing**: Use larger `--sample-interval` (e.g., 5.0 or 10.0) for rapid iteration
2. **Monitor GPU**: VLM inference is memory-intensive
3. **Check outputs**: Review `syn_annotations.jsonl` for quality
4. **Adjust temperature**: Higher = more diverse, lower = more consistent
5. **Multiple views**: Use `--num-image-views-per-sample 2+` for better context
6. **Tune sampling**: Start with 1.0s, increase for speed (testing), decrease for granularity (production)
## Troubleshooting
### No skills.json found
Run `annotate.py` first to generate skill annotations.
### Out of memory
- Reduce batch size to 1
- Use smaller model (Qwen2-VL-7B instead of Qwen3-VL-30B)
- Process fewer samples at a time
### Poor quality generations
- Adjust temperature (try 0.6-0.9)
- Check that skills.json has good annotations
- Ensure images are loading correctly
## Citation
Based on the Hi-Robot paper's synthetic data generation approach:
```
@article{hirobot2024,
title={Hi-Robot: Hierarchical Robot Learning with Vision-Language Models},
year={2024}
}
```
+141
View File
@@ -0,0 +1,141 @@
# Temporal Sampling Strategy Visualization
## How `--sample-interval` Works
### Example: 30 fps dataset, `--sample-interval 1.0` (1 second)
```
Timeline (seconds): 0.0 0.5 1.0 1.5 2.0 2.5 3.0
│ │ │ │ │ │ │
Frames: 0───15───30───45───60───75───90───105──120──135──150
│ │ │ │ │ │ │
▼ ▼ ▼ ▼
Sampled: YES NO YES NO YES NO YES
│ │ │ │
Task Index: [0]──────────────>[1]──────────────>[2]──────────────>[3]
│ │ │ │
VLM Called: ✓ Gen ✓ Gen ✓ Gen ✓ Gen
dialogue dialogue dialogue dialogue
│ │ │ │
Frames 0-29 ─────┘ │ │ │
get task 0 │ │ │
│ │ │
Frames 30-59 ────────────────────────┘ │ │
get task 1 │ │
│ │
Frames 60-89 ──────────────────────────────────────────┘ │
get task 2 │
Frames 90-119 ────────────────────────────────────────────────────────────┘
get task 3
```
## Comparison: Different Sampling Intervals
### `--sample-interval 2.0` (every 2 seconds)
```
Timeline: 0.0 1.0 2.0 3.0 4.0 5.0 6.0
│ │ │ │ │ │ │
Sampled: YES NO YES NO YES NO YES
│ │ │ │
Tasks: [0]───────────────>[1]───────────────>[2]───────────────>[3]
VLM Calls: 4 (fewer calls, faster but less granular)
```
### `--sample-interval 1.0` (every 1 second) - **DEFAULT**
```
Timeline: 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 4.5 5.0 5.5 6.0
│ │ │ │ │ │ │ │ │ │ │ │ │
Sampled: YES NO YES NO YES NO YES NO YES NO YES NO YES
│ │ │ │ │ │ │
Tasks: [0]─────────>[1]─────────>[2]─────────>[3]─────────>[4]─────────>[5]─────>[6]
VLM Calls: 7 (balanced coverage and speed)
```
### `--sample-interval 0.5` (every 0.5 seconds)
```
Timeline: 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 4.5 5.0 5.5 6.0
│ │ │ │ │ │ │ │ │ │ │ │ │
Sampled: YES YES YES YES YES YES YES YES YES YES YES YES YES
│ │ │ │ │ │ │ │ │ │ │ │ │
Tasks: [0]─>[1]─>[2]─>[3]─>[4]─>[5]─>[6]─>[7]─>[8]─>[9]─>[10]>[11]>[12]
VLM Calls: 13 (high granularity, slower but more detailed)
```
## Episode Boundaries
The script always samples the **first frame** of each episode:
```
Episode 0 Episode 1 Episode 2
├─────────────────────────────────┤├─────────────────────────────────┤├──────...
│ ││ ││
Frame: 0 30 60 90 120 130 160 190 220 250 260 290 320
Time: 0.0 1.0 2.0 3.0 4.0 0.0 1.0 2.0 3.0 4.0 0.0 1.0 2.0
│ │ │ │ │ │ │ │ │ │ │ │ │
▼ ▼ ▼ ▼ ▼ ▼ ▼ ▼ ▼ ▼ ▼ ▼ ▼
Sample:YES YES YES YES YES YES YES YES YES YES YES YES YES
│ │ │ │ │ │ │ │ │ │ │ │ │
Task: 0────1─────2─────3────4 5─────6─────7─────8────9 10────11───12
Note: Frames 0, 130, 260 are ALWAYS sampled (episode starts)
Even if they're within the sample-interval window
```
## Real-World Example: svla_so101_pickplace Dataset
Typical stats:
- **Total episodes**: 50
- **Avg episode length**: 300 frames (10 seconds at 30 fps)
- **Total frames**: 15,000
### Without Sampling (every frame)
```
Frames processed: 15,000
VLM calls: 15,000
Time estimate: ~5 hours
Unique tasks: ~12,000 (lots of duplicates)
```
### With `--sample-interval 1.0` (every 1 second)
```
Frames processed: 15,000 ✓
VLM calls: 500
Time estimate: ~10 minutes
Unique tasks: ~450 (meaningful variety)
Efficiency gain: 30x faster
```
### With `--sample-interval 2.0` (every 2 seconds)
```
Frames processed: 15,000 ✓
VLM calls: 250
Time estimate: ~5 minutes
Unique tasks: ~220
Efficiency gain: 60x faster
```
## Key Points
1. **All frames get labeled**: Every frame gets a `task_index_high_level`
2. **Only sampled frames call VLM**: Huge efficiency gain
3. **Temporal coherence**: Nearby frames share the same task
4. **Episode-aware**: Always samples episode starts
5. **Configurable**: Adjust `--sample-interval` based on your needs
## Choosing Your Sampling Interval
| Use Case | Recommended Interval | Why |
|----------|---------------------|-----|
| Quick testing | 2.0s | Fastest iteration |
| Standard training | 1.0s | Good balance |
| High-quality dataset | 0.5s | Better coverage |
| Fine-grained control | 0.33s | Very detailed |
| Dense annotations | 0.1s | Nearly every frame |
**Rule of thumb**: Match your sampling interval to your typical skill duration.
If skills last 1-3 seconds, sampling every 1 second captures each skill multiple times.
@@ -0,0 +1,138 @@
#!/usr/bin/env python
"""
Example demonstrating how to use the ActionTokenizerProcessorStep to tokenize actions.
This example shows how to:
1. Load a dataset with action data
2. Apply the action tokenizer processor to tokenize actions with proper padding/truncation
3. Access both the tokenized actions and the attention mask
4. Decode tokenized actions back to their original form
"""
import torch
from transformers import AutoProcessor
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.processor.core import EnvTransition, TransitionKey
from lerobot.processor.tokenizer_processor import ActionTokenizerProcessorStep
from lerobot.utils.constants import ACTION_TOKEN_MASK
# Define delta timestamps for the dataset
delta_timestamps = {
'action': [
0.0, 0.03333333333333333, 0.06666666666666667, 0.1, 0.13333333333333333,
0.16666666666666666, 0.2, 0.23333333333333334, 0.26666666666666666, 0.3,
0.3333333333333333, 0.36666666666666664, 0.4, 0.43333333333333335,
0.4666666666666667, 0.5, 0.5333333333333333, 0.5666666666666667, 0.6,
0.6333333333333333, 0.6666666666666666, 0.7, 0.7333333333333333,
0.7666666666666667, 0.8, 0.8333333333333334, 0.8666666666666667, 0.9,
0.9333333333333333, 0.9666666666666667, 1.0, 1.0333333333333334,
1.0666666666666667, 1.1, 1.1333333333333333, 1.1666666666666667, 1.2,
1.2333333333333334, 1.2666666666666666, 1.3, 1.3333333333333333,
1.3666666666666667, 1.4, 1.4333333333333333, 1.4666666666666666, 1.5,
1.5333333333333334, 1.5666666666666667, 1.6, 1.6333333333333333
]
}
# Load the dataset
print("Loading dataset...")
dataset = LeRobotDataset(
repo_id="local",
root="/fsx/jade_choghari/outputs/pgen_annotations1",
delta_timestamps=delta_timestamps
)
# Create a dataloader
dataloader = torch.utils.data.DataLoader(
dataset,
num_workers=0,
batch_size=4,
shuffle=True,
)
# Get a batch of data
batch = next(iter(dataloader))
action_data = batch["action"] # Shape: (batch_size, action_horizon, action_dim)
print(f"\nOriginal action shape: {action_data.shape}")
print(f"Original action data (first sample, first timestep):\n{action_data[0, 0]}")
# Method 1: Using the tokenizer directly (as in fast_tokenize.py)
print("\n" + "="*80)
print("Method 1: Direct tokenizer usage")
print("="*80)
tokenizer = AutoProcessor.from_pretrained("physical-intelligence/fast", trust_remote_code=True)
# Tokenize directly
tokens = tokenizer(action_data)
print(f"\nDirect tokenization result type: {type(tokens)}")
print(f"Tokens shape/length: {tokens.shape if isinstance(tokens, torch.Tensor) else len(tokens)}")
# Decode
decoded_actions = tokenizer.decode(tokens)
print(f"Decoded actions shape: {decoded_actions.shape}")
reconstruction_error = torch.abs(action_data - decoded_actions).mean()
print(f"Mean absolute reconstruction error: {reconstruction_error.item():.6f}")
# Method 2: Using the ActionTokenizerProcessorStep with proper padding/truncation
print("\n" + "="*80)
print("Method 2: Using ActionTokenizerProcessorStep (with padding & mask)")
print("="*80)
# Create the action tokenizer processor step
action_tokenizer_processor = ActionTokenizerProcessorStep(
tokenizer_name="physical-intelligence/fast",
trust_remote_code=True,
max_action_tokens=32, # Maximum number of tokens per action
)
# Create a transition with the action data
transition = {
TransitionKey.ACTION: action_data,
TransitionKey.OBSERVATION: {}, # Empty for this example
}
# Apply the processor
processed_transition = action_tokenizer_processor(transition)
# Extract tokenized actions and mask
tokenized_actions = processed_transition[TransitionKey.ACTION]
complementary_data = processed_transition[TransitionKey.COMPLEMENTARY_DATA]
action_mask = complementary_data[ACTION_TOKEN_MASK]
print(f"\nTokenized actions shape: {tokenized_actions.shape}") # (batch_size, max_action_tokens)
print(f"Action mask shape: {action_mask.shape}") # (batch_size, max_action_tokens)
print(f"Tokenized actions dtype: {tokenized_actions.dtype}")
print(f"Action mask dtype: {action_mask.dtype}")
# Show token statistics
print(f"\nFirst sample tokens: {tokenized_actions[0]}")
print(f"First sample mask: {action_mask[0]}")
num_real_tokens = action_mask[0].sum().item()
print(f"Number of real tokens (non-padding): {num_real_tokens}")
print(f"Number of padding tokens: {action_mask.shape[1] - num_real_tokens}")
# Decode using the mask
print("\nDecoding tokenized actions...")
decoded_with_processor = tokenizer.decode(tokenized_actions)
print(f"Decoded actions shape: {decoded_with_processor.shape}")
# Calculate reconstruction error
reconstruction_error_processor = torch.abs(action_data - decoded_with_processor).mean()
print(f"Mean absolute reconstruction error: {reconstruction_error_processor.item():.6f}")
# Show that masking works correctly
print("\n" + "="*80)
print("Mask demonstration")
print("="*80)
for i in range(min(4, tokenized_actions.shape[0])):
mask_i = action_mask[i]
num_real = mask_i.sum().item()
print(f"Sample {i}: {num_real} real tokens, {len(mask_i) - num_real} padding tokens")
print("\n" + "="*80)
print("Action tokenization example completed successfully!")
print("="*80)
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
+143
View File
@@ -0,0 +1,143 @@
# Example: Synthetic Data Generation with Sampling
## Quick Start
### 1. Test with 100 frames and 1 second sampling
```bash
python examples/dataset/annotate_pgen.py \
--data-dir /fsx/jade_choghari/.cache/huggingface/lerobot/lerobot/svla_so101_pickplace \
--model Qwen/Qwen2-VL-7B-Instruct \
--num-samples 100 \
--sample-interval 1.0 \
--output-dir ./outputs/test_pgen
```
**Expected behavior** (assuming 30 fps):
- Total frames: 100
- Frames sampled: ~4 (every 30 frames = 1 second)
- Efficiency: 96% fewer VLM calls
- Output: All 100 frames get `task_index_high_level`, but only 4 unique dialogues generated
### 2. Process full dataset with different sampling rates
#### Conservative (every 2 seconds)
```bash
python examples/dataset/annotate_pgen.py \
--data-dir /fsx/jade_choghari/.cache/huggingface/lerobot/lerobot/svla_so101_pickplace \
--model Qwen/Qwen2-VL-7B-Instruct \
--sample-interval 2.0 \
--output-dir ./outputs/pgen_2s
```
#### Standard (every 1 second) - **RECOMMENDED**
```bash
python examples/dataset/annotate_pgen.py \
--data-dir /fsx/jade_choghari/.cache/huggingface/lerobot/lerobot/svla_so101_pickplace \
--model Qwen/Qwen2-VL-7B-Instruct \
--sample-interval 1.0 \
--output-dir ./outputs/pgen_1s
```
#### Fine-grained (every 0.5 seconds)
```bash
python examples/dataset/annotate_pgen.py \
--data-dir /fsx/jade_choghari/.cache/huggingface/lerobot/lerobot/svla_so101_pickplace \
--model Qwen/Qwen2-VL-7B-Instruct \
--sample-interval 0.5 \
--output-dir ./outputs/pgen_0.5s
```
## Performance Estimates
For a dataset with:
- 100 episodes
- 10 seconds per episode (average)
- 30 fps
- Total frames: 30,000
| Sampling Interval | Frames Sampled | % Sampled | Speedup | Time Estimate |
|-------------------|----------------|-----------|---------|---------------|
| Every frame (0.033s) | 30,000 | 100% | 1x | ~10 hours |
| 0.5 seconds | 2,000 | 6.7% | 15x | ~40 min |
| **1.0 seconds** | **1,000** | **3.3%** | **30x** | **~20 min** |
| 2.0 seconds | 500 | 1.7% | 60x | ~10 min |
*Note: Times are approximate and depend on GPU, model size, and generation speed*
## Understanding the Output
### Console Output Example
```
[cyan]Generating synthetic data for 30000 frames...[/cyan]
[cyan]Sampling interval: 1.0s (fps: 30)[/cyan]
Generating synthetic dialogue: 100%|████████| 30000/30000 [20:15<00:00, 24.68it/s]
[green]✓ Sampled 1000 frames out of 30000 (3.3%)[/green]
[green]✓ Generated 450 unique high-level tasks[/green]
```
### What happens:
1. **Frame 0 (t=0.0s)**: Generate dialogue → Task index 0
2. **Frames 1-29 (t=0.033s-0.967s)**: Reuse task index 0
3. **Frame 30 (t=1.0s)**: Generate new dialogue → Task index 1
4. **Frames 31-59 (t=1.033s-1.967s)**: Reuse task index 1
5. And so on...
### Result:
- Every frame has a `task_index_high_level`
- Only sampled frames have unique dialogues generated
- Intermediate frames inherit from the most recent sample
- Maintains temporal coherence within episodes
## Checking Your Results
After running, verify the output:
```bash
# Check the generated tasks
python -c "
import pandas as pd
from pathlib import Path
tasks = pd.read_parquet('outputs/test_pgen/meta/tasks_high_level.parquet')
print(f'Total unique tasks: {len(tasks)}')
print(f'Sample tasks:')
print(tasks[['user_prompt', 'robot_utterance', 'skill']].head())
"
# Check debug output
head outputs/test_pgen/meta/syn_annotations.jsonl
# Load and verify dataset
python -c "
from lerobot.datasets.lerobot_dataset import LeRobotDataset
ds = LeRobotDataset(repo_id='local_with_high_level_tasks',
root='outputs/test_pgen')
print(f'Dataset has {len(ds)} frames')
print(f'Features: {list(ds.features.keys())}')
assert 'task_index_high_level' in ds.features
print('✓ task_index_high_level feature added successfully!')
"
```
## Common Use Cases
### Development/Testing
```bash
--sample-interval 2.0 # Fast iteration
--num-samples 500 # Small subset
```
### Production Training
```bash
--sample-interval 1.0 # Good coverage
# Process all samples (no --num-samples)
```
### High-Quality Dataset
```bash
--sample-interval 0.5 # Fine-grained
--temperature 0.6 # More consistent
--model Qwen/Qwen3-VL-30B-A3B-Instruct # Larger model
```
+25
View File
@@ -0,0 +1,25 @@
import numpy as np
from transformers import AutoProcessor
import torch
from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
delta_timestamps = {"action": [i / 30 for i in range(50)]}  # 50 action steps at 30 fps, from 0.0 s to 49/30 ≈ 1.633 s
dataset = LeRobotDataset(repo_id="local", root="/fsx/jade_choghari/outputs/pgen_annotations1", delta_timestamps=delta_timestamps)
dataloader = torch.utils.data.DataLoader(
dataset,
num_workers=0,
batch_size=4,
shuffle=True,
)
batch = next(iter(dataloader))
# Load the tokenizer from the Hugging Face hub
tokenizer = AutoProcessor.from_pretrained("physical-intelligence/fast", trust_remote_code=True)
# Tokenize & decode the action chunks from the dataset batch
action_data = batch["action"]  # one batch of action chunks, shape [batch, horizon, action_dim]
tokens = tokenizer(action_data)  # list of token-id lists, one per action chunk
decoded_actions = tokenizer.decode(tokens)
print("tokenized actions: ", tokens)
+17
View File
@@ -0,0 +1,17 @@
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
model_id = "google/paligemma-3b-pt-224"
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
processor = AutoProcessor.from_pretrained(model_id)
breakpoint()
# NOTE: inputs_embeds, attention_mask, position_ids and adarms_cond are assumed to be
# prepared upstream (e.g. by the policy's prefix-embedding step); this snippet only
# sketches the prefix forward pass. adarms_cond appears specific to the adaRMS-conditioned
# PaliGemma used by the policy, not stock transformers.
prefix_output = model.language_model.forward(
inputs_embeds=inputs_embeds[0],
attention_mask=attention_mask,
position_ids=position_ids,
adarms_cond=adarms_cond[0] if adarms_cond is not None else None,
)
prefix_past_key_values = prefix_output.past_key_values
# prefix_output to be used for the language head
# shape: [batch_size, seq_len, hidden_size] with hidden_size = 2048
prefix_output = prefix_output.last_hidden_state
+91
View File
@@ -0,0 +1,91 @@
import torch
from huggingface_hub import HfApi
import lerobot
from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
# import make_pre_post_processors
from lerobot.policies.factory import make_pre_post_processors
from lerobot.policies.pi05.configuration_pi05 import PI05Config
from lerobot.policies.factory import make_policy, make_policy_config
from lerobot.configs.policies import PreTrainedConfig
cfg = PreTrainedConfig.from_pretrained(
pretrained_name_or_path="/fsx/jade_choghari/outputs/pi0_training/checkpoints/last/pretrained_model",
)
cfg.dtype = "bfloat16"
pre_processor, post_processor = make_pre_post_processors(
policy_cfg=cfg,
pretrained_path="/fsx/jade_choghari/outputs/pi0_training/checkpoints/last/pretrained_model",
)
delta_timestamps = {"action": [i / 30 for i in range(50)]}  # 50 action steps at 30 fps, from 0.0 s to 49/30 ≈ 1.633 s
dataset = LeRobotDataset(repo_id="local", root="/fsx/jade_choghari/outputs/pgen_annotations1", delta_timestamps=delta_timestamps)
# rename map --rename_map='{
# "observation.images.side": "observation.images.base_0_rgb",
# "observation.images.up": "observation.images.left_wrist_0_rgb"
# }'
rename_map = {
"observation.images.side": "observation.images.base_0_rgb",
"observation.images.up": "observation.images.left_wrist_0_rgb"
}
policy = make_policy(
cfg=cfg,
ds_meta=dataset.meta,
rename_map=rename_map,
)
dataloader = torch.utils.data.DataLoader(
dataset,
num_workers=0,
batch_size=4,
shuffle=True,
)
batch = next(iter(dataloader))
batch = pre_processor(batch)
policy.train()
# run inference
# action = policy.select_action(batch)
loss, loss_dict = policy.forward(batch)
breakpoint()
# import requests
# from PIL import Image
# from transformers import AutoProcessor
# model = policy.model.paligemma_with_expert.paligemma
# model = model.to(device="cuda", dtype=torch.bfloat16)
# model.eval()
# prompt = "Describe this image."
# url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
# image = Image.open(requests.get(url, stream=True).raw)
# processor = AutoProcessor.from_pretrained(
# "google/paligemma-3b-pt-224",
# )
# inputs = processor(image, prompt, return_tensors="pt").to(model.device)
# print("generating...")
# output = model.generate(
# **inputs,
# max_new_tokens=50,
# use_cache=True, # default dynamic cache
# )
# print(processor.decode(output[0], skip_special_tokens=True))
# # other model
# from transformers import PaliGemmaForConditionalGeneration
# model = PaliGemmaForConditionalGeneration.from_pretrained(
# "google/paligemma2-3b-pt-224",
# torch_dtype=torch.bfloat16,
# device_map="auto",
# )
# model.eval()
# print("generating...")
# output = model.generate(
# **inputs,
# max_new_tokens=100,
# use_cache=True, # default dynamic cache
# )
# print("Model 2 output:")
# print(processor.decode(output[0], skip_special_tokens=True))
+23
View File
@@ -0,0 +1,23 @@
import torch
from huggingface_hub import HfApi
import lerobot
from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
dataset = LeRobotDataset(repo_id="local", root="/fsx/jade_choghari/outputs/pgen_annotations1")
dataloader = torch.utils.data.DataLoader(
dataset,
num_workers=0,
batch_size=32,
shuffle=True,
)
batch = next(iter(dataloader))
print(batch.keys())
print(batch['task_index_high_level'].shape)
print(batch['task_index_high_level'])
print(batch['user_prompt'][0])
print(batch['robot_utterance'][0])
print(batch['task'][0])
breakpoint()
+18
View File
@@ -0,0 +1,18 @@
import torch
from huggingface_hub import HfApi
import lerobot
from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
dataset = LeRobotDataset(repo_id="lerobot/libero")
dataloader = torch.utils.data.DataLoader(
dataset,
num_workers=0,
batch_size=4,
shuffle=True,
)
batch = next(iter(dataloader))
print(batch.keys())
breakpoint()
+159
View File
@@ -0,0 +1,159 @@
## One-sentence answer
> `make_att_2d_masks(prefix_pad_masks, prefix_att_masks)` builds the **actual 2D attention mask** `[B, L, L]` that tells the transformer **which token positions may attend to which others**, combining **padding** and **causality**.
Everything else you've seen so far was just metadata.
---
## What goes in
### Inputs
```python
prefix_pad_masks # shape [B, L]
prefix_att_masks # shape [B, L]
```
Where:
* `prefix_pad_masks[b, i] = True`
→ token `i` exists (not padding)
* `prefix_att_masks[b, i] = False`
→ token `i` is **bidirectional**
* `prefix_att_masks[b, i] = True`
→ token `i` is **causal (autoregressive)**
---
## What comes out
```python
att_2d_prefix # shape [B, L, L]
```
Each entry:
```text
att_2d_prefix[b, i, j] = True
```
means:
> “In batch `b`, **token i (query)** is allowed to attend to **token j (key)**.”
---
## How it is constructed (conceptually)
For **each batch b**, **each query position i**, **each key position j**:
```python
if not prefix_pad_masks[b, j]:
att[b, i, j] = False # cannot attend to padding
else if not prefix_att_masks[b, i]:
att[b, i, j] = True # bidirectional token → can see all real tokens
else:
att[b, i, j] = (j <= i) # causal token → can see only past + itself
```
That's it.
---
## Tiny concrete example (exactly matching your code)
Suppose:
```python
prefix_pad_masks[0] = [T, T, T, T, T, F]
prefix_att_masks[0] = [F, F, F, T, T, T]
```
Tokens:
```
0: IMG
1: IMG
2: LANG
3: SUB0
4: SUB1
5: PAD
```
---
### Resulting `att_2d_prefix[0]`
`✓ = True, ✗ = False`
| Q \ K | 0 | 1 | 2 | 3 | 4 | 5 |
| ---------- | - | - | - | - | - | - |
| 0 (bi) | ✓ | ✓ | ✓ | ✓ | ✓ | ✗ |
| 1 (bi) | ✓ | ✓ | ✓ | ✓ | ✓ | ✗ |
| 2 (bi) | ✓ | ✓ | ✓ | ✓ | ✓ | ✗ |
| 3 (causal) | ✓ | ✓ | ✓ | ✓ | ✗ | ✗ |
| 4 (causal) | ✓ | ✓ | ✓ | ✓ | ✓ | ✗ |
| 5 (pad) | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
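To make the rule concrete, here is a minimal PyTorch sketch that rebuilds this exact table from the two 1D masks (it follows the conceptual rule above, not necessarily the library's exact implementation):
```python
import torch

pad = torch.tensor([[True, True, True, True, True, False]])    # prefix_pad_masks[0]
att = torch.tensor([[False, False, False, True, True, True]])  # prefix_att_masks[0]

L = pad.shape[1]
idx = torch.arange(L)
causal = idx[None, :] <= idx[:, None]                 # [L, L]: key position <= query position
att_2d = torch.where(att[:, :, None], causal[None], torch.ones(L, L, dtype=torch.bool))
att_2d = att_2d & pad[:, None, :] & pad[:, :, None]   # block padding as key and as query

print(att_2d[0].int())  # matches the ✓/✗ table above (1 = may attend)
```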
---
## Why this matters for your training code
This line:
```python
att_2d_prefix_4d = self._prepare_attention_masks_4d(att_2d_prefix)
```
Converts `[B, L, L] → [B, 1, L, L]` and possibly flips True/False to `0/-inf`.
This is **exactly what Paligemma uses inside self-attention**.
---
## Key implications (VERY important)
### 1️⃣ This mask does **not isolate token groups**
* Bidirectional tokens can attend to **everything**
* Causal tokens only restrict *their own row*
So **flow/action tokens must be blocked separately**.
---
### 2️⃣ This is why your AR subtask prediction works
* Subtask tokens are causal
* Output at position `i` predicts token `i+1`
* Padding is fully ignored
---
### 3️⃣ Inference behavior
When `subtask_tokens = None`:
* `prefix_att_masks` contains only `False`
* `att_2d_prefix` becomes **fully bidirectional**
* No AR behavior remains
Exactly what you want.
---
## One-sentence takeaway (commit this)
> `make_att_2d_masks` fuses **padding** and **causality** into a concrete `[B, L, L]` attention matrix that the transformer actually uses.
If you want next, I can:
* inspect `make_att_2d_masks()` source with you
* show how to block **flow → subtask** attention
* explain how this changes when suffix tokens are added
* help you refactor this into a cleaner “grouped attention” API
You're now at the point where the model's behavior should feel *predictable*, not magical.
+334
View File
@@ -0,0 +1,334 @@
Generate annotate_pgen.py using Qwen for synthetic data generation
You are writing a Python script called annotate_pgen.py.
This script generates synthetic user prompts (ℓ_t) and robot utterances (u_t) for Hi Robot-style hierarchical policy training, using Qwen3-VL as the generator model (pgen).
SCRIPT PURPOSE
The script must:
Load Dlabeled, a LeRobot dataset that has been annotated using the annotate.py script and contains:
images: list of image paths at time t
skill_current: the annotated skill label (ℓ̂_t)
skill_history: list of previous skill labels (ℓ̂₀ … ℓ̂_{t−1}); these were annotated earlier, and details on them are stored in the dataset at DATA_PATH/meta/skills.json
you will find something like
{
"coarse_description": "pink lego brick into the transparent box",
"skill_to_task_index": {
"robot arm picks up pink lego brick": 19,
"robot arm approaches transparent box": 3,
"robot arm retracts from transparent box": 28,
"robot arm moves towards pink lego brick": 12,
"robot arm releases red lego brick into box": 26,
"robot arm releases red lego brick into transparent box": 27,
"robot arm closes gripper to pick up the pink lego brick": 5,
"robot arm lifts the pink lego brick": 7,
etc..
},
"episodes": {
"0": {
"episode_index": 0,
"description": "pink lego brick into the transparent box",
"skills": [
{
"name": "robot arm moves towards pink lego brick",
"start": 0.0,
"end": 1.8
},
{
"name": "robot arm picks up pink lego brick",
"start": 1.8,
"end": 3.1
},
{
"name": "robot arm moves towards transparent box",
"start": 3.1,
"end": 5.5
},
{
"name": "robot arm releases pink lego brick into transparent box",
"start": 5.5,
"end": 7.0
},
{
"name": "robot arm retracts from transparent box",
"start": 7.0,
"end": 10.1
}
]
},
"1": {
"episode_index": 1,
"description": "pink lego brick into the transparent box",
"skills": [
{
"name": "robot arm moves towards red lego brick",
"start": 0.0,
"end": 1.2
},
{
"name": "robot arm picks up red lego brick",
"start": 1.2,
"end": 2.0
},
{
"name": "robot arm moves towards transparent box",
"start": 2.0,
"end": 3.8
},
{
"name": "robot arm places red lego brick into transparent box",
"start": 3.8,
"end": 5.0
},
{
"name": "robot arm moves away from transparent box",
"start": 5.0,
"end": 8.9
}
]
},
notice that task_description is a high-level description (e.g., "make a sandwich"), stored in the description field of each episode
For each sample, call Qwen VLM to generate:
synthetic user prompt ℓ_t
synthetic robot response u_t
Save results to D_syn in Parquet format inside DATA_PATH/meta/tasks.parquet; note that tasks.parquet already contains the other tasks, so you need to update it
Should be modular, clean, easy to extend, with:
a PGEN_PROMPT_TEMPLATE
a construct_prompt() method
a call_qwen() method
an annotate_sample() method
a CLI entrypoint (if __name__ == "__main__":)
📦 INPUT FORMAT (Dlabeled)
The script should expect Dlabeled as a .jsonl file where each line has:
{
"episode_id": "ep_001",
"t": 37,
"images": ["path/to/cam0_t.jpg", "path/to/cam1_t.jpg"],
"skill_current": "pick up the KitKat",
"skill_history": ["open fridge", "pick up lettuce", "place lettuce"],
"task_description": "making a sandwich"
}
📤 OUTPUT FORMAT (D_syn)
Each line of synthetically generated data should be:
{
"episode_id": "ep_001",
"t": 37,
"images": ["path/to/cam0_t.jpg", "path/to/cam1_t.jpg"],
"skill_current": "pick up the KitKat",
"skill_history": [...],
"user_prompt": "Can you grab me something sweet?",
"robot_utterance": "Sure, I can pick up the KitKat.",
"task_description": "making a sandwich"
}
Store as syn_annotations.jsonl for debugging
🧠 pgen MODEL (Qwen) REQUIREMENTS
Use HuggingFace Transformers:
Qwen/Qwen2-VL-7B-Instruct (or any Qwen2-VL Vision-Language model available)
Use the image + text chat interface
Vision inputs should be loaded with PIL
Use a single forward pass that outputs BOTH ℓ_t and u_t in a structured JSON
📝 PROMPT FORMAT FOR pgen
Create a template like:
You are a robot-assistant dialogue generator for hierarchical robot policies.
You will receive:
- A list of images showing the current robot scene.
- The high-level task: {task_description}
- Previous skill steps completed: {skill_history}
- The next skill to be performed by the robot: {skill_current}
Generate two things in JSON:
1. "user_prompt": a natural-sounding user request that logically leads to the robot performing the skill "{skill_current}" given the task and history.
2. "robot_utterance": a natural robot reply acknowledging or clarifying the request.
The responses must be grounded in the visual scene, the task, and the skill history.
Respond ONLY in JSON:
{
"user_prompt": "...",
"robot_utterance": "..."
}
This response will have a corresponding task_index, and the task will be saved in tasks.parquet. You must also update each dataset parquet (for example /fsx/jade_choghari/.cache/huggingface/lerobot/lerobot/svla_so101_pickplace/data/chunk-000/file-000.parquet) to include this new feature called task_index_high_level, and consider updating the metadata in info.json as well.
📌 LOGIC REQUIRED
construct_prompt(sample)
Loads sample dict
Inserts:
task_description
skill_history
skill_current
Returns a full text prompt string
call_qwen(images, prompt)
Loads images into Qwen-VL multimodal input format
Calls model.generate
Parses JSON output
annotate_sample(sample)
Builds prompt
Calls Qwen
Returns augmented sample with user_prompt + robot_utterance
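A minimal sketch of how these three pieces might fit together (the names, prompt-template usage, and Qwen chat API calls below are illustrative assumptions, not the final implementation):
import json
from PIL import Image
from qwen_vl_utils import process_vision_info
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-7B-Instruct", torch_dtype="auto", device_map="cuda"
)
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")

def construct_prompt(sample: dict) -> str:
    # Fill the PGEN_PROMPT_TEMPLATE described above with the sample's fields
    # (assumes the literal JSON braces in the template are escaped for str.format).
    return PGEN_PROMPT_TEMPLATE.format(
        task_description=sample["task_description"],
        skill_history=", ".join(sample["skill_history"]),
        skill_current=sample["skill_current"],
    )

def call_qwen(images: list[str], prompt: str, temperature: float = 0.9) -> dict:
    content = [{"type": "image", "image": Image.open(p)} for p in images]
    content.append({"type": "text", "text": prompt})
    messages = [{"role": "user", "content": content}]
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(text=[text], images=image_inputs, videos=video_inputs,
                       padding=True, return_tensors="pt").to(model.device)
    out = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=temperature)
    response = processor.batch_decode(
        [o[len(i):] for i, o in zip(inputs.input_ids, out)], skip_special_tokens=True
    )[0]
    return json.loads(response)  # expects {"user_prompt": ..., "robot_utterance": ...}

def annotate_sample(sample: dict) -> dict:
    generated = call_qwen(sample["images"], construct_prompt(sample))
    return {**sample, **generated}
The real script should still parse the model output defensively (as required below) instead of assuming well-formed JSON.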
🚀 CLI Usage
The script should run as:
python annotate_pgen.py \
--output-dir PATH \
--model Qwen/Qwen2-VL-7B-Instruct \
--repo-id lerobot/svla_so101_pickplace \
--batch-size 1
Include arguments via argparse.
🔧 OTHER REQUIREMENTS
Use tqdm for progress bars
Log errors gracefully and continue
Support GPU acceleration (device="cuda")
Cache model loading so it's not reloaded every call
Make the prompt deterministic but allow temperature parameter
Add a flag --num-image-views-per-sample
Add automatic JSON parsing with helpful error messages
🎯 FINAL DELIVERABLE
Cursor must now generate:
A full Python file named annotate_pgen.py implementing the above functionality end-to-end.
It should be production-ready, runnable on real data, cleanly structured, and easy to modify.
from the paper:
Next, we use a large vision-language model (VLM) p_gen to produce synthetic user prompts and interjections ℓ_t, and corresponding robot utterances u_t. Given D_labeled, we prompt p_gen with both the visual context I¹_t, …, Iⁿ_t and the skill label ℓ̂_t (e.g., pick up the lettuce). p_gen then imagines an appropriate interaction that might have led to ℓ̂_t in a real user interaction: it generates possible user prompts ℓ_t (e.g., "Can you add some lettuce for me?") along with the robot's verbal responses and clarifications u_t. We detail the
A. Synthetic Data Generation
A.1. Scenario and Response Categorization
To ensure the quality and diversity of the synthetic data, we incorporate structured scenario classification and response categorization into the prompt design for p_gen, following (Stephan et al., 2024). Specifically, we classify interactions into different scenario types, such as negative task (where the user instructs the robot what not to do), situated correction (where the user adjusts an earlier command based on the evolving task state), and specific constraint (where the user specifies particular constraints, such as dietary preferences). In addition, we categorize the robot's responses into types such as simple confirmations, clarifications, and error handling. These classifications guide the generation process to ensure a broad range of user-robot interactions.
A.2. Prompt Construction for Contextual Grounding
In prompt P, we include a detailed description of the task (e.g., bussing a table, making a sandwich, grocery shopping) and instruct the model to ground responses in visual observations and prior context. A key advantage of leveraging large pretrained VLMs is their ability to incorporate world knowledge when generating interactions. For instance, the model can infer dietary constraints when generating prompts for sandwich-making, producing user commands such as "Can you make a sandwich for me? I'm lactose intolerant" and an appropriate robot response like "Sure, I won't put cheese on it." Similarly, it can reason over ambiguous or implicit requests, such as inferring that "I want something sweet" in a grocery shopping scenario should lead to suggestions like chocolate or candy.
To maintain consistency in multi-step tasks, we condition p_gen on prior skill labels within an episode ℓ̂_0, …, ℓ̂_{t−1}, allowing it to generate coherent user commands that account for past actions. For instance, if the robot has already placed lettuce and tomato on a sandwich, the generated user prompt might request additional ingredients that logically follow. This ensures that the synthetic interactions reflect realistic task progression rather than isolated commands. As such, we leverage p_gen(ℓ_t, u_t | I¹_t, …, Iⁿ_t, ℓ̂_0, …, ℓ̂_{t−1}, ℓ̂_t, P) to produce a richer, more diverse synthetic dataset D_syn that provides meaningful supervision for training our high-level policy.
While in this work we generate a separate D_syn and train a separate high-level policy for each task (e.g., sandwich making vs. table cleaning) for clarity and ease of benchmarking, the architecture is readily amenable to a unified multi-task formulation. In principle, the same hierarchical approach could be used to train a single high-level policy across a multitude of tasks, facilitating knowledge transfer
The result should be a new LeRobotDataset with a new feature called task_index_high_level inside each dataset parquet
+11
View File
@@ -0,0 +1,11 @@
python examples/dataset/annotate.py \
--repo-id jadechoghari/collect-data \
--video-key observation.images.base \
--model Qwen/Qwen3-VL-30B-A3B-Instruct \
--episodes 16 22
# python examples/dataset/annotate.py \
# --repo-id lerobot/svla_so101_pickplace \
# --video-key observation.images.side \
# --model Qwen/Qwen3-VL-30B-A3B-Instruct \
# --episodes 5
+43
View File
@@ -0,0 +1,43 @@
#!/bin/bash
# Example script to run synthetic data generation with Qwen VLM
# This generates user prompts and robot utterances for hierarchical policy training
# Configuration
REPO_ID="jadechoghari/collect-data"
MODEL="Qwen/Qwen3-VL-30B-A3B-Instruct"
# Alternative: MODEL="Qwen/Qwen2-VL-7B-Instruct"
OUTPUT_DIR="/fsx/jade_choghari/outputs/collect-data-pgen"
BATCH_SIZE=32
TEMPERATURE=0.9
SAMPLE_INTERVAL=5.0 # Generate dialogue every 5 seconds (all episodes processed)
# Run synthetic data generation (processes ALL episodes)
python examples/dataset/annotate_pgen.py \
--repo-id "$REPO_ID" \
--model "$MODEL" \
--output-dir "$OUTPUT_DIR" \
--temperature "$TEMPERATURE" \
--batch-size "$BATCH_SIZE" \
--sample-interval "$SAMPLE_INTERVAL" \
--image-key observation.images.base \
--num-image-views-per-sample 1
# For faster testing, increase sample interval:
# --sample-interval 5.0 # Samples every 5 seconds (much faster)
# To push to hub after generation:
# Add --push-to-hub flag
# Efficient batch processing: 4 episodes at once
# python examples/dataset/annotate_pgen.py \
# --repo-id "$REPO_ID" \
# --model "$MODEL" \
# --output-dir "$OUTPUT_DIR" \
# --video-mode \
# --video-key observation.images.up \
# --video-batch-size "$BATCH_SIZE" \
# --sample-interval 1.0
+802
View File
@@ -0,0 +1,802 @@
#!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SARM Subtask Annotation using local GPU (Qwen3-VL).
This script implements the annotation approach from the SARM paper using local GPU inference:
"SARM: Stage-Aware Reward Modeling for Long Horizon Robot Manipulation"
Paper: https://arxiv.org/pdf/2509.25358
What it does:
1. Takes videos from a LeRobot dataset
2. Uses Qwen3-VL running locally on GPU to identify when subtasks occur
3. Saves subtask timestamps to the dataset metadata
4. Optionally pushes the annotated dataset to HuggingFace Hub
SARM trains reward models that predict:
- Stage: Which subtask is currently being executed (discrete classification)
- Progress: How far along the subtask we are (continuous 0-1)
Supports three annotation modes:
1. No annotations (no args): Auto-creates single sparse "task" stage covering full episode.
Use with SARM config annotation_mode="single_stage" for simple tasks.
2. Dense-only (--dense-only --dense-subtasks): Dense annotations from VLM, auto-generated
single sparse "task" stage. Use with annotation_mode="dense_only".
3. Dual mode (--sparse-subtasks + --dense-subtasks): Both sparse and dense annotations
from VLM. Use with annotation_mode="dual".
Requirements:
- GPU with sufficient VRAM (16GB+ recommended for 30B model)
- `pip install transformers torch qwen-vl-utils`
Run with:
```bash
python examples/dataset_annotation/subtask_annotation.py \
--repo-id your-username/your-dataset \
--sparse-subtasks "Do ..." \
--dense-subtasks "Do task 1, Do task 2, Do task 3" \
--video-key observation.images.base \
--push-to-hub
```
"""
import argparse
import json
import multiprocessing as mp
import re
import subprocess
import tempfile
import textwrap
import time
from concurrent.futures import ProcessPoolExecutor, as_completed
from pathlib import Path
import cv2
import pandas as pd
import torch
from qwen_vl_utils import process_vision_info
from rich.console import Console
from transformers import AutoProcessor, Qwen3VLMoeForConditionalGeneration
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.policies.sarm.sarm_utils import (
Subtask,
SubtaskAnnotation,
Timestamp,
compute_temporal_proportions,
)
def create_sarm_prompt(subtask_list: list[str]) -> str:
subtask_str = "\n".join([f" - {name}" for name in subtask_list])
return textwrap.dedent(f"""\
# Role
You are a Robotics Vision System specializing in temporal action localization for robot manipulation. Your job is to segment a single demonstration video into distinct, non-overlapping atomic actions from a fixed subtask list.
# Subtask Label Set (Closed Vocabulary)
You must strictly identify the video segments using ONLY the following labels. Do not create new labels or modify existing ones:
[
{subtask_str}
]
The video shows one successful execution of all subtasks in a logical order.
# Ground-Truth Semantics (Very Important)
Use **visual state changes** to define when a subtask starts and ends. Do NOT assume equal durations for the subtasks.
- A subtask **starts** at the first frame where the robot's motion clearly initiates that subtask.
- A subtask **ends** at the first frame where that specific action is visually completed and the manipulated object reaches a temporary, stable configuration.
If there are short pauses or micro-motions that don't clearly correspond to a new subtask, they belong to the **current** subtask.
# Hard Constraints & Logic
1. **Continuous Coverage (No Gaps):**
- The entire video duration from "00:00" to the final timestamp must be covered by subtasks.
- There can be no gaps between subtasks.
- If there is any idle or ambiguous time between clear actions, extend the *preceding* subtask to cover it.
2. **Boundary Consistency:**
- The `"end"` timestamp of one subtask must be exactly equal to the `"start"` timestamp of the next subtask.
- Boundaries must coincide with a real visual state transition, not just a convenient time split.
3. **Chronological Order, One Occurrence Each:**
- This is a single successful demonstration.
- Each subtask from the vocabulary appears **exactly once**, in the correct logical order.
- **Durations may be very different** between subtasks. Never assume they are similar lengths. Base all boundaries only on the video.
4. **Reject Uniform Segmentation (Important):**
- Do NOT simply divide the video into equal or nearly equal time chunks.
- If your boundaries would result in subtasks with similar durations (e.g. all around 5 seconds), treat this as evidence that your segmentation is wrong and refine the boundaries.
- Only use nearly equal durations if the video truly shows each subtask taking the same amount of time (this is very rare).
5. **Timestamps:**
- Timestamps must be in `"MM:SS"` format.
- The first subtask always starts at `"00:00"`.
- The last subtask ends at the final visible frame of the video.
# Step 1 — Textual Timeline (must do this first)
First, write an extensive and detailed textual timeline describing what happens in the video with approximate timestamps.
For each subtask, include:
- its name
- an approximate start and end time,
- a description of the visual event at the boundary (e.g. "shirt fully folded to the left", "robot rotates folded shirt 90 degrees").
Format this as a bullet list.
# Step 2 — JSON Output (final answer)
After the textual timeline, output **only** valid JSON with this structure.
The JSON **must** be consistent with the textual timeline above:
{{
"subtasks": [
{{
"name": "EXACT_NAME_FROM_LIST",
"timestamps": {{
"start": "MM:SS",
"end": "MM:SS"
}}
}},
{{
"name": "EXACT_NAME_FROM_LIST",
"timestamps": {{
"start": "MM:SS",
"end": "MM:SS"
}}
}}
]
}}
Do not add any extra keys to the JSON.
""")
class VideoAnnotator:
"""Annotates robot manipulation videos using local Qwen3-VL model on GPU"""
def __init__(
self,
subtask_list: list[str],
model_name: str = "Qwen/Qwen3-VL-30B-A3B-Instruct",
device: str = "cuda",
torch_dtype: torch.dtype = torch.bfloat16,
model: "Qwen3VLMoeForConditionalGeneration | None" = None,
processor: "AutoProcessor | None" = None,
):
"""
Initialize the video annotator with local model.
Args:
subtask_list: List of allowed subtask names (for consistency)
model_name: Hugging Face model name (default: Qwen/Qwen3-VL-30B-A3B-Instruct)
device: Device to use (cuda, cpu)
torch_dtype: Data type for model (bfloat16, float16, float32)
model: Pre-loaded model instance (optional, to share between annotators)
processor: Pre-loaded processor instance (optional, to share between annotators)
"""
self.subtask_list = subtask_list
self.prompt = create_sarm_prompt(subtask_list)
self.console = Console()
self.device = device
# Use provided model/processor or load new ones
if model is not None and processor is not None:
self.model = model
self.processor = processor
self.console.print(f"[green]✓ Using shared model on {device}[/green]")
else:
self.console.print(f"[cyan]Loading model: {model_name}...[/cyan]")
self.model = Qwen3VLMoeForConditionalGeneration.from_pretrained(
model_name, torch_dtype=torch_dtype, device_map=device, trust_remote_code=True
)
self.processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
self.console.print(f"[green]✓ Model loaded successfully on {device}[/green]")
def extract_episode_segment(
self, file_path: Path, start_timestamp: float, end_timestamp: float, target_fps: int = 1
) -> Path:
"""
Extract a specific episode segment from concatenated video.
Uses minimal compression to preserve quality for local inference.
Args:
file_path: Path to the concatenated video file
start_timestamp: Starting timestamp in seconds (within this video file)
end_timestamp: Ending timestamp in seconds (within this video file)
target_fps: Target FPS (default: 1 for faster processing)
Returns:
Path to extracted video file
"""
# Create temporary file for extracted video
tmp_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
tmp_path = Path(tmp_file.name)
tmp_file.close()
try:
# Check if ffmpeg is available
subprocess.run(
["ffmpeg", "-version"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True
)
except (subprocess.CalledProcessError, FileNotFoundError) as e:
raise RuntimeError("ffmpeg not found, cannot extract episode segment") from e
try:
# Calculate duration
duration = end_timestamp - start_timestamp
self.console.print(
f"[cyan]Extracting episode: {start_timestamp:.1f}s-{end_timestamp:.1f}s ({duration:.1f}s)[/cyan]"
)
# Use ffmpeg to extract segment with minimal quality loss
cmd = [
"ffmpeg",
"-i",
str(file_path),
"-ss",
str(start_timestamp),
"-t",
str(duration),
"-r",
str(target_fps),
"-c:v",
"libx264",
"-preset",
"ultrafast",
"-crf",
"23",
"-an",
"-y",
str(tmp_path),
]
subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
# Verify the output file was created and is not empty
if not tmp_path.exists() or tmp_path.stat().st_size == 0:
self.console.print("[red]✗ Video extraction failed (0 bytes) - skipping episode[/red]")
if tmp_path.exists():
tmp_path.unlink()
raise RuntimeError("FFmpeg produced empty video file")
# Show extraction results
file_size_mb = tmp_path.stat().st_size / (1024 * 1024)
# Fail if file is too small (< 100KB likely means extraction failed)
if file_size_mb < 0.1:
self.console.print(
f"[red]✗ Extracted video too small ({file_size_mb:.2f}MB) - skipping episode[/red]"
)
tmp_path.unlink()
raise RuntimeError(f"Video extraction produced invalid file ({file_size_mb:.2f}MB)")
self.console.print(f"[green]✓ Extracted: {file_size_mb:.1f}MB ({target_fps} FPS)[/green]")
return tmp_path
except subprocess.CalledProcessError as e:
raise RuntimeError(f"ffmpeg failed ({e})") from e
def annotate(
self,
file_path: str | Path,
fps: int,
start_timestamp: float = 0.0,
end_timestamp: float | None = None,
max_retries: int = 3,
) -> SubtaskAnnotation:
"""Annotate a video segment using local GPU."""
file_path = Path(file_path)
if end_timestamp is None:
cap = cv2.VideoCapture(str(file_path))
end_timestamp = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) / (cap.get(cv2.CAP_PROP_FPS) or 1)
cap.release()
duration = end_timestamp - start_timestamp
duration_str = f"{int(duration // 60):02d}:{int(duration % 60):02d}"
extracted_path = self.extract_episode_segment(file_path, start_timestamp, end_timestamp, 1)
is_extracted = extracted_path != file_path
try:
messages = [
{"role": "system", "content": [{"type": "text", "text": self.prompt}]},
{
"role": "user",
"content": [
{"type": "video", "video": str(extracted_path), "fps": 1.0},
{
"type": "text",
"text": f"Video is {duration_str} (~{duration:.1f}s). Follow instructions.",
},
],
},
]
for attempt in range(max_retries):
try:
text = self.processor.apply_chat_template(
messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = self.processor(
text=[text],
images=image_inputs,
videos=video_inputs,
padding=True,
return_tensors="pt",
).to(self.device)
with torch.no_grad():
generated_ids = self.model.generate(
**inputs, max_new_tokens=1024, do_sample=True, temperature=0.7
)
response = self.processor.batch_decode(
[out[len(inp) :] for inp, out in zip(inputs.input_ids, generated_ids)],
skip_special_tokens=True,
)[0].strip()
# Extract JSON
if "```json" in response:
response = response.split("```json")[1].split("```")[0]
elif "```" in response:
response = response.split("```")[1].split("```")[0]
try:
return SubtaskAnnotation.model_validate(json.loads(response))
except json.JSONDecodeError:
match = re.search(r"\{.*\}", response, re.DOTALL)
if match:
return SubtaskAnnotation.model_validate(json.loads(match.group()))
raise ValueError("No JSON found")
except Exception as e:
if attempt == max_retries - 1:
raise RuntimeError(f"Failed after {max_retries} attempts") from e
time.sleep(1)
finally:
if is_extracted and extracted_path.exists():
extracted_path.unlink()
def display_annotation(
annotation: SubtaskAnnotation, console: Console, episode_idx: int, fps: int, prefix: str = ""
):
"""Display annotation summary."""
subtask_summary = ", ".join(
f"{s.name}({s.timestamps.start}-{s.timestamps.end})" for s in annotation.subtasks
)
console.print(
f"[green]Episode {episode_idx} {prefix}: {len(annotation.subtasks)} subtasks - {subtask_summary}[/green]"
)
def timestamp_to_seconds(timestamp: str) -> float:
"""Convert MM:SS or SS timestamp to seconds"""
parts = timestamp.split(":")
if len(parts) == 2:
return int(parts[0]) * 60 + int(parts[1])
else:
return int(parts[0])
def save_annotations_to_dataset(
dataset_path: Path, annotations: dict[int, SubtaskAnnotation], fps: int, prefix: str = "sparse"
):
"""Save annotations to LeRobot dataset parquet format."""
from lerobot.datasets.utils import DEFAULT_EPISODES_PATH, load_episodes
episodes_dataset = load_episodes(dataset_path)
if not episodes_dataset or len(episodes_dataset) == 0:
return
episodes_df = episodes_dataset.to_pandas()
cols = [
f"{prefix}_{c}"
for c in [
"subtask_names",
"subtask_start_times",
"subtask_end_times",
"subtask_start_frames",
"subtask_end_frames",
]
]
for col in cols:
episodes_df[col] = None
for ep_idx, ann in annotations.items():
if ep_idx >= len(episodes_df):
continue
names, starts, ends, start_frames, end_frames = [], [], [], [], []
for s in ann.subtasks:
names.append(s.name)
st, et = timestamp_to_seconds(s.timestamps.start), timestamp_to_seconds(s.timestamps.end)
starts.append(st)
ends.append(et)
start_frames.append(int(st * fps))
end_frames.append(int(et * fps))
episodes_df.at[ep_idx, cols[0]] = names
episodes_df.at[ep_idx, cols[1]] = starts
episodes_df.at[ep_idx, cols[2]] = ends
episodes_df.at[ep_idx, cols[3]] = start_frames
episodes_df.at[ep_idx, cols[4]] = end_frames
# Group by file and write
for ep_idx in episodes_df.index:
key = (
episodes_df.loc[ep_idx, "meta/episodes/chunk_index"],
episodes_df.loc[ep_idx, "meta/episodes/file_index"],
)
path = dataset_path / DEFAULT_EPISODES_PATH.format(chunk_index=key[0], file_index=key[1])
if path.exists():
file_df = pd.read_parquet(path)
for col in cols + (
[
"subtask_names",
"subtask_start_times",
"subtask_end_times",
"subtask_start_frames",
"subtask_end_frames",
]
if prefix == "sparse"
else []
):
if col not in file_df.columns:
file_df[col] = None
if ep_idx in annotations:
for col in cols:
file_df.at[ep_idx, col] = episodes_df.loc[ep_idx, col]
if prefix == "sparse": # Legacy columns
for i, legacy in enumerate(
[
"subtask_names",
"subtask_start_times",
"subtask_end_times",
"subtask_start_frames",
"subtask_end_frames",
]
):
file_df.at[ep_idx, legacy] = episodes_df.loc[ep_idx, cols[i]]
file_df.to_parquet(path, engine="pyarrow", compression="snappy")
def generate_auto_sparse_annotations(
dataset: LeRobotDataset, episode_indices: list[int], video_key: str
) -> dict[int, SubtaskAnnotation]:
"""Auto-generate single 'task' stage annotations for all episodes."""
annotations = {}
for ep_idx in episode_indices:
start = float(dataset.meta.episodes[f"videos/{video_key}/from_timestamp"][ep_idx])
end = float(dataset.meta.episodes[f"videos/{video_key}/to_timestamp"][ep_idx])
duration = end - start
end_str = f"{int(duration // 60):02d}:{int(duration % 60):02d}"
annotations[ep_idx] = SubtaskAnnotation(
subtasks=[Subtask(name="task", timestamps=Timestamp(start="00:00", end=end_str))]
)
return annotations
def load_annotations_from_dataset(dataset_path: Path, prefix: str = "sparse") -> dict[int, SubtaskAnnotation]:
"""Load annotations from LeRobot dataset parquet files."""
from lerobot.datasets.utils import load_episodes
episodes_dataset = load_episodes(dataset_path)
if not episodes_dataset or len(episodes_dataset) == 0:
return {}
col_names = f"{prefix}_subtask_names"
col_start = f"{prefix}_subtask_start_times"
col_end = f"{prefix}_subtask_end_times"
# Fall back to legacy columns for sparse
if col_names not in episodes_dataset.column_names:
if prefix == "sparse" and "subtask_names" in episodes_dataset.column_names:
col_names, col_start, col_end = "subtask_names", "subtask_start_times", "subtask_end_times"
else:
return {}
df = episodes_dataset.to_pandas()
annotations = {}
for ep_idx in df.index:
names = df.loc[ep_idx, col_names]
if names is None or (isinstance(names, float) and pd.isna(names)):
continue
starts, ends = df.loc[ep_idx, col_start], df.loc[ep_idx, col_end]
annotations[int(ep_idx)] = SubtaskAnnotation(
subtasks=[
Subtask(
name=n,
timestamps=Timestamp(
start=f"{int(s) // 60:02d}:{int(s) % 60:02d}",
end=f"{int(e) // 60:02d}:{int(e) % 60:02d}",
),
)
for n, s, e in zip(names, starts, ends)
]
)
return annotations
def process_single_episode(
ep_idx: int,
dataset_root: Path,
dataset_meta,
video_key: str,
fps: int,
annotator: VideoAnnotator,
console: Console,
) -> tuple[int, SubtaskAnnotation | None, str | None]:
"""Process a single episode annotation."""
try:
video_path = dataset_root / dataset_meta.get_video_file_path(ep_idx, video_key)
if not video_path.exists():
return ep_idx, None, f"Video not found: {video_path}"
start = float(dataset_meta.episodes[f"videos/{video_key}/from_timestamp"][ep_idx])
end = float(dataset_meta.episodes[f"videos/{video_key}/to_timestamp"][ep_idx])
return ep_idx, annotator.annotate(video_path, fps, start, end), None
except Exception as e:
return ep_idx, None, str(e)
def worker_process_episodes(
worker_id: int,
gpu_id: int,
episode_indices: list[int],
repo_id: str,
video_key: str,
sparse_subtask_list: list[str],
dense_subtask_list: list[str] | None,
model_name: str,
torch_dtype: torch.dtype,
) -> tuple[dict, dict | None]:
"""Worker for parallel processing across GPUs."""
device = f"cuda:{gpu_id}"
console = Console()
dataset = LeRobotDataset(repo_id, download_videos=False)
sparse_annotator = VideoAnnotator(sparse_subtask_list, model_name, device, torch_dtype)
dense_annotator = (
VideoAnnotator(
dense_subtask_list,
model_name,
device,
torch_dtype,
sparse_annotator.model,
sparse_annotator.processor,
)
if dense_subtask_list
else None
)
sparse_annotations, dense_annotations = {}, {} if dense_subtask_list else None
for ep_idx in episode_indices:
_, sparse_ann, err = process_single_episode(
ep_idx, dataset.root, dataset.meta, video_key, dataset.fps, sparse_annotator, console
)
if sparse_ann:
sparse_annotations[ep_idx] = sparse_ann
if dense_annotator:
_, dense_ann, _ = process_single_episode(
ep_idx, dataset.root, dataset.meta, video_key, dataset.fps, dense_annotator, console
)
if dense_ann:
dense_annotations[ep_idx] = dense_ann
return sparse_annotations, dense_annotations
def main():
parser = argparse.ArgumentParser(description="SARM-style subtask annotation using local GPU (Qwen3-VL)")
parser.add_argument("--repo-id", type=str, required=True, help="HuggingFace dataset repository ID")
parser.add_argument(
"--sparse-subtasks", type=str, default=None, help="Comma-separated sparse subtask names"
)
parser.add_argument(
"--dense-subtasks", type=str, default=None, help="Comma-separated dense subtask names"
)
parser.add_argument(
"--dense-only", action="store_true", help="Dense-only mode with auto-generated sparse 'task' stage"
)
parser.add_argument("--episodes", type=int, nargs="+", default=None, help="Episode indices to annotate")
parser.add_argument("--model", type=str, default="Qwen/Qwen3-VL-30B-A3B-Instruct", help="VLM model")
parser.add_argument("--skip-existing", action="store_true", help="Skip already annotated episodes")
parser.add_argument("--video-key", type=str, default=None, help="Video key (default: first available)")
parser.add_argument("--push-to-hub", action="store_true", help="Push to HuggingFace Hub")
parser.add_argument("--output-repo-id", type=str, default=None, help="Output repo ID for push")
parser.add_argument("--device", type=str, default="cuda", help="Device (cuda/cpu)")
parser.add_argument("--dtype", type=str, default="bfloat16", choices=["bfloat16", "float16", "float32"])
parser.add_argument("--num-workers", type=int, default=1, help="Parallel workers for multi-GPU")
parser.add_argument("--gpu-ids", type=int, nargs="+", default=None, help="GPU IDs to use")
args = parser.parse_args()
console = Console()
# Validate arguments
if args.dense_only and not args.dense_subtasks:
return console.print("[red]Error: --dense-only requires --dense-subtasks[/red]")
if args.dense_subtasks and not args.sparse_subtasks and not args.dense_only:
return console.print("[red]Error: --dense-subtasks requires --sparse-subtasks or --dense-only[/red]")
sparse_subtask_list = (
[s.strip() for s in args.sparse_subtasks.split(",")] if args.sparse_subtasks else None
)
dense_subtask_list = [s.strip() for s in args.dense_subtasks.split(",")] if args.dense_subtasks else None
auto_sparse = sparse_subtask_list is None
dense_mode = dense_subtask_list is not None
torch_dtype = {"bfloat16": torch.bfloat16, "float16": torch.float16, "float32": torch.float32}[args.dtype]
console.print(f"[cyan]Loading dataset: {args.repo_id}[/cyan]")
dataset = LeRobotDataset(args.repo_id, download_videos=True)
fps = dataset.fps
if not dataset.meta.video_keys:
raise ValueError("No video keys found")
video_key = (
args.video_key if args.video_key in (dataset.meta.video_keys or []) else dataset.meta.video_keys[0]
)
console.print(f"[cyan]Using camera: {video_key}, FPS: {fps}[/cyan]")
# Determine episodes
episode_indices = args.episodes or list(range(dataset.meta.total_episodes))
existing_annotations = load_annotations_from_dataset(dataset.root, prefix="sparse")
if args.skip_existing:
episode_indices = [ep for ep in episode_indices if ep not in existing_annotations]
if not episode_indices:
return console.print("[green]All episodes already annotated![/green]")
console.print(f"[cyan]Annotating {len(episode_indices)} episodes[/cyan]")
# GPU setup
gpu_ids = args.gpu_ids or list(
range(min(args.num_workers, torch.cuda.device_count() if torch.cuda.is_available() else 1))
)
args.num_workers = len(gpu_ids)
sparse_annotations = existing_annotations.copy()
dense_annotations = {} if dense_mode else None
# Auto-sparse mode
if auto_sparse:
sparse_annotations.update(generate_auto_sparse_annotations(dataset, episode_indices, video_key))
save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse")
console.print(f"[green]Auto-generated {len(episode_indices)} sparse 'task' annotations[/green]")
# VLM annotation (for sparse if not auto, and for dense)
need_vlm = (not auto_sparse) or dense_mode
if need_vlm:
if args.num_workers > 1 and not auto_sparse:
# Parallel processing
console.print(f"[cyan]Parallel processing with {args.num_workers} workers[/cyan]")
episodes_per_worker = [[] for _ in range(args.num_workers)]
for i, ep_idx in enumerate(episode_indices):
episodes_per_worker[i % args.num_workers].append(ep_idx)
with ProcessPoolExecutor(
max_workers=args.num_workers, mp_context=mp.get_context("spawn")
) as executor:
futures = [
executor.submit(
worker_process_episodes,
w,
gpu_ids[w],
episodes_per_worker[w],
args.repo_id,
video_key,
sparse_subtask_list,
dense_subtask_list,
args.model,
torch_dtype,
)
for w in range(args.num_workers)
if episodes_per_worker[w]
]
for future in as_completed(futures):
try:
worker_sparse, worker_dense = future.result()
sparse_annotations.update(worker_sparse)
if dense_mode and worker_dense:
dense_annotations.update(worker_dense)
save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse")
if dense_mode:
save_annotations_to_dataset(dataset.root, dense_annotations, fps, prefix="dense")
except Exception as e:
raise RuntimeError(f"Worker failed: {e}") from e
else:
# Sequential processing
sparse_annotator = (
VideoAnnotator(sparse_subtask_list, args.model, args.device, torch_dtype)
if not auto_sparse and sparse_subtask_list
else None
)
dense_annotator = (
VideoAnnotator(
dense_subtask_list,
args.model,
args.device,
torch_dtype,
sparse_annotator.model if sparse_annotator else None,
sparse_annotator.processor if sparse_annotator else None,
)
if dense_mode
else None
)
for i, ep_idx in enumerate(episode_indices):
console.print(f"[cyan]Episode {ep_idx} ({i + 1}/{len(episode_indices)})[/cyan]")
if sparse_annotator:
_, sparse_ann, err = process_single_episode(
ep_idx, dataset.root, dataset.meta, video_key, fps, sparse_annotator, console
)
if sparse_ann:
sparse_annotations[ep_idx] = sparse_ann
save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse")
elif err:
console.print(f"[red]Sparse failed: {err}[/red]")
if dense_annotator:
_, dense_ann, err = process_single_episode(
ep_idx, dataset.root, dataset.meta, video_key, fps, dense_annotator, console
)
if dense_ann:
dense_annotations[ep_idx] = dense_ann
save_annotations_to_dataset(dataset.root, dense_annotations, fps, prefix="dense")
elif err:
console.print(f"[red]Dense failed: {err}[/red]")
# Save temporal proportions
def save_proportions(annotations, prefix, is_auto=False):
props: dict[str, float] = {"task": 1.0} if is_auto else compute_temporal_proportions(annotations, fps)
path = dataset.root / "meta" / f"temporal_proportions_{prefix}.json"
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, "w") as f:
json.dump(props, f, indent=2)
console.print(f"[green]Saved {prefix} temporal proportions[/green]")
save_proportions(sparse_annotations, "sparse", auto_sparse)
if dense_mode and dense_annotations:
save_proportions(dense_annotations, "dense")
console.print(
f"\n[bold green]Complete! {len(sparse_annotations)} sparse, {len(dense_annotations or {})} dense annotations[/bold green]"
)
if args.push_to_hub:
try:
dataset.push_to_hub(push_videos=True)
console.print(f"[green]Pushed to {args.output_repo_id or args.repo_id}[/green]")
except Exception as e:
console.print(f"[red]Push failed: {e}[/red]")
if __name__ == "__main__":
main()
+1
View File
@@ -0,0 +1 @@
srun --time 12:00:00 --qos=high --gres=gpu:1 --mem=24G --partition=hopper-prod --container-image /fsx/michel_aractingi/docker_images/huggingface+lerobot-gpu+dev.sqsh --container-mounts /fsx/jade_choghari
+44
View File
@@ -0,0 +1,44 @@
#!/bin/bash
# Quick test to verify the fix for task_indices length mismatch
# This should now work correctly even with --num-samples < full dataset length
echo "Testing annotate_pgen.py with --num-samples=100 on full dataset..."
python examples/dataset/annotate_pgen.py \
--data-dir /fsx/jade_choghari/.cache/huggingface/lerobot/lerobot/svla_so101_pickplace \
--model Qwen/Qwen3-VL-30B-A3B-Instruct \
--num-samples 100 \
--sample-interval 1.0 \
--output-dir /fsx/jade_choghari/outputs/pgen_test_fixed
EXIT_CODE=$?
if [ $EXIT_CODE -eq 0 ]; then
echo "✓ SUCCESS: Script completed without errors!"
echo ""
echo "Verifying output..."
# Check that all frames have task_index_high_level
python -c "
from lerobot.datasets.lerobot_dataset import LeRobotDataset
import numpy as np
ds = LeRobotDataset(repo_id='local_test', root='/fsx/jade_choghari/outputs/pgen_test_fixed')
print(f'Dataset has {len(ds)} frames')
print(f'Features: {list(ds.features.keys())}')
# Check that task_index_high_level exists
assert 'task_index_high_level' in ds.features, 'task_index_high_level not in features!'
# Sample some frames
for idx in [0, 50, 99, 100, 500, 1000, 11938]:
if idx < len(ds):
frame = ds[idx]
task_idx = frame['task_index_high_level'].item()
print(f'Frame {idx}: task_index_high_level = {task_idx}')
print('✓ All checks passed!')
"
else
echo "✗ FAILED: Script exited with error code $?"
fi
@@ -1,416 +0,0 @@
#!/usr/bin/env python3
"""
Comprehensive debug script for OpenArms CAN FD communication.
Tests all 4 CAN interfaces with CAN FD support.
"""
import can
import time
import sys
import subprocess
def check_can_interface(port):
"""Check if CAN interface is UP and configured."""
try:
result = subprocess.run(['ip', 'link', 'show', port],
capture_output=True, text=True)
if result.returncode != 0:
return False, "Interface not found", None
output = result.stdout
if 'UP' not in output:
return False, "Interface is DOWN", None
# Check if CAN FD is enabled
is_fd = 'fd on' in output.lower() or 'canfd' in output.lower()
return True, "Interface is UP", is_fd
except FileNotFoundError:
return None, "Cannot check (ip command not found)", None
def test_motor_on_interface(bus, motor_id, timeout=2.0, use_fd=False):
"""
Test a single motor and return all responses.
Returns:
list of (arbitration_id, data) tuples for all responses received
"""
# Send enable command
enable_msg = can.Message(
arbitration_id=motor_id,
data=[0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC],
is_extended_id=False,
is_fd=use_fd
)
try:
bus.send(enable_msg)
except Exception as e:
return None, f"Send error: {e}"
# Listen for responses
responses = []
start_time = time.time()
while time.time() - start_time < timeout:
msg = bus.recv(timeout=0.1)
if msg:
responses.append((msg.arbitration_id, msg.data, msg.is_fd if hasattr(msg, 'is_fd') else False))
# Send disable command
disable_msg = can.Message(
arbitration_id=motor_id,
data=[0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFD],
is_extended_id=False,
is_fd=use_fd
)
try:
bus.send(disable_msg)
except Exception:
pass
return responses, None
def test_interface(port, interface_type="socketcan", use_can_fd=True):
"""Test all 8 motors on a single CAN interface."""
results = {
'interface': port,
'status': None,
'is_fd': use_can_fd,
'motors': {}
}
# Check interface status
status_ok, status_msg, interface_has_fd = check_can_interface(port)
if interface_has_fd is not None:
results['interface_fd_enabled'] = interface_has_fd
if use_can_fd and not interface_has_fd:
status_msg += " (CAN FD NOT enabled on interface!)"
elif interface_has_fd:
status_msg += " (CAN FD enabled)"
results['status'] = status_msg
if status_ok is False:
return results
# Try to connect
try:
if use_can_fd:
print(f" Connecting to {port} with CAN FD (1 Mbps / 5 Mbps)...")
bus = can.interface.Bus(
channel=port,
interface=interface_type,
bitrate=1000000,
data_bitrate=5000000,
fd=True
)
else:
print(f" Connecting to {port} with CAN 2.0 (1 Mbps)...")
bus = can.interface.Bus(
channel=port,
interface=interface_type,
bitrate=1000000
)
except Exception as e:
results['status'] = f"Connection failed: {e}"
return results
try:
# Clear any pending messages
while bus.recv(timeout=0.01):
pass
# Test each motor (0x01 to 0x08)
for motor_id in range(0x01, 0x09):
responses, error = test_motor_on_interface(bus, motor_id, timeout=1.0, use_fd=use_can_fd)
if error:
results['motors'][motor_id] = {'error': error}
elif responses:
results['motors'][motor_id] = {
'found': True,
'responses': responses
}
else:
results['motors'][motor_id] = {
'found': False,
'responses': []
}
time.sleep(0.05) # Small delay between motors
finally:
bus.shutdown()
return results
def print_results(all_results):
"""Print formatted results for all interfaces."""
print("SUMMARY - Motors Found on Each Interface")
motor_names = {
0x01: "joint_1 (Shoulder pan)",
0x02: "joint_2 (Shoulder lift)",
0x03: "joint_3 (Shoulder rotation)",
0x04: "joint_4 (Elbow flex)",
0x05: "joint_5 (Wrist roll)",
0x06: "joint_6 (Wrist pitch)",
0x07: "joint_7 (Wrist rotation)",
0x08: "gripper",
}
total_found = 0
for result in all_results:
interface = result['interface']
status = result['status']
print(f"{interface}: {status}")
if result.get('is_fd'):
print(f" Mode: CAN FD")
else:
print(f" Mode: CAN 2.0")
if 'Connection failed' in status or 'DOWN' in status:
print(f" ⚠ Cannot test {interface}")
continue
motors_found = 0
for motor_id in range(0x01, 0x09):
motor_data = result['motors'].get(motor_id, {})
motor_name = motor_names.get(motor_id, "Unknown")
if motor_data.get('error'):
print(f" Motor 0x{motor_id:02X} ({motor_name}): ✗ {motor_data['error']}")
elif motor_data.get('found'):
motors_found += 1
total_found += 1
responses = motor_data['responses']
print(f" Motor 0x{motor_id:02X} ({motor_name}): ✓ FOUND")
for resp_id, data, is_fd in responses:
data_hex = data.hex()
fd_flag = " [FD]" if is_fd else " [2.0]"
print(f" → Response from 0x{resp_id:02X}{fd_flag}: {data_hex}")
else:
print(f" Motor 0x{motor_id:02X} ({motor_name}): ✗ No response")
print(f"\n Summary: {motors_found}/8 motors found on {interface}")
# Overall summary
print("OVERALL SUMMARY")
print(f"Total motors found across all interfaces: {total_found}")
# Analyze configuration
print("DIAGNOSIS")
for result in all_results:
interface = result['interface']
motors_found = sum(1 for m in result['motors'].values() if m.get('found'))
if motors_found == 0:
print(f"\n{interface}: NO MOTORS FOUND")
print(" Possible issues:")
print(" 1. CAN FD mode mismatch (interface vs motor configuration)")
print(" 2. Missing 120Ω termination resistors at BOTH cable ends")
print(" 3. Motor timeout parameter set incorrectly (should NOT be 0)")
print(" 4. CANH/CANL wiring issue")
print(" 5. Cable too long (>40m for CAN FD at 5Mbps)")
# Check FD mismatch
if result.get('is_fd') and not result.get('interface_fd_enabled'):
print(" ⚠️ CRITICAL: Trying CAN FD but interface NOT configured for FD!")
print(f" Fix: sudo ip link set {interface} type can bitrate 1000000 dbitrate 5000000 fd on")
elif motors_found < 8:
print(f"\n{interface}: Only {motors_found}/8 motors responding")
print(" Check power and connections for missing motors")
else:
print(f"\n{interface}: All 8 motors responding correctly!")
# Check for unexpected response IDs
print("RESPONSE ID ANALYSIS")
for result in all_results:
interface = result['interface']
unexpected = []
for motor_id, motor_data in result['motors'].items():
if motor_data.get('found'):
expected_id = motor_id + 0x10
actual_ids = [resp[0] for resp in motor_data['responses']]
if expected_id not in actual_ids:
unexpected.append((motor_id, actual_ids))
if unexpected:
print(f"\n{interface}: Unexpected response IDs detected")
for motor_id, actual_ids in unexpected:
expected_id = motor_id + 0x10
print(f" Motor 0x{motor_id:02X}: Expected 0x{expected_id:02X}, "
f"got {[f'0x{id:02X}' for id in actual_ids]}")
print(" → Motor Master IDs need reconfiguration")
else:
motors_found = sum(1 for m in result['motors'].values() if m.get('found'))
if motors_found > 0:
print(f"\n{interface}: All responding motors use correct IDs")
def test_communication_speed(interface, motor_id, num_iterations=100):
"""
Test communication speed with a motor.
Returns:
tuple: (hz, avg_latency_ms) or (None, None) if test failed
"""
try:
# Connect to interface
bus = can.interface.Bus(
channel=interface,
interface="socketcan",
bitrate=1000000,
data_bitrate=5000000,
fd=True
)
# Send refresh commands and measure round-trip time
latencies = []
successful = 0
for _ in range(num_iterations):
start = time.perf_counter()
# Send enable command (lightweight operation)
enable_msg = can.Message(
arbitration_id=motor_id,
data=[0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC],
is_extended_id=False,
is_fd=True
)
bus.send(enable_msg)
# Wait for response
msg = bus.recv(timeout=0.1)
if msg:
latency = (time.perf_counter() - start) * 1000 # Convert to ms
latencies.append(latency)
successful += 1
bus.shutdown()
if successful > 0:
avg_latency = sum(latencies) / len(latencies)
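# avg_latency is in milliseconds, so 1000 / avg_latency gives round-trips per second (Hz)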
hz = 1000.0 / avg_latency if avg_latency > 0 else 0
return hz, avg_latency
return None, None
except Exception as e:
print(f" Speed test error: {e}")
return None, None
def main():
"""Main function to test all CAN interfaces with CAN FD."""
print("\nThis will test all 4 CAN interfaces (can0-can3) with CAN FD")
print("Testing motors 0x01-0x08 on each interface")
print()
print("Make sure:")
print(" ✓ Motors are powered (24V)")
print(" ✓ CAN interfaces configured with FD mode:")
print(" ./examples/openarms/setup_can.sh")
print(" ✓ Motor 'timeout' parameter NOT set to 0 (use Damiao tools)")
print(" ✓ CAN wiring includes 120Ω termination at BOTH ends")
print()
input("Press ENTER to start testing...")
# Test all 4 interfaces with CAN FD
all_results = []
for i in range(4):
interface = f"can{i}"
print(f"Testing {interface}...")
result = test_interface(interface, use_can_fd=True)
all_results.append(result)
# Quick status
if 'Connection failed' in result['status'] or 'DOWN' in result['status']:
print(f"{interface}: {result['status']}")
else:
motors_found = sum(1 for m in result['motors'].values() if m.get('found'))
print(f" {interface}: {motors_found}/8 motors found")
time.sleep(0.2)
# Print detailed results
print_results(all_results)
print("Testing Complete!")
all_found = sum(sum(1 for m in r['motors'].values() if m.get('found')) for r in all_results)
if all_found == 0:
print("\n⚠️ CRITICAL: No motors found on any interface!")
print("\nTop issues to check:")
print(" 1. Motor 'timeout' parameter (use Damiao tools to set > 0)")
print(" 2. CAN FD not enabled (run ./examples/openarms/setup_can.sh)")
print(" 3. Missing termination resistors")
print("\nTry:")
print(" a) Check motor parameters with Damiao Debugging Tools")
print(" b) Verify CAN FD is enabled: ip -d link show can0 | grep fd")
print(" c) Run setup script: ./examples/openarms/setup_can.sh")
else:
# Run speed test on interfaces with motors
print("COMMUNICATION SPEED TEST")
print("\nTesting maximum communication frequency...")
for result in all_results:
interface = result['interface']
# Find first responding motor
responding_motor = None
for motor_id, motor_data in result['motors'].items():
if motor_data.get('found'):
responding_motor = motor_id
break
if responding_motor:
print(f"\n{interface}: Testing with motor 0x{responding_motor:02X}...")
hz, latency = test_communication_speed(interface, responding_motor, num_iterations=100)
if hz:
print(f" ✓ Max frequency: {hz:.1f} Hz")
print(f" ✓ Avg latency: {latency:.2f} ms")
print(f" ✓ Commands per second: ~{int(hz)}")
else:
print(f" ✗ Speed test failed")
else:
print(f"\n{interface}: No motors found, skipping speed test")
print()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("\n\nTesting interrupted by user.")
sys.exit(1)
except Exception as e:
print(f"\nUnexpected error: {e}")
import traceback
traceback.print_exc()
sys.exit(1)
-360
View File
@@ -1,360 +0,0 @@
#!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenArms Policy Evaluation
Evaluates a trained policy on the OpenArms robot by running inference and recording
the evaluation episodes to a dataset. Supports optional leader arm for manual resets.
Example usage:
python examples/openarms/evaluate.py
"""
import time
from pathlib import Path
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.configs.policies import PreTrainedConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.utils import combine_feature_dicts
from lerobot.policies.factory import make_policy, make_pre_post_processors
from lerobot.processor import make_default_processors
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.scripts.lerobot_record import record_loop
from lerobot.teleoperators.openarms.config_openarms_leader import OpenArmsLeaderConfig
from lerobot.teleoperators.openarms.openarms_leader import OpenArmsLeader
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun
HF_MODEL_ID = "lerobot-data-collection/three-folds-pi0" # TODO: Replace with your trained model
HF_EVAL_DATASET_ID = "lerobot-data-collection/three-folds-pi0_eval7" # TODO: Replace with your eval dataset name
TASK_DESCRIPTION = "three-folds-dataset"  # TODO: Replace with your task; it must match the task description used when recording the training dataset
NUM_EPISODES = 1
FPS = 30
EPISODE_TIME_SEC = 300
RESET_TIME_SEC = 60
# Robot CAN interfaces
FOLLOWER_LEFT_PORT = "can0"
FOLLOWER_RIGHT_PORT = "can1"
# If enabled, you can manually reset the environment between evaluation episodes
USE_LEADER_FOR_RESETS = True # Set to False if you don't want to use leader
LEADER_LEFT_PORT = "can2"
LEADER_RIGHT_PORT = "can3"
# Camera configuration
CAMERA_CONFIG = {
"left_wrist": OpenCVCameraConfig(index_or_path="/dev/video5", width=640, height=480, fps=FPS),
"right_wrist": OpenCVCameraConfig(index_or_path="/dev/video1", width=640, height=480, fps=FPS),
"base": OpenCVCameraConfig(index_or_path="/dev/video3", width=640, height=480, fps=FPS),
}
def main():
"""Main evaluation function."""
print("OpenArms Policy Evaluation")
print(f"\nModel: {HF_MODEL_ID}")
print(f"Evaluation Dataset: {HF_EVAL_DATASET_ID}")
print(f"Task: {TASK_DESCRIPTION}")
print(f"Episodes: {NUM_EPISODES}")
print(f"Episode Duration: {EPISODE_TIME_SEC}s")
print(f"Reset Duration: {RESET_TIME_SEC}s")
print(f"Use Leader for Resets: {USE_LEADER_FOR_RESETS}")
follower_config = OpenArmsFollowerConfig(
port_left=FOLLOWER_LEFT_PORT,
port_right=FOLLOWER_RIGHT_PORT,
can_interface="socketcan",
id="openarms_follower",
disable_torque_on_disconnect=True,
max_relative_target=10.0,
cameras=CAMERA_CONFIG,
)
follower = OpenArmsFollower(follower_config)
follower.connect(calibrate=False)
if not follower.is_connected:
raise RuntimeError("Follower robot failed to connect!")
leader = None
if USE_LEADER_FOR_RESETS:
leader_config = OpenArmsLeaderConfig(
port_left=LEADER_LEFT_PORT,
port_right=LEADER_RIGHT_PORT,
can_interface="socketcan",
id="openarms_leader",
manual_control=False, # Enable torque control for gravity compensation
)
leader = OpenArmsLeader(leader_config)
leader.connect(calibrate=False)
if not leader.is_connected:
raise RuntimeError("Leader robot failed to connect!")
# Enable gravity compensation
if leader.pin_robot is not None:
leader.bus_right.enable_torque()
leader.bus_left.enable_torque()
time.sleep(0.1)
print(f"Leader connected with gravity compensation ({LEADER_LEFT_PORT}, {LEADER_RIGHT_PORT})")
else:
print(f"Leader connected but gravity compensation unavailable (no URDF)")
# Build default processors for action and observation
teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors()
# Build dataset features from robot features and processors
# For actions, only include positions (no velocity or torque)
action_features_hw = {}
for key, value in follower.action_features.items():
if key.endswith(".pos"):
action_features_hw[key] = value
dataset_features = combine_feature_dicts(
aggregate_pipeline_dataset_features(
pipeline=teleop_action_processor,
initial_features=create_initial_features(action=action_features_hw),
use_videos=True,
),
aggregate_pipeline_dataset_features(
pipeline=robot_observation_processor,
initial_features=create_initial_features(observation=follower.observation_features),
use_videos=True,
),
)
# Check if dataset already exists
dataset_path = Path.home() / ".cache" / "huggingface" / "lerobot" / HF_EVAL_DATASET_ID
if dataset_path.exists():
print(f"Evaluation dataset already exists at: {dataset_path}")
print("This will append new episodes to the existing dataset.")
choice = input(" Continue? (y/n): ").strip().lower()
if choice != 'y':
print(" Aborting evaluation.")
follower.disconnect()
if leader:
leader.disconnect()
return
# Create dataset
dataset = LeRobotDataset.create(
repo_id=HF_EVAL_DATASET_ID,
fps=FPS,
features=dataset_features,
robot_type=follower.name,
use_videos=True,
image_writer_processes=0,
image_writer_threads=12,
)
# Load policy config from pretrained model and create policy using factory
policy_config = PreTrainedConfig.from_pretrained(HF_MODEL_ID)
policy_config.pretrained_path = HF_MODEL_ID
policy = make_policy(policy_config, ds_meta=dataset.meta)
preprocessor, postprocessor = make_pre_post_processors(
policy_cfg=policy.config,
pretrained_path=HF_MODEL_ID,
dataset_stats=dataset.meta.stats,
preprocessor_overrides={
"device_processor": {"device": str(policy.config.device)}
},
)
print(f"\nRunning evaluation...")
# Initialize keyboard listener and visualization
listener, events = init_keyboard_listener()
init_rerun(session_name="openarms_evaluation")
episode_idx = 0
try:
while episode_idx < NUM_EPISODES and not events["stop_recording"]:
log_say(f"Evaluating episode {episode_idx + 1} of {NUM_EPISODES}")
print(f"\nRunning inference for episode {episode_idx + 1}...")
# Run inference with policy
record_loop(
robot=follower,
events=events,
fps=FPS,
teleop_action_processor=teleop_action_processor,
robot_action_processor=robot_action_processor,
robot_observation_processor=robot_observation_processor,
policy=policy,
preprocessor=preprocessor,
postprocessor=postprocessor,
dataset=dataset,
control_time_s=EPISODE_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
)
# Handle re-recording
if events["rerecord_episode"]:
log_say("Re-recording episode")
events["rerecord_episode"] = False
events["exit_early"] = False
dataset.clear_episode_buffer()
continue
# Save episode
if dataset.episode_buffer is not None and dataset.episode_buffer.get("size", 0) > 0:
print(f"Saving episode {episode_idx + 1} ({dataset.episode_buffer['size']} frames)...")
dataset.save_episode()
episode_idx += 1
# Reset environment between episodes (if not last episode)
if not events["stop_recording"] and episode_idx < NUM_EPISODES:
if USE_LEADER_FOR_RESETS and leader:
log_say("Reset the environment using leader arms")
print(f"\nManual reset period ({RESET_TIME_SEC}s)...")
# Use leader for manual reset with gravity compensation
import numpy as np
dt = 1 / FPS
reset_start_time = time.perf_counter()
while time.perf_counter() - reset_start_time < RESET_TIME_SEC:
if events["exit_early"] or events["stop_recording"]:
break
loop_start = time.perf_counter()
# Get leader state
leader_action = leader.get_action()
# Extract positions and velocities
leader_positions_deg = {}
leader_velocities_deg_per_sec = {}
for motor in leader.bus_right.motors:
pos_key = f"right_{motor}.pos"
vel_key = f"right_{motor}.vel"
if pos_key in leader_action:
leader_positions_deg[f"right_{motor}"] = leader_action[pos_key]
if vel_key in leader_action:
leader_velocities_deg_per_sec[f"right_{motor}"] = leader_action[vel_key]
for motor in leader.bus_left.motors:
pos_key = f"left_{motor}.pos"
vel_key = f"left_{motor}.vel"
if pos_key in leader_action:
leader_positions_deg[f"left_{motor}"] = leader_action[pos_key]
if vel_key in leader_action:
leader_velocities_deg_per_sec[f"left_{motor}"] = leader_action[vel_key]
# Calculate gravity and friction torques
leader_positions_rad = {k: np.deg2rad(v) for k, v in leader_positions_deg.items()}
leader_gravity_torques_nm = leader._gravity_from_q(leader_positions_rad)
leader_velocities_rad_per_sec = {k: np.deg2rad(v) for k, v in leader_velocities_deg_per_sec.items()}
leader_friction_torques_nm = leader._friction_from_velocity(
leader_velocities_rad_per_sec,
friction_scale=1.0
)
# Combine torques
leader_total_torques_nm = {}
for motor_name in leader_gravity_torques_nm:
gravity = leader_gravity_torques_nm.get(motor_name, 0.0)
friction = leader_friction_torques_nm.get(motor_name, 0.0)
leader_total_torques_nm[motor_name] = gravity + friction
# Apply compensation
for motor in leader.bus_right.motors:
full_name = f"right_{motor}"
position = leader_positions_deg.get(full_name, 0.0)
torque = leader_total_torques_nm.get(full_name, 0.0)
kd = leader.get_damping_kd(motor)
leader.bus_right._mit_control(
motor=motor, kp=0.0, kd=kd,
position_degrees=position,
velocity_deg_per_sec=0.0,
torque=torque,
)
for motor in leader.bus_left.motors:
full_name = f"left_{motor}"
position = leader_positions_deg.get(full_name, 0.0)
torque = leader_total_torques_nm.get(full_name, 0.0)
kd = leader.get_damping_kd(motor)
leader.bus_left._mit_control(
motor=motor, kp=0.0, kd=kd,
position_degrees=position,
velocity_deg_per_sec=0.0,
torque=torque,
)
# Send leader positions to follower
follower_action = {}
for joint in leader_positions_deg.keys():
pos_key = f"{joint}.pos"
if pos_key in leader_action:
follower_action[pos_key] = leader_action[pos_key]
if follower_action:
follower.send_action(follower_action)
# Maintain loop rate
loop_duration = time.perf_counter() - loop_start
sleep_time = dt - loop_duration
if sleep_time > 0:
time.sleep(sleep_time)
print("Reset complete")
else:
log_say("Waiting for manual reset")
print(f"Manually reset the environment and press ENTER to continue")
input("Press ENTER when ready...")
print(f"Evaluation complete! {episode_idx} episodes recorded")
log_say("Evaluation complete", blocking=True)
except KeyboardInterrupt:
print("\n\nEvaluation interrupted by user")
finally:
if leader:
leader.bus_right.disable_torque()
leader.bus_left.disable_torque()
time.sleep(0.1)
leader.disconnect()
follower.disconnect()
if listener is not None:
listener.stop()
dataset.finalize()
print("\nUploading to Hugging Face Hub...")
dataset.push_to_hub(private=True)
if __name__ == "__main__":
main()
-653
View File
@@ -1,653 +0,0 @@
#!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenArms Policy Evaluation with Real-Time Chunking (RTC)
Evaluates a trained policy on the OpenArms robot using RTC for smooth, continuous motion.
RTC enables large flow-matching policies (Pi0, Pi0.5, SmolVLA) to produce reactive motion
despite high inference latency by asynchronously generating action chunks.
Features:
- Thread-based asynchronous action generation and execution
- RTC for smooth transitions between action chunks
- Dataset recording for evaluation episodes
Example usage:
python examples/openarms/evaluate_with_rtc.py
# With custom RTC parameters
python examples/openarms/evaluate_with_rtc.py \
--rtc.execution_horizon=12 \
--rtc.max_guidance_weight=10.0
"""
import logging
import math
import sys
import time
import traceback
from dataclasses import dataclass, field
from pathlib import Path
from threading import Event, Lock, Thread
import torch
from torch import Tensor
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.configs import parser
from lerobot.configs.policies import PreTrainedConfig
from lerobot.configs.types import RTCAttentionSchedule
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.utils import build_dataset_frame, combine_feature_dicts, hw_to_dataset_features
from lerobot.policies.factory import get_policy_class, make_pre_post_processors
from lerobot.policies.rtc.action_queue import ActionQueue
from lerobot.policies.rtc.configuration_rtc import RTCConfig
from lerobot.policies.rtc.latency_tracker import LatencyTracker
from lerobot.processor import make_default_processors
from lerobot.rl.process import ProcessSignalHandler
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.utils.hub import HubMixin
from lerobot.utils.utils import init_logging, log_say
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ============================================================================
# Default Configuration Constants
# ============================================================================
DEFAULT_HF_MODEL_ID = "lerobot-data-collection/three-folds-pi0"
DEFAULT_HF_EVAL_DATASET_ID = "lerobot-data-collection/three-folds-pi0_eval_rtc"
DEFAULT_TASK_DESCRIPTION = "three-folds-dataset"
DEFAULT_NUM_EPISODES = 1
DEFAULT_FPS = 30
DEFAULT_EPISODE_TIME_SEC = 300
DEFAULT_RESET_TIME_SEC = 60
DEFAULT_FOLLOWER_LEFT_PORT = "can0"
DEFAULT_FOLLOWER_RIGHT_PORT = "can1"
DEFAULT_CAMERA_CONFIG = {
"left_wrist": OpenCVCameraConfig(index_or_path="/dev/video5", width=640, height=480, fps=DEFAULT_FPS),
"right_wrist": OpenCVCameraConfig(index_or_path="/dev/video1", width=640, height=480, fps=DEFAULT_FPS),
"base": OpenCVCameraConfig(index_or_path="/dev/video3", width=640, height=480, fps=DEFAULT_FPS),
}
# ============================================================================
# Thread-Safe Robot Wrapper
# ============================================================================
class RobotWrapper:
"""Thread-safe wrapper for robot operations."""
def __init__(self, robot: OpenArmsFollower):
self.robot = robot
self.lock = Lock()
def get_observation(self) -> dict[str, Tensor]:
with self.lock:
return self.robot.get_observation()
def send_action(self, action: dict) -> None:
with self.lock:
self.robot.send_action(action)
@property
def observation_features(self) -> dict:
with self.lock:
return self.robot.observation_features
@property
def action_features(self) -> dict:
with self.lock:
return self.robot.action_features
@property
def name(self) -> str:
return self.robot.name
# ============================================================================
# Configuration
# ============================================================================
@dataclass
class OpenArmsRTCEvalConfig(HubMixin):
"""Configuration for OpenArms evaluation with RTC."""
policy: PreTrainedConfig | None = None
rtc: RTCConfig = field(
default_factory=lambda: RTCConfig(
enabled=True,
execution_horizon=10,
max_guidance_weight=10.0,
prefix_attention_schedule=RTCAttentionSchedule.EXP,
)
)
model_id: str = DEFAULT_HF_MODEL_ID
eval_dataset_id: str = DEFAULT_HF_EVAL_DATASET_ID
task: str = DEFAULT_TASK_DESCRIPTION
num_episodes: int = DEFAULT_NUM_EPISODES
fps: float = DEFAULT_FPS
episode_time_sec: float = DEFAULT_EPISODE_TIME_SEC
reset_time_sec: float = DEFAULT_RESET_TIME_SEC
follower_left_port: str = DEFAULT_FOLLOWER_LEFT_PORT
follower_right_port: str = DEFAULT_FOLLOWER_RIGHT_PORT
device: str = "cuda"
# Should be higher than inference_delay + execution_horizon
action_queue_size_to_get_new_actions: int = 30
record_dataset: bool = True
push_to_hub: bool = True
use_torch_compile: bool = False
torch_compile_backend: str = "inductor"
torch_compile_mode: str = "default"
torch_compile_disable_cudagraphs: bool = True
def __post_init__(self):
policy_path = parser.get_path_arg("policy")
if policy_path:
cli_overrides = parser.get_cli_overrides("policy")
self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
self.policy.pretrained_path = policy_path
self.model_id = policy_path
elif self.model_id:
self.policy = PreTrainedConfig.from_pretrained(self.model_id)
self.policy.pretrained_path = self.model_id
@classmethod
def __get_path_fields__(cls) -> list[str]:
return ["policy"]
# ============================================================================
# Action Generation Thread
# ============================================================================
def get_actions_thread(
policy,
robot: RobotWrapper,
robot_observation_processor,
action_queue: ActionQueue,
shutdown_event: Event,
cfg: OpenArmsRTCEvalConfig,
episode_active: Event,
):
"""Thread function to asynchronously generate action chunks from the policy."""
try:
logger.info("[GET_ACTIONS] Starting action generation thread")
latency_tracker = LatencyTracker()
time_per_chunk = 1.0 / cfg.fps
hw_features = hw_to_dataset_features(robot.observation_features, "observation")
policy_device = policy.config.device
logger.info(f"[GET_ACTIONS] Loading preprocessor/postprocessor from {cfg.policy.pretrained_path}")
preprocessor, postprocessor = make_pre_post_processors(
policy_cfg=cfg.policy,
pretrained_path=cfg.policy.pretrained_path,
dataset_stats=None,
preprocessor_overrides={
"device_processor": {"device": cfg.device},
},
)
logger.info("[GET_ACTIONS] Preprocessor/postprocessor loaded successfully")
get_actions_threshold = cfg.action_queue_size_to_get_new_actions
if not cfg.rtc.enabled:
get_actions_threshold = 0
while not shutdown_event.is_set():
if not episode_active.is_set():
time.sleep(0.01)
continue
if action_queue.qsize() <= get_actions_threshold:
current_time = time.perf_counter()
action_index_before_inference = action_queue.get_action_index()
prev_actions = action_queue.get_left_over()
inference_latency = latency_tracker.max()
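# Convert the worst observed latency into a number of control steps: how many actions
# will already have executed by the time the new chunk is ready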
inference_delay = math.ceil(inference_latency / time_per_chunk) if inference_latency else 0
obs = robot.get_observation()
obs_processed = robot_observation_processor(obs)
obs_with_policy_features = build_dataset_frame(
hw_features, obs_processed, prefix="observation"
)
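# Convert numpy observations to torch tensors; scale images to [0, 1], reorder HWC -> CHW,
# add a batch dimension, and move everything to the policy device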
for name in obs_with_policy_features:
obs_with_policy_features[name] = torch.from_numpy(obs_with_policy_features[name])
if "image" in name:
obs_with_policy_features[name] = (
obs_with_policy_features[name].type(torch.float32) / 255
)
obs_with_policy_features[name] = (
obs_with_policy_features[name].permute(2, 0, 1).contiguous()
)
obs_with_policy_features[name] = obs_with_policy_features[name].unsqueeze(0)
obs_with_policy_features[name] = obs_with_policy_features[name].to(policy_device)
obs_with_policy_features["task"] = [cfg.task]
obs_with_policy_features["robot_type"] = robot.name
preprocessed_obs = preprocessor(obs_with_policy_features)
actions = policy.predict_action_chunk(
preprocessed_obs,
inference_delay=inference_delay,
prev_chunk_left_over=prev_actions,
)
original_actions = actions.squeeze(0).clone()
postprocessed_actions = postprocessor(actions).squeeze(0)
new_latency = time.perf_counter() - current_time
new_delay = math.ceil(new_latency / time_per_chunk)
latency_tracker.add(new_latency)
if cfg.action_queue_size_to_get_new_actions < cfg.rtc.execution_horizon + new_delay:
logger.warning(
"[GET_ACTIONS] action_queue_size_to_get_new_actions too small. "
"Should be higher than inference delay + execution horizon."
)
action_queue.merge(
original_actions, postprocessed_actions, new_delay, action_index_before_inference
)
logger.debug(
f"[GET_ACTIONS] Generated chunk, latency={new_latency:.3f}s, "
f"delay={new_delay}, queue_size={action_queue.qsize()}"
)
else:
time.sleep(0.01)
logger.info("[GET_ACTIONS] Action generation thread shutting down")
except Exception as e:
logger.error(f"[GET_ACTIONS] Fatal exception: {e}")
logger.error(traceback.format_exc())
shutdown_event.set()
sys.exit(1)
# ============================================================================
# Action Execution Thread
# ============================================================================
def actor_thread(
robot: RobotWrapper,
robot_action_processor,
action_queue: ActionQueue,
shutdown_event: Event,
cfg: OpenArmsRTCEvalConfig,
episode_active: Event,
dataset: LeRobotDataset | None,
dataset_lock: Lock,
teleop_action_processor,
robot_observation_processor,
):
"""Thread function to execute actions on the robot."""
try:
logger.info("[ACTOR] Starting actor thread")
action_count = 0
action_interval = 1.0 / cfg.fps
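# Only position targets (keys ending in ".pos") are sent to the follower, matching the dataset features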
action_keys = [k for k in robot.action_features.keys() if k.endswith(".pos")]
while not shutdown_event.is_set():
if not episode_active.is_set():
time.sleep(0.01)
continue
start_time = time.perf_counter()
action = action_queue.get()
if action is not None:
action = action.cpu()
action_dict = {}
for i, key in enumerate(action_keys):
if i < len(action):
action_dict[key] = action[i].item()
action_processed = robot_action_processor((action_dict, None))
robot.send_action(action_processed)
if cfg.record_dataset and dataset is not None:
with dataset_lock:
obs = robot.get_observation()
obs_processed = robot_observation_processor(obs)
action_for_dataset = teleop_action_processor((action_dict, None))
frame = {}
for key, value in obs_processed.items():
frame[f"observation.{key}"] = value
for key, value in action_for_dataset.items():
frame[f"action.{key}"] = value
frame["task"] = cfg.task
dataset.add_frame(frame)
action_count += 1
dt_s = time.perf_counter() - start_time
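# Sleep for the remainder of the control period, keeping a ~1 ms margin (assumed headroom for loop overhead)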
sleep_time = max(0, action_interval - dt_s - 0.001)
if sleep_time > 0:
time.sleep(sleep_time)
logger.info(f"[ACTOR] Actor thread shutting down. Total actions executed: {action_count}")
except Exception as e:
logger.error(f"[ACTOR] Fatal exception: {e}")
logger.error(traceback.format_exc())
shutdown_event.set()
sys.exit(1)
# ============================================================================
# Main Evaluation Function
# ============================================================================
def _apply_torch_compile(policy, cfg: OpenArmsRTCEvalConfig):
"""Apply torch.compile to the policy's predict_action_chunk method."""
if policy.name in ["pi05", "pi0"]:
return policy
try:
if not hasattr(torch, "compile"):
logger.warning(
f"torch.compile not available. Requires PyTorch 2.0+. "
f"Current version: {torch.__version__}. Skipping compilation."
)
return policy
logger.info("Applying torch.compile to predict_action_chunk...")
compile_kwargs = {
"backend": cfg.torch_compile_backend,
"mode": cfg.torch_compile_mode,
}
if cfg.torch_compile_disable_cudagraphs:
compile_kwargs["options"] = {"triton.cudagraphs": False}
original_method = policy.predict_action_chunk
compiled_method = torch.compile(original_method, **compile_kwargs)
policy.predict_action_chunk = compiled_method
logger.info("Successfully compiled predict_action_chunk")
except Exception as e:
logger.error(f"Failed to apply torch.compile: {e}")
logger.warning("Continuing without torch.compile")
return policy
@parser.wrap()
def main(cfg: OpenArmsRTCEvalConfig):
"""Main evaluation function with RTC."""
init_logging()
print("=" * 60)
print("OpenArms Policy Evaluation with RTC")
print("=" * 60)
print(f"\nModel: {cfg.model_id}")
print(f"Evaluation Dataset: {cfg.eval_dataset_id}")
print(f"Task: {cfg.task}")
print(f"Episodes: {cfg.num_episodes}")
print(f"Episode Duration: {cfg.episode_time_sec}s")
print(f"RTC Enabled: {cfg.rtc.enabled}")
print(f"RTC Execution Horizon: {cfg.rtc.execution_horizon}")
print(f"RTC Max Guidance Weight: {cfg.rtc.max_guidance_weight}")
print(f"Device: {cfg.device}")
print("=" * 60)
signal_handler = ProcessSignalHandler(use_threads=True, display_pid=False)
shutdown_event = signal_handler.shutdown_event
episode_active = Event()
# Initialize Robot
follower_config = OpenArmsFollowerConfig(
port_left=cfg.follower_left_port,
port_right=cfg.follower_right_port,
can_interface="socketcan",
id="openarms_follower",
disable_torque_on_disconnect=True,
max_relative_target=10.0,
cameras=DEFAULT_CAMERA_CONFIG,
)
follower = OpenArmsFollower(follower_config)
follower.connect(calibrate=False)
if not follower.is_connected:
raise RuntimeError("Follower robot failed to connect!")
robot = RobotWrapper(follower)
logger.info("Follower robot connected")
# Build Processors and Dataset Features
teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors()
action_features_hw = {}
for key, value in follower.action_features.items():
if key.endswith(".pos"):
action_features_hw[key] = value
dataset_features = combine_feature_dicts(
aggregate_pipeline_dataset_features(
pipeline=teleop_action_processor,
initial_features=create_initial_features(action=action_features_hw),
use_videos=True,
),
aggregate_pipeline_dataset_features(
pipeline=robot_observation_processor,
initial_features=create_initial_features(observation=follower.observation_features),
use_videos=True,
),
)
# Create or Load Dataset
dataset = None
dataset_lock = Lock()
if cfg.record_dataset:
dataset_path = Path.home() / ".cache" / "huggingface" / "lerobot" / cfg.eval_dataset_id
if dataset_path.exists():
logger.info(f"Evaluation dataset exists at: {dataset_path}")
logger.info("New episodes will be appended.")
choice = input("Continue? (y/n): ").strip().lower()
if choice != "y":
logger.info("Aborting evaluation.")
follower.disconnect()
return
dataset = LeRobotDataset.create(
repo_id=cfg.eval_dataset_id,
fps=int(cfg.fps),
features=dataset_features,
robot_type=follower.name,
use_videos=True,
image_writer_processes=0,
image_writer_threads=12,
)
logger.info(f"Dataset created: {cfg.eval_dataset_id}")
# Load Policy
logger.info(f"Loading policy from: {cfg.model_id}")
policy_class = get_policy_class(cfg.policy.type)
config = PreTrainedConfig.from_pretrained(cfg.policy.pretrained_path)
if cfg.policy.type in ["pi05", "pi0"]:
config.compile_model = cfg.use_torch_compile
policy = policy_class.from_pretrained(cfg.policy.pretrained_path, config=config)
policy.config.rtc_config = cfg.rtc
policy.init_rtc_processor()
assert policy.name in ["smolvla", "pi05", "pi0"], "Only smolvla, pi05, and pi0 are supported for RTC"
policy = policy.to(cfg.device)
policy.eval()
if cfg.use_torch_compile:
policy = _apply_torch_compile(policy, cfg)
logger.info(f"Policy loaded: {policy.name}")
# Create Action Queue and Start Threads
action_queue = ActionQueue(cfg.rtc)
get_actions_t = Thread(
target=get_actions_thread,
args=(
policy,
robot,
robot_observation_processor,
action_queue,
shutdown_event,
cfg,
episode_active,
),
daemon=True,
name="GetActions",
)
get_actions_t.start()
logger.info("Started action generation thread")
actor_t = Thread(
target=actor_thread,
args=(
robot,
robot_action_processor,
action_queue,
shutdown_event,
cfg,
episode_active,
dataset,
dataset_lock,
teleop_action_processor,
robot_observation_processor,
),
daemon=True,
name="Actor",
)
actor_t.start()
logger.info("Started actor thread")
# Run Evaluation Episodes
episode_idx = 0
try:
while episode_idx < cfg.num_episodes and not shutdown_event.is_set():
log_say(f"Evaluating episode {episode_idx + 1} of {cfg.num_episodes}")
logger.info(f"\n{'='*40}")
logger.info(f"Episode {episode_idx + 1} / {cfg.num_episodes}")
logger.info(f"{'='*40}")
action_queue = ActionQueue(cfg.rtc)
episode_active.set()
episode_start_time = time.time()
while (time.time() - episode_start_time) < cfg.episode_time_sec:
if shutdown_event.is_set():
break
elapsed = time.time() - episode_start_time
if int(elapsed) % 10 == 0 and int(elapsed) > 0:
logger.info(
f"[MAIN] Episode progress: {elapsed:.0f}/{cfg.episode_time_sec}s, "
f"queue_size={action_queue.qsize()}"
)
time.sleep(0.5)
episode_active.clear()
logger.info(f"Episode {episode_idx + 1} completed")
if cfg.record_dataset and dataset is not None:
with dataset_lock:
if dataset.episode_buffer is not None and dataset.episode_buffer.get("size", 0) > 0:
logger.info(
f"Saving episode {episode_idx + 1} "
f"({dataset.episode_buffer['size']} frames)"
)
dataset.save_episode()
episode_idx += 1
# Manual reset between episodes
if not shutdown_event.is_set() and episode_idx < cfg.num_episodes:
log_say("Waiting for manual reset")
logger.info("Manually reset the environment and press ENTER to continue")
input("Press ENTER when ready...")
logger.info(f"Evaluation complete! {episode_idx} episodes recorded")
log_say("Evaluation complete", blocking=True)
except KeyboardInterrupt:
logger.info("\n\nEvaluation interrupted by user")
finally:
shutdown_event.set()
episode_active.clear()
if get_actions_t.is_alive():
logger.info("Waiting for action generation thread to finish...")
get_actions_t.join(timeout=5.0)
if actor_t.is_alive():
logger.info("Waiting for actor thread to finish...")
actor_t.join(timeout=5.0)
follower.disconnect()
logger.info("Follower disconnected")
if cfg.record_dataset and dataset is not None:
dataset.finalize()
if cfg.push_to_hub:
logger.info("Uploading to Hugging Face Hub...")
dataset.push_to_hub(private=True)
logger.info("Cleanup completed")
if __name__ == "__main__":
main()
-216
View File
@@ -1,216 +0,0 @@
import time
import numpy as np
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
# Friction model parameters from OpenArms config/follower.yaml
# τ_fric(ω) = Fo + Fv·ω + Fc·tanh(k·ω)
# For 8 motors: [joint_1, joint_2, joint_3, joint_4, joint_5, joint_6, joint_7, gripper]
FRICTION_PARAMS = {
"Fc": [0.306, 0.306, 0.40, 0.166, 0.050, 0.093, 0.172, 0.0512], # Coulomb friction [Nm]
"k": [28.417, 28.417, 29.065, 130.038, 151.771, 242.287, 7.888, 4.000], # tanh steepness
"Fv": [0.063, 0.0630, 0.604, 0.813, 0.029, 0.072, 0.084, 0.084], # Viscous friction [Nm·s/rad]
"Fo": [0.088, 0.088, 0.008, -0.058, 0.005, 0.009, -0.059, -0.050], # Offset torque [Nm]
}
# Constants from OpenArms C++ implementation
AMP_TMP = 1.0
COEF_TMP = 0.1
FRICTION_SCALE = 1.0 # OpenArms C++ uses 0.3 factor in unilateral mode
DAMPING_KD = [0.5, 0.5, 0.5, 0.5, 0.1, 0.1, 0.1, 0.1] # Damping gains for stability
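# For illustration (values rounded): joint_1 at ω = 0.5 rad/s gives roughly
# τ_fric ≈ 1.0·0.306·tanh(0.1·28.417·0.5) + 0.063·0.5 + 0.088 ≈ 0.39 N·m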
def compute_friction_torque(velocity_rad_per_sec: float, motor_index: int) -> float:
"""
Compute friction torque for a single motor using the tanh friction model.
Args:
velocity_rad_per_sec: Angular velocity in rad/s
motor_index: Index of the motor (0-7)
Returns:
Friction torque in N·m (scaled for stability)
"""
Fc = FRICTION_PARAMS["Fc"][motor_index]
k = FRICTION_PARAMS["k"][motor_index]
Fv = FRICTION_PARAMS["Fv"][motor_index]
Fo = FRICTION_PARAMS["Fo"][motor_index]
# Friction model: τ_fric = amp * Fc * tanh(coef * k * ω) + Fv * ω + Fo
friction_torque = (
AMP_TMP * Fc * np.tanh(COEF_TMP * k * velocity_rad_per_sec) +
Fv * velocity_rad_per_sec +
Fo
)
# Optionally scale down friction compensation for stability at lower control rates
# (the OpenArms C++ implementation uses a 0.3 factor in unilateral mode)
friction_torque *= FRICTION_SCALE
return friction_torque
def main() -> None:
config = OpenArmsFollowerConfig(
port_left="can0",
port_right="can1",
can_interface="socketcan",
id="openarms_follower",
disable_torque_on_disconnect=True,
max_relative_target=5.0,
)
print("Initializing robot...")
follower = OpenArmsFollower(config)
follower.connect(calibrate=True)
print(f"Applying friction compensation")
print(" 1. Support the arm before starting")
print(" 2. The arm will be held in place by friction compensation")
print(" 3. You should be able to move it with gentle force")
print("\nPress ENTER when ready to start...")
input()
print(f"✓ Motors enabled")
print("\nStarting friction compensation loop...")
print("Press Ctrl+C to stop\n")
loop_times = []
last_print_time = time.perf_counter()
# Motor name to index mapping
motor_name_to_index = {
"joint_1": 0,
"joint_2": 1,
"joint_3": 2,
"joint_4": 3,
"joint_5": 4,
"joint_6": 5,
"joint_7": 6,
"gripper": 7,
}
try:
while True:
loop_start = time.perf_counter()
# Get current joint positions and velocities from robot
obs = follower.get_observation()
# Extract velocities in degrees per second
velocities_deg_per_sec = {}
positions_deg = {}
for motor in follower.bus_right.motors:
vel_key = f"right_{motor}.vel"
pos_key = f"right_{motor}.pos"
if vel_key in obs:
velocities_deg_per_sec[f"right_{motor}"] = obs[vel_key]
if pos_key in obs:
positions_deg[f"right_{motor}"] = obs[pos_key]
for motor in follower.bus_left.motors:
vel_key = f"left_{motor}.vel"
pos_key = f"left_{motor}.pos"
if vel_key in obs:
velocities_deg_per_sec[f"left_{motor}"] = obs[vel_key]
if pos_key in obs:
positions_deg[f"left_{motor}"] = obs[pos_key]
# Convert velocities to rad/s and compute friction torques
friction_torques_nm = {}
for motor_full_name, velocity_deg_per_sec in velocities_deg_per_sec.items():
# Extract motor name without arm prefix
if motor_full_name.startswith("right_"):
motor_name = motor_full_name.removeprefix("right_")
elif motor_full_name.startswith("left_"):
motor_name = motor_full_name.removeprefix("left_")
else:
continue
# Get motor index for friction parameters
motor_index = motor_name_to_index.get(motor_name, 0)
# Convert velocity to rad/s
velocity_rad_per_sec = np.deg2rad(velocity_deg_per_sec)
# Compute friction torque
friction_torque = compute_friction_torque(velocity_rad_per_sec, motor_index)
friction_torques_nm[motor_full_name] = friction_torque
# Apply friction compensation to right arm (all joints INCLUDING gripper)
for motor in follower.bus_right.motors:
full_name = f"right_{motor}"
position = positions_deg.get(full_name, 0.0)
torque = friction_torques_nm.get(full_name, 0.0)
# Get motor index for damping gain
motor_index = motor_name_to_index.get(motor, 0)
kd = DAMPING_KD[motor_index]
# Send MIT control command with friction compensation + damping
follower.bus_right._mit_control(
motor=motor,
kp=0.0, # No position control
kd=kd, # Add damping for stability
position_degrees=position,
velocity_deg_per_sec=0.0,
torque=torque
)
# Apply friction compensation to left arm (all joints INCLUDING gripper)
for motor in follower.bus_left.motors:
full_name = f"left_{motor}"
position = positions_deg.get(full_name, 0.0)
torque = friction_torques_nm.get(full_name, 0.0)
# Get motor index for damping gain
motor_index = motor_name_to_index.get(motor, 0)
kd = DAMPING_KD[motor_index]
# Send MIT control command with friction compensation + damping
follower.bus_left._mit_control(
motor=motor,
kp=0.0, # No position control
kd=kd, # Add damping for stability
position_degrees=position,
velocity_deg_per_sec=0.0,
torque=torque
)
# Measure loop time
loop_end = time.perf_counter()
loop_time = loop_end - loop_start
loop_times.append(loop_time)
# Print status every 2 seconds
if loop_end - last_print_time >= 2.0:
if loop_times:
avg_time = sum(loop_times) / len(loop_times)
current_hz = 1.0 / avg_time if avg_time > 0 else 0
print(f"{current_hz:.1f} Hz")
loop_times = []
last_print_time = loop_end
time.sleep(0.001)
except KeyboardInterrupt:
print("\n\nStopping friction compensation...")
finally:
print("\nDisabling all motors and disconnecting...")
follower.bus_right.disable_torque()
follower.bus_left.disable_torque()
time.sleep(0.1)
follower.disconnect()
print("✓ Safe shutdown complete")
if __name__ == "__main__":
main()
-142
View File
@@ -1,142 +0,0 @@
import time
import numpy as np
import pinocchio as pin
from os.path import dirname
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
def main() -> None:
config = OpenArmsFollowerConfig(
port_left="can0",
port_right="can1",
can_interface="socketcan",
id="openarms_follower",
disable_torque_on_disconnect=True,
max_relative_target=5.0,
)
print("Initializing robot...")
follower = OpenArmsFollower(config)
follower.connect(calibrate=True)
# Load URDF for Pinocchio dynamics
urdf_path = "/home/croissant/Documents/openarm_description/openarm_bimanual_pybullet.urdf"
pin_robot = pin.RobotWrapper.BuildFromURDF(urdf_path, dirname(urdf_path))
pin_robot.data = pin_robot.model.createData()
print(f"✓ Loaded Pinocchio model with {pin_robot.nq} DoFs")
follower.pin_robot = pin_robot
print(f"Applying gravity compensation")
print(" 1. Support the arm before starting")
print(" 2. The arm will be held in place by gravity compensation")
print(" 3. You should be able to move it with gentle force")
print("\nPress ENTER when ready to start...")
input()
print(f"✓ Motors enabled")
print("\nStarting gravity compensation loop...")
print("Press Ctrl+C to stop\n")
loop_times = []
last_print_time = time.perf_counter()
try:
while True:
loop_start = time.perf_counter()
# Get current joint positions from robot
obs = follower.get_observation()
# Extract positions in degrees
positions_deg = {}
for motor in follower.bus_right.motors:
key = f"right_{motor}.pos"
if key in obs:
positions_deg[f"right_{motor}"] = obs[key]
for motor in follower.bus_left.motors:
key = f"left_{motor}.pos"
if key in obs:
positions_deg[f"left_{motor}"] = obs[key]
# Convert to radians and calculate gravity torques
# Use the built-in method from OpenArmsFollower
positions_rad = {k: np.deg2rad(v) for k, v in positions_deg.items()}
torques_nm = follower._gravity_from_q(positions_rad)
# Apply gravity compensation to right arm (all joints except gripper)
for motor in follower.bus_right.motors:
if motor == "gripper":
continue # Skip gripper
full_name = f"right_{motor}"
position = positions_deg.get(full_name, 0.0)
torque = torques_nm.get(full_name, 0.0)
# Send MIT control command with gravity compensation torque
follower.bus_right._mit_control(
motor=motor,
kp=0.0, # No position control
kd=0.0, # No velocity damping
position_degrees=position,
velocity_deg_per_sec=0.0,
torque=torque
)
# Apply gravity compensation to left arm (all joints except gripper)
for motor in follower.bus_left.motors:
if motor == "gripper":
continue # Skip gripper
full_name = f"left_{motor}"
position = positions_deg.get(full_name, 0.0)
torque = torques_nm.get(full_name, 0.0)
# Send MIT control command with gravity compensation torque
follower.bus_left._mit_control(
motor=motor,
kp=0.0, # No position control
kd=0.0, # No velocity damping
position_degrees=position,
velocity_deg_per_sec=0.0,
torque=torque
)
# Measure loop time
loop_end = time.perf_counter()
loop_time = loop_end - loop_start
loop_times.append(loop_time)
# Print status every 2 seconds
if loop_end - last_print_time >= 2.0:
if loop_times:
avg_time = sum(loop_times) / len(loop_times)
current_hz = 1.0 / avg_time if avg_time > 0 else 0
print(f"{current_hz:.1f} Hz ({avg_time*1000:.1f} ms)")
loop_times = []
last_print_time = loop_end
time.sleep(0.005)
except KeyboardInterrupt:
print("\n\nStopping gravity compensation...")
finally:
print("\nDisabling all motors and disconnecting...")
follower.bus_right.disable_torque()
follower.bus_left.disable_torque()
time.sleep(0.1)
follower.disconnect()
print("✓ Safe shutdown complete")
if __name__ == "__main__":
main()
@@ -1,395 +0,0 @@
"""
OpenArms Dataset Recording with Gravity + Friction Compensation
Records a dataset using OpenArms follower robot with leader teleoperator.
Leader arms have gravity and friction compensation for weightless, easy movement.
Includes 3 cameras: left wrist, right wrist, and base camera.
Uses the same compensation approach as teleop_with_compensation.py
"""
import shutil
import time
from pathlib import Path
import numpy as np
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.utils import build_dataset_frame, hw_to_dataset_features
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.teleoperators.openarms.config_openarms_leader import OpenArmsLeaderConfig
from lerobot.teleoperators.openarms.openarms_leader import OpenArmsLeader
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun, log_rerun_data
# Recording parameters
NUM_EPISODES = 1
FPS = 30
EPISODE_TIME_SEC = 600
RESET_TIME_SEC = 120
TASK_DESCRIPTION = "OpenArms task description"
# Friction compensation scale factor (1.0 = full, 0.3 = 30% for stability)
FRICTION_SCALE = 1.0
def record_loop_with_compensation(
robot,
leader,
events,
fps,
dataset,
dataset_features,
control_time_s,
single_task,
display_data=True,
):
"""
Custom record loop that applies gravity + friction compensation to leader.
Based on record_loop but with integrated compensation.
"""
dt = 1 / fps
episode_start_time = time.perf_counter()
# All joints (both arms)
all_joints = []
for motor in leader.bus_right.motors:
all_joints.append(f"right_{motor}")
for motor in leader.bus_left.motors:
all_joints.append(f"left_{motor}")
while True:
loop_start = time.perf_counter()
elapsed = loop_start - episode_start_time
# Check if we should exit
if elapsed >= control_time_s or events["exit_early"] or events["stop_recording"]:
break
# Get leader state
leader_action = leader.get_action()
# Extract positions and velocities in degrees
leader_positions_deg = {}
leader_velocities_deg_per_sec = {}
for motor in leader.bus_right.motors:
pos_key = f"right_{motor}.pos"
vel_key = f"right_{motor}.vel"
if pos_key in leader_action:
leader_positions_deg[f"right_{motor}"] = leader_action[pos_key]
if vel_key in leader_action:
leader_velocities_deg_per_sec[f"right_{motor}"] = leader_action[vel_key]
for motor in leader.bus_left.motors:
pos_key = f"left_{motor}.pos"
vel_key = f"left_{motor}.vel"
if pos_key in leader_action:
leader_positions_deg[f"left_{motor}"] = leader_action[pos_key]
if vel_key in leader_action:
leader_velocities_deg_per_sec[f"left_{motor}"] = leader_action[vel_key]
# Calculate gravity torques for leader using built-in method
leader_positions_rad = {k: np.deg2rad(v) for k, v in leader_positions_deg.items()}
leader_gravity_torques_nm = leader._gravity_from_q(leader_positions_rad)
# Calculate friction torques for leader using built-in method
leader_velocities_rad_per_sec = {k: np.deg2rad(v) for k, v in leader_velocities_deg_per_sec.items()}
leader_friction_torques_nm = leader._friction_from_velocity(
leader_velocities_rad_per_sec,
friction_scale=FRICTION_SCALE
)
# Combine gravity + friction torques
leader_total_torques_nm = {}
for motor_name in leader_gravity_torques_nm:
gravity = leader_gravity_torques_nm.get(motor_name, 0.0)
friction = leader_friction_torques_nm.get(motor_name, 0.0)
leader_total_torques_nm[motor_name] = gravity + friction
# Apply gravity + friction compensation to leader RIGHT arm (all joints including gripper)
for motor in leader.bus_right.motors:
full_name = f"right_{motor}"
position = leader_positions_deg.get(full_name, 0.0)
torque = leader_total_torques_nm.get(full_name, 0.0)
# Get damping gain for stability
kd = leader.get_damping_kd(motor)
leader.bus_right._mit_control(
motor=motor,
kp=0.0,
kd=kd, # Add damping for stability
position_degrees=position,
velocity_deg_per_sec=0.0,
torque=torque,
)
# Apply gravity + friction compensation to leader LEFT arm (all joints including gripper)
for motor in leader.bus_left.motors:
full_name = f"left_{motor}"
position = leader_positions_deg.get(full_name, 0.0)
torque = leader_total_torques_nm.get(full_name, 0.0)
# Get damping gain for stability
kd = leader.get_damping_kd(motor)
leader.bus_left._mit_control(
motor=motor,
kp=0.0,
kd=kd, # Add damping for stability
position_degrees=position,
velocity_deg_per_sec=0.0,
torque=torque,
)
# Send leader positions to follower (both arms)
follower_action = {}
for joint in all_joints:
pos_key = f"{joint}.pos"
if pos_key in leader_action:
follower_action[pos_key] = leader_action[pos_key]
# Send action to robot
if follower_action:
robot.send_action(follower_action)
# Get observation from robot (includes camera images)
observation = robot.get_observation()
# Add to dataset if we have a dataset
if dataset is not None:
# Build properly formatted observation frame
obs_frame = build_dataset_frame(dataset_features, observation, prefix="observation")
# Build properly formatted action frame (keep .pos suffix - it matches the feature names)
action_frame = build_dataset_frame(dataset_features, follower_action, prefix="action")
# Combine into single frame
frame = {**obs_frame, **action_frame}
# Add metadata (task is required, timestamp will be auto-calculated by add_frame)
frame["task"] = single_task
dataset.add_frame(frame)
# Display data if requested
if display_data:
log_rerun_data(observation=observation, action=follower_action)
# Maintain loop rate
loop_duration = time.perf_counter() - loop_start
sleep_time = dt - loop_duration
if sleep_time > 0:
time.sleep(sleep_time)
def main():
"""Main recording loop with gravity compensation."""
print("=" * 70)
print("OpenArms Dataset Recording with Compensation")
print("=" * 70)
# Create camera configurations (3 cameras: left wrist, right wrist, base)
# Using actual device paths found by lerobot-find-cameras opencv
camera_config = {
"left_wrist": OpenCVCameraConfig(index_or_path="/dev/video0", width=640, height=480, fps=FPS),
"right_wrist": OpenCVCameraConfig(index_or_path="/dev/video1", width=640, height=480, fps=FPS),
"base": OpenCVCameraConfig(index_or_path="/dev/video7", width=640, height=480, fps=FPS),
}
# Configure follower robot with cameras
follower_config = OpenArmsFollowerConfig(
port_left="can2",
port_right="can3",
can_interface="socketcan",
id="openarms_follower",
disable_torque_on_disconnect=True,
max_relative_target=10.0,
cameras=camera_config,
)
# Configure leader teleoperator (no cameras needed)
leader_config = OpenArmsLeaderConfig(
port_left="can0",
port_right="can1",
can_interface="socketcan",
id="openarms_leader",
manual_control=False, # Enable torque control for gravity compensation
)
# Initialize robot and teleoperator
print("\nInitializing devices...")
follower = OpenArmsFollower(follower_config)
leader = OpenArmsLeader(leader_config)
# Connect devices
print("Connecting and calibrating...")
follower.connect(calibrate=True)
leader.connect(calibrate=True)
# Verify URDF is loaded for gravity compensation
if leader.pin_robot is None:
raise RuntimeError("URDF model not loaded on leader. Gravity compensation not available.")
# Configure the dataset features
# For actions, we only want to record positions (not velocity or torque)
action_features_hw = {}
for key, value in follower.action_features.items():
if key.endswith(".pos"):
action_features_hw[key] = value
action_features = hw_to_dataset_features(action_features_hw, "action")
obs_features = hw_to_dataset_features(follower.observation_features, "observation")
dataset_features = {**action_features, **obs_features}
# Create the dataset
print("\nCreating dataset...")
repo_id = "<hf_username>/<dataset_repo_id>" # TODO: Replace with your Hugging Face repo
# Check if dataset already exists and prompt user
dataset_path = Path.home() / ".cache" / "huggingface" / "lerobot" / repo_id
while dataset_path.exists():
print(f"\nDataset already exists at: {dataset_path}")
print("\nOptions:")
print(" 1. Overwrite existing dataset")
print(" 2. Use a different name")
print(" 3. Abort")
choice = input("\nEnter your choice (1/2/3): ").strip()
if choice == '1':
print(f"Removing existing dataset...")
shutil.rmtree(dataset_path)
print("✓ Existing dataset removed")
break
elif choice == '2':
print("\nCurrent repo_id:", repo_id)
new_repo_id = input("Enter new repo_id (format: <username>/<dataset_name>): ").strip()
if new_repo_id and '/' in new_repo_id:
repo_id = new_repo_id
dataset_path = Path.home() / ".cache" / "huggingface" / "lerobot" / repo_id
print(f"✓ Using new repo_id: {repo_id}")
# Loop will continue if this new path also exists
else:
print("Invalid repo_id format. Please use format: <username>/<dataset_name>")
elif choice == '3':
print("Aborting. Please remove the existing dataset manually or restart with a different repo_id.")
follower.disconnect()
leader.disconnect()
return
else:
print("Invalid choice. Please enter 1, 2, or 3.")
dataset = LeRobotDataset.create(
repo_id=repo_id,
fps=FPS,
features=dataset_features,
robot_type=follower.name,
use_videos=True,
image_writer_threads=4,
)
# Initialize keyboard listener and visualization
_, events = init_keyboard_listener()
init_rerun(session_name="openarms_recording")
# Enable motors on both leader arms for gravity compensation
leader.bus_right.enable_torque()
leader.bus_left.enable_torque()
time.sleep(0.1)
print("\n" + "=" * 70)
print(f"Recording {NUM_EPISODES} episodes")
print(f"Task: {TASK_DESCRIPTION}")
print("=" * 70)
print("\nLeader BOTH arms: Gravity + Friction comp | Follower BOTH arms: Teleop")
print("\nKeyboard controls:")
print(" - Press 'q' to stop recording")
print(" - Press 'r' to re-record current episode")
print("=" * 70)
episode_idx = 0
try:
while episode_idx < NUM_EPISODES and not events["stop_recording"]:
log_say(f"Recording episode {episode_idx + 1} of {NUM_EPISODES}")
# Record episode with compensation active
record_loop_with_compensation(
robot=follower,
leader=leader,
events=events,
fps=FPS,
dataset=dataset,
dataset_features=dataset_features,
control_time_s=EPISODE_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
)
# Reset the environment if not stopping or re-recording
if not events["stop_recording"] and (episode_idx < NUM_EPISODES - 1 or events["rerecord_episode"]):
log_say("Reset the environment")
record_loop_with_compensation(
robot=follower,
leader=leader,
events=events,
fps=FPS,
dataset=None, # Don't save reset period
dataset_features=dataset_features,
control_time_s=RESET_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
)
# Handle re-recording
if events["rerecord_episode"]:
log_say("Re-recording episode")
events["rerecord_episode"] = False
events["exit_early"] = False
dataset.clear_episode_buffer()
continue
# Only save episode if frames were recorded
if dataset.episode_buffer is not None and dataset.episode_buffer["size"] > 0:
dataset.save_episode()
episode_idx += 1
else:
log_say("No frames recorded, skipping episode save")
# Clear the empty buffer
dataset.episode_buffer = None
except KeyboardInterrupt:
print("\n\nStopping recording...")
finally:
# Clean up
log_say("Stop recording")
try:
leader.bus_right.disable_torque()
leader.bus_left.disable_torque()
time.sleep(0.1)
leader.disconnect()
follower.disconnect()
print("✓ Shutdown complete")
except Exception as e:
print(f"Shutdown error: {e}")
# Upload dataset
print("\nUploading dataset to Hugging Face Hub...")
try:
dataset.push_to_hub()
print("✓ Dataset uploaded successfully")
except Exception as e:
print(f"Warning: Failed to upload dataset: {e}")
print("You can manually upload later using: dataset.push_to_hub()")
print("✓ Recording complete!")
if __name__ == "__main__":
main()
-166
View File
@@ -1,166 +0,0 @@
#!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenArms Dataset Replay Example
Replays position actions from a recorded dataset on an OpenArms follower robot.
Only position commands (ending with .pos) are replayed, not velocity or torque.
Example usage:
python examples/openarms/replay.py
"""
import time
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.utils.constants import ACTION
from lerobot.utils.robot_utils import busy_wait
from lerobot.utils.utils import log_say
# Configuration
EPISODE_IDX = 0
DATASET_REPO_ID = "lerobot-data-collection/replay-this-2025-11-02-17-58" # TODO: Replace with your dataset
DATASET_ROOT = None # Use default cache location, or specify custom path
# Robot configuration - adjust these to match your setup
ROBOT_CONFIG = OpenArmsFollowerConfig(
port_left="can2", # CAN interface for left arm
port_right="can3", # CAN interface for right arm
can_interface="socketcan",
id="openarms_follower",
disable_torque_on_disconnect=True,
max_relative_target=10.0, # Safety limit: max degrees to move per step
)
def main():
"""Main replay function."""
print("=" * 70)
print("OpenArms Dataset Replay")
print("=" * 70)
print(f"\nDataset: {DATASET_REPO_ID}")
print(f"Episode: {EPISODE_IDX}")
print(f"Robot: {ROBOT_CONFIG.id}")
print(f" Left arm: {ROBOT_CONFIG.port_left}")
print(f" Right arm: {ROBOT_CONFIG.port_right}")
print("\n" + "=" * 70)
# Initialize the robot
print("\n[1/3] Initializing robot...")
robot = OpenArmsFollower(ROBOT_CONFIG)
# Load the dataset
print(f"\n[2/3] Loading dataset '{DATASET_REPO_ID}'...")
dataset = LeRobotDataset(
DATASET_REPO_ID,
root=DATASET_ROOT,
episodes=[EPISODE_IDX]
)
# Filter dataset to only include frames from the specified episode
# (required for dataset V3.0 where episodes are chunked)
episode_frames = dataset.hf_dataset.filter(
lambda x: x["episode_index"] == EPISODE_IDX
)
if len(episode_frames) == 0:
raise ValueError(
f"No frames found for episode {EPISODE_IDX} in dataset {DATASET_REPO_ID}"
)
print(f" Found {len(episode_frames)} frames in episode {EPISODE_IDX}")
# Extract action features from dataset
action_features = dataset.features.get(ACTION, {})
action_names = action_features.get("names", [])
# Filter to only position actions (ending with .pos)
position_action_names = [name for name in action_names if name.endswith(".pos")]
if not position_action_names:
raise ValueError(
f"No position actions found in dataset. Action names: {action_names}"
)
print(f" Found {len(position_action_names)} position actions to replay")
print(f" Actions: {', '.join(position_action_names[:5])}{'...' if len(position_action_names) > 5 else ''}")
# Select only action columns from dataset
actions = episode_frames.select_columns(ACTION)
# Connect to the robot
print(f"\n[3/3] Connecting to robot...")
robot.connect(calibrate=False) # Skip calibration for replay
if not robot.is_connected:
raise RuntimeError("Robot failed to connect!")
print("\n" + "=" * 70)
print("Ready to replay!")
print("=" * 70)
print("\nThe robot will replay the recorded positions.")
print("Press Ctrl+C to stop at any time.\n")
input("Press ENTER to start replaying...")
# Replay loop
log_say(f"Replaying episode {EPISODE_IDX}", blocking=True)
try:
for idx in range(len(episode_frames)):
loop_start = time.perf_counter()
# Extract action array from dataset
action_array = actions[idx][ACTION]
# Build action dictionary, but only include position actions
action = {}
for i, name in enumerate(action_names):
# Only include position actions (ending with .pos)
if name.endswith(".pos"):
action[name] = float(action_array[i])
# Send action to robot
robot.send_action(action)
# Maintain replay rate (use dataset fps)
loop_duration = time.perf_counter() - loop_start
dt_s = 1.0 / dataset.fps - loop_duration
busy_wait(dt_s)
# Progress indicator every 100 frames
if (idx + 1) % 100 == 0:
progress = (idx + 1) / len(episode_frames) * 100
print(f"Progress: {idx + 1}/{len(episode_frames)} frames ({progress:.1f}%)")
print(f"\n✓ Successfully replayed {len(episode_frames)} frames")
log_say("Replay complete", blocking=True)
except KeyboardInterrupt:
print("\n\nReplay interrupted by user")
finally:
# Disconnect robot
print("\nDisconnecting robot...")
robot.disconnect()
print("✓ Replay complete!")
if __name__ == "__main__":
main()
-73
View File
@@ -1,73 +0,0 @@
#!/bin/bash
# Setup all OpenArms CAN interfaces with CAN FD
set -e
echo "=========================================="
echo "OpenArms CAN FD Interface Setup"
echo "=========================================="
echo ""
echo "Mode: CAN FD"
echo " - Nominal bitrate: 1 Mbps"
echo " - Data bitrate: 5 Mbps"
echo ""
echo "Configuring interfaces can0, can1, can2, can3..."
echo ""
# Configure each CAN interface with CAN FD
for i in 0 1 2 3; do
interface="can$i"
# Check if interface exists
if ! ip link show "$interface" &> /dev/null; then
echo "$interface: Not found, skipping"
continue
fi
# Bring down interface
sudo ip link set "$interface" down 2>/dev/null
# Configure CAN FD mode
sudo ip link set "$interface" type can \
bitrate 1000000 \
dbitrate 5000000 \
fd on
# Bring up interface
sudo ip link set "$interface" up
# Verify configuration
if ip link show "$interface" | grep -q "UP"; then
echo "$interface: Configured and UP"
else
echo "$interface: Failed to bring UP"
fi
done
echo ""
echo "=========================================="
echo "Verification"
echo "=========================================="
echo ""
# Show detailed status for each interface
for i in 0 1 2 3; do
interface="can$i"
if ip link show "$interface" &> /dev/null; then
echo "$interface:"
# Show key parameters
ip -d link show "$interface" | grep -E "can|state|bitrate|dbitrate" | head -3
echo ""
fi
done
echo "=========================================="
echo "Setup Complete!"
echo "=========================================="
echo ""
echo "All interfaces configured for CAN FD mode"
echo ""
echo "Next steps:"
echo " 1. Test motors: python debug_can_communication.py"
echo " 2. Run teleoperation: python examples/openarms/teleop.py"
echo ""
-148
View File
@@ -1,148 +0,0 @@
"""
OpenArms Teleoperation Example - Full Dual Arms
This script demonstrates teleoperation of OpenArms follower robot using an OpenArms leader arm.
It first calibrates both devices, then enters a teleoperation loop for both arms.
"""
import time
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
from lerobot.teleoperators.openarms.openarms_leader import OpenArmsLeader
from lerobot.teleoperators.openarms.config_openarms_leader import OpenArmsLeaderConfig
follower_config = OpenArmsFollowerConfig(
port_left="can2", # CAN interface for follower left arm
port_right="can3", # CAN interface for follower right arm
can_interface="socketcan", # Linux SocketCAN
id="openarms_follower",
disable_torque_on_disconnect=True,
max_relative_target=5.0, # Safety limit
)
leader_config = OpenArmsLeaderConfig(
port_left="can0", # CAN interface for leader left arm
port_right="can1", # CAN interface for leader right arm
can_interface="socketcan", # Linux SocketCAN
id="openarms_leader",
manual_control=True, # Enable manual control (torque disabled)
)
print("=" * 60)
print("OpenArms Teleoperation - Full Dual Arms")
print("=" * 60)
# Initialize devices
print("\n[1/4] Initializing devices...")
follower = OpenArmsFollower(follower_config)
leader = OpenArmsLeader(leader_config)
# Connect and calibrate follower
print("\n[2/4] Connecting and calibrating follower robot...")
print("Note: If you have existing calibration, just press ENTER to use it.")
follower.connect(calibrate=True)
# Connect and calibrate leader
print("\n[3/4] Connecting and calibrating leader arm...")
print("Note: The leader arm will have torque disabled for manual control.")
leader.connect(calibrate=True)
# Wait for user to be ready
print("\n[4/4] Ready for teleoperation!")
print("\nBoth arms will be controlled (16 motors total):")
print(" RIGHT ARM: joints 1-7 + gripper")
print(" LEFT ARM: joints 1-7 + gripper")
print("\nPress ENTER to start teleoperation...")
input()
print("\nTeleoperation started! Move both leader arms.")
print("Press Ctrl+C to stop.\n")
# All joints for both arms (16 motors total)
all_joints = [
# Right arm
"right_joint_1",
"right_joint_2",
"right_joint_3",
"right_joint_4",
"right_joint_5",
"right_joint_6",
"right_joint_7",
"right_gripper",
# Left arm
"left_joint_1",
"left_joint_2",
"left_joint_3",
"left_joint_4",
"left_joint_5",
"left_joint_6",
"left_joint_7",
"left_gripper",
]
# Performance monitoring
loop_times = []
start_time = time.perf_counter()
last_print_time = start_time
try:
while True:
loop_start = time.perf_counter()
# Get action from leader
leader_action = leader.get_action()
# Filter to only position data for all joints (both arms)
joint_action = {}
for joint in all_joints:
pos_key = f"{joint}.pos"
if pos_key in leader_action:
joint_action[pos_key] = leader_action[pos_key]
# Send action to follower (both arms)
if joint_action:
follower.send_action(joint_action)
# Measure loop time
loop_end = time.perf_counter()
loop_time = loop_end - loop_start
loop_times.append(loop_time)
# Print stats every 2 seconds
if loop_end - last_print_time >= 2.0:
if loop_times:
avg_time = sum(loop_times) / len(loop_times)
current_hz = 1.0 / avg_time if avg_time > 0 else 0
min_time = min(loop_times)
max_time = max(loop_times)
max_hz = 1.0 / min_time if min_time > 0 else 0
min_hz = 1.0 / max_time if max_time > 0 else 0
print(f"[Hz Stats] Avg: {current_hz:.1f} Hz | "
f"Range: {min_hz:.1f}-{max_hz:.1f} Hz | "
f"Avg loop time: {avg_time*1000:.1f} ms")
# Reset for next measurement window
loop_times = []
last_print_time = loop_end
except KeyboardInterrupt:
print("\n\nStopping teleoperation...")
finally:
# Disconnect devices
print("Disconnecting devices...")
try:
follower.disconnect()
except Exception as e:
print(f"Error disconnecting follower: {e}")
try:
leader.disconnect()
except Exception as e:
print(f"Error disconnecting leader: {e}")
print("Done!")
-197
View File
@@ -1,197 +0,0 @@
"""
OpenArms Mini Teleoperation Example
This script demonstrates teleoperation of an OpenArms follower robot using
an OpenArms Mini leader (Feetech-based) with dual arms (16 motors total).
The OpenArms Mini has:
- Right arm: 8 motors (joint_1 to joint_7 + gripper)
- Left arm: 8 motors (joint_1 to joint_7 + gripper)
Note on gripper normalization:
- OpenArms Mini gripper: 0-100 scale (0=closed, 100=open)
- OpenArms follower gripper: degrees (0=closed, -65=open)
- This script automatically converts between the two ranges
"""
import time
import sys
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
from lerobot.teleoperators.openarms_mini.openarms_mini import OpenArmsMini
from lerobot.teleoperators.openarms_mini.config_openarms_mini import OpenArmsMiniConfig
from lerobot.utils.robot_utils import busy_wait
# Target control frequency
TARGET_FPS = 30
# Configure the OpenArms follower (Damiao motors on CAN bus)
follower_config = OpenArmsFollowerConfig(
port_left="can0", # CAN interface for follower left arm
port_right="can1", # CAN interface for follower right arm
can_interface="socketcan", # Linux SocketCAN
id="openarms_follower",
disable_torque_on_disconnect=True,
max_relative_target=10.0, # Safety limit (degrees per step)
)
# Configure the OpenArms Mini leader (Feetech motors on serial)
leader_config = OpenArmsMiniConfig(
port_right="/dev/ttyACM0", # Serial port for right arm
port_left="/dev/ttyACM1", # Serial port for left arm
id="openarms_mini",
use_degrees=True,
)
print("OpenArms Mini → OpenArms Follower Teleoperation")
# Initialize devices
follower = OpenArmsFollower(follower_config)
leader = OpenArmsMini(leader_config)
# Connect and calibrate follower
print("Note: If you have existing calibration, just press ENTER to use it.")
follower.connect(calibrate=True)
# Connect and calibrate leader
print("Note: The leader arms will have torque disabled for manual control.")
leader.connect(calibrate=True)
print("\nPress ENTER to start teleoperation...")
input()
print("Press Ctrl+C to stop.\n")
# All joints for both arms (16 motors total)
all_joints = [
# Right arm
"right_joint_1",
"right_joint_2",
"right_joint_3",
"right_joint_4",
"right_joint_5",
"right_joint_6",
"right_joint_7",
"right_gripper",
# Left arm
"left_joint_1",
"left_joint_2",
"left_joint_3",
"left_joint_4",
"left_joint_5",
"left_joint_6",
"left_joint_7",
"left_gripper",
]
# Performance monitoring
loop_times = []
avg_loop_time = 0.0
min_loop_time = float('inf')
max_loop_time = 0.0
stats_update_interval = 1.0 # Update stats every 1 second
last_stats_update = time.perf_counter()
SWAPPED_JOINTS = {
"right_joint_6": "right_joint_7",
"right_joint_7": "right_joint_6",
"left_joint_6": "left_joint_7",
"left_joint_7": "left_joint_6",
}
try:
while True:
loop_start = time.perf_counter()
# Get actions and observations
leader_action = leader.get_action()
follower_obs = follower.get_observation()
joint_action = {}
for joint in all_joints:
leader_key = f"{joint}.pos"
# Determine which follower joint this leader joint controls
follower_joint = SWAPPED_JOINTS.get(joint, joint)
follower_key = f"{follower_joint}.pos"
# Get leader position (default 0 if missing)
pos = leader_action.get(leader_key, 0.0)
# Convert gripper values: Mini uses 0-100, OpenArms uses 0 to -65 degrees
if "gripper" in joint:
# Map 0-100 (Mini) to 0 to -65 (OpenArms)
# 0 (closed) -> 0°, 100 (open) -> -65°
pos = (pos / 100.0) * -65.0
# Store in action dict for follower
joint_action[follower_key] = pos
follower.send_action(joint_action)
# Loop timing
loop_end = time.perf_counter()
loop_time = loop_end - loop_start
loop_times.append(loop_time)
# Update stats periodically
current_time = time.perf_counter()
if current_time - last_stats_update >= stats_update_interval:
if loop_times:
avg_loop_time = sum(loop_times) / len(loop_times)
min_loop_time = min(loop_times)
max_loop_time = max(loop_times)
loop_times = []
last_stats_update = current_time
# Display everything
sys.stdout.write("\033[H\033[J") # Clear screen
# Show timing stats at the top
if avg_loop_time > 0:
avg_hz = 1.0 / avg_loop_time
min_hz = 1.0 / max_loop_time if max_loop_time > 0 else 0
max_hz = 1.0 / min_loop_time if min_loop_time > 0 and min_loop_time < float('inf') else 0
print(f"[Performance] Target: {TARGET_FPS} Hz | Avg: {avg_hz:.1f} Hz | Range: {min_hz:.1f}-{max_hz:.1f} Hz | Loop: {avg_loop_time*1000:.1f} ms\n")
else:
print(f"[Performance] Target: {TARGET_FPS} Hz | Measuring...\n")
# Show joint positions
print(f"{'Joint':<20} {'Leader':>15} {'Follower':>15}")
print(f"{'':20} {'(0-100/deg)':>15} {'(deg)':>15}")
print("-" * 52)
for joint in all_joints:
leader_key = f"{joint}.pos"
follower_joint = SWAPPED_JOINTS.get(joint, joint)
follower_key = f"{follower_joint}.pos"
leader_pos = leader_action.get(leader_key, 0.0)
follower_pos = follower_obs.get(follower_key, 0.0)
print(f"{joint:<20} {leader_pos:>15.2f} {follower_pos:>15.2f}")
# Smart sleep to maintain target FPS
dt_s = time.perf_counter() - loop_start
busy_wait(max(0, 1.0 / TARGET_FPS - dt_s))
except KeyboardInterrupt:
print("\n\nStopping teleoperation...")
finally:
# Disconnect devices
print("Disconnecting devices...")
try:
follower.disconnect()
except Exception as e:
print(f"Error disconnecting follower: {e}")
try:
leader.disconnect()
except Exception as e:
print(f"Error disconnecting leader: {e}")
print("Done!")
@@ -1,202 +0,0 @@
"""
OpenArms Teleoperation with Gravity + Friction Compensation
Leader arms (both LEFT and RIGHT): Gravity + Friction compensation (weightless, easy to move)
Follower arms (both LEFT and RIGHT): Mirror leader movements
Uses the URDF file from the lerobot repository.
"""
import time
import numpy as np
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.teleoperators.openarms.config_openarms_leader import OpenArmsLeaderConfig
from lerobot.teleoperators.openarms.openarms_leader import OpenArmsLeader
# Friction compensation scale factor (1.0 = full, 0.3 = 30% for stability)
FRICTION_SCALE = 1.0
def main():
"""Main teleoperation loop with gravity compensation"""
print("=" * 70)
print("OpenArms Teleoperation with Gravity Compensation")
print("=" * 70)
# Configuration
follower_config = OpenArmsFollowerConfig(
port_left="can2",
port_right="can3",
can_interface="socketcan",
id="openarms_follower",
disable_torque_on_disconnect=True,
max_relative_target=10.0,
)
leader_config = OpenArmsLeaderConfig(
port_left="can0",
port_right="can1",
can_interface="socketcan",
id="openarms_leader",
manual_control=False, # Enable torque control for gravity compensation
)
# Initialize and connect
print("\nInitializing devices...")
follower = OpenArmsFollower(follower_config)
leader = OpenArmsLeader(leader_config)
follower.connect()
leader.connect()
# URDF is automatically loaded in the leader constructor
if leader.pin_robot is None:
raise RuntimeError("URDF model not loaded on leader. Gravity compensation not available.")
print("\nLeader BOTH arms: Gravity + Friction comp | Follower BOTH arms: Teleop")
print("Press ENTER to start...")
input()
# Enable motors on both leader arms for gravity compensation
leader.bus_right.enable_torque()
leader.bus_left.enable_torque()
time.sleep(0.1)
print("Press Ctrl+C to stop\n")
# Main control loop
loop_times = []
last_print_time = time.perf_counter()
# All joints (both arms)
all_joints = []
for motor in leader.bus_right.motors:
all_joints.append(f"right_{motor}")
for motor in leader.bus_left.motors:
all_joints.append(f"left_{motor}")
try:
while True:
loop_start = time.perf_counter()
# Get leader state
leader_action = leader.get_action()
# Extract positions and velocities in degrees
leader_positions_deg = {}
leader_velocities_deg_per_sec = {}
for motor in leader.bus_right.motors:
pos_key = f"right_{motor}.pos"
vel_key = f"right_{motor}.vel"
if pos_key in leader_action:
leader_positions_deg[f"right_{motor}"] = leader_action[pos_key]
if vel_key in leader_action:
leader_velocities_deg_per_sec[f"right_{motor}"] = leader_action[vel_key]
for motor in leader.bus_left.motors:
pos_key = f"left_{motor}.pos"
vel_key = f"left_{motor}.vel"
if pos_key in leader_action:
leader_positions_deg[f"left_{motor}"] = leader_action[pos_key]
if vel_key in leader_action:
leader_velocities_deg_per_sec[f"left_{motor}"] = leader_action[vel_key]
# Calculate gravity torques for leader using built-in method
leader_positions_rad = {k: np.deg2rad(v) for k, v in leader_positions_deg.items()}
leader_gravity_torques_nm = leader._gravity_from_q(leader_positions_rad)
# Calculate friction torques for leader using built-in method
leader_velocities_rad_per_sec = {k: np.deg2rad(v) for k, v in leader_velocities_deg_per_sec.items()}
leader_friction_torques_nm = leader._friction_from_velocity(
leader_velocities_rad_per_sec,
friction_scale=FRICTION_SCALE
)
# Combine gravity + friction torques
leader_total_torques_nm = {}
for motor_name in leader_gravity_torques_nm:
gravity = leader_gravity_torques_nm.get(motor_name, 0.0)
friction = leader_friction_torques_nm.get(motor_name, 0.0)
leader_total_torques_nm[motor_name] = gravity + friction
# Apply gravity + friction compensation to leader RIGHT arm (all joints including gripper)
for motor in leader.bus_right.motors:
full_name = f"right_{motor}"
position = leader_positions_deg.get(full_name, 0.0)
torque = leader_total_torques_nm.get(full_name, 0.0)
# Get damping gain for stability
kd = leader.get_damping_kd(motor)
leader.bus_right._mit_control(
motor=motor,
kp=0.0,
kd=kd, # Add damping for stability
position_degrees=position,
velocity_deg_per_sec=0.0,
torque=torque,
)
# Apply gravity + friction compensation to leader LEFT arm (all joints including gripper)
for motor in leader.bus_left.motors:
full_name = f"left_{motor}"
position = leader_positions_deg.get(full_name, 0.0)
torque = leader_total_torques_nm.get(full_name, 0.0)
# Get damping gain for stability
kd = leader.get_damping_kd(motor)
leader.bus_left._mit_control(
motor=motor,
kp=0.0,
kd=kd, # Add damping for stability
position_degrees=position,
velocity_deg_per_sec=0.0,
torque=torque,
)
# Send leader positions to follower (both arms)
follower_action = {}
for joint in all_joints:
pos_key = f"{joint}.pos"
if pos_key in leader_action:
follower_action[pos_key] = leader_action[pos_key]
if follower_action:
follower.send_action(follower_action)
# Performance monitoring
loop_end = time.perf_counter()
loop_time = loop_end - loop_start
loop_times.append(loop_time)
if loop_end - last_print_time >= 2.0:
if loop_times:
avg_time = sum(loop_times) / len(loop_times)
current_hz = 1.0 / avg_time if avg_time > 0 else 0
print(f"{current_hz:.1f} Hz ({avg_time*1000:.1f} ms)")
loop_times = []
last_print_time = loop_end
except KeyboardInterrupt:
print("\n\nStopping...")
finally:
try:
leader.bus_right.disable_torque()
leader.bus_left.disable_torque()
time.sleep(0.1)
leader.disconnect()
follower.disconnect()
print("✓ Shutdown complete")
except Exception as e:
print(f"Shutdown error: {e}")
if __name__ == "__main__":
main()
-745
View File
@@ -1,745 +0,0 @@
body {
margin: 0;
padding: 0;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, sans-serif;
background: #f5f5f5;
}
main {
min-height: 100vh;
padding: 2rem;
}
header {
text-align: center;
margin-bottom: 2rem;
}
h1 {
font-size: 2rem;
font-weight: 600;
color: #333;
margin: 0;
}
h2 {
font-size: 1.25rem;
font-weight: 600;
color: #333;
margin: 0 0 1rem 0;
}
h3 {
font-size: 0.875rem;
font-weight: 600;
color: #666;
margin: 0 0 0.5rem 0;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.container {
max-width: 1920px;
margin: 0 auto;
display: grid;
grid-template-columns: minmax(500px, 600px) 1fr;
gap: 2rem;
align-items: start;
}
/* Left column container */
.left-column {
display: flex;
flex-direction: column;
gap: 1.5rem;
}
/* Right column container */
.right-column {
display: flex;
flex-direction: column;
gap: 1.5rem;
}
/* Responsive: Stack on smaller screens */
@media (max-width: 1200px) {
.container {
grid-template-columns: 1fr;
}
}
.panel {
background: white;
border-radius: 8px;
padding: 1.5rem;
box-shadow: 0 1px 3px rgba(0,0,0,0.1);
}
.config-panel {
border: 2px solid #e5e7eb;
}
.config-header {
display: flex;
justify-content: space-between;
align-items: center;
cursor: pointer;
user-select: none;
padding: 0.5rem 0;
}
.config-header:hover {
opacity: 0.7;
}
.toggle-icon {
font-size: 1rem;
color: #6b7280;
transition: transform 0.2s;
}
.config-content {
margin-top: 1rem;
padding-top: 1rem;
border-top: 1px solid #e5e7eb;
}
.robot-setup {
margin-bottom: 0.5rem;
}
.robot-status {
display: flex;
align-items: center;
justify-content: space-between;
padding: 1rem;
border-radius: 6px;
font-weight: 500;
gap: 1rem;
}
.robot-status.ready {
background: linear-gradient(135deg, #d1fae5 0%, #a7f3d0 100%);
color: #065f46;
border: 1px solid #10b981;
}
.robot-status.not-ready {
background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%);
color: #92400e;
border: 1px solid #f59e0b;
}
.btn-setup {
background: #10b981;
color: white;
border: none;
padding: 0.5rem 1rem;
border-radius: 4px;
font-size: 0.875rem;
font-weight: 500;
cursor: pointer;
transition: background 0.2s;
}
.btn-setup:hover:not(:disabled) {
background: #059669;
}
.btn-setup:disabled {
background: #d1d5db;
cursor: not-allowed;
}
.btn-zero {
background: #8b5cf6;
color: white;
border: none;
padding: 0.5rem 1rem;
border-radius: 4px;
font-size: 0.875rem;
font-weight: 500;
cursor: pointer;
transition: background 0.2s;
}
.btn-zero:hover:not(:disabled) {
background: #7c3aed;
}
.btn-zero:disabled {
background: #d1d5db;
cursor: not-allowed;
}
.zero-position-section {
margin-top: 1rem;
padding-top: 1rem;
border-top: 1px solid #e5e7eb;
}
.btn-zero-large {
width: 100%;
background: #8b5cf6;
color: white;
border: none;
padding: 0.875rem 1.5rem;
border-radius: 8px;
font-size: 1rem;
font-weight: 600;
cursor: pointer;
transition: all 0.2s;
box-shadow: 0 2px 4px rgba(139, 92, 246, 0.2);
}
.btn-zero-large:hover:not(:disabled) {
background: #7c3aed;
box-shadow: 0 4px 8px rgba(139, 92, 246, 0.3);
transform: translateY(-1px);
}
.btn-zero-large:disabled {
background: #d1d5db;
cursor: not-allowed;
box-shadow: none;
transform: none;
}
.delete-episode-section {
margin-top: 1rem;
padding-top: 1rem;
border-top: 1px solid #e5e7eb;
}
.btn-delete {
width: 100%;
background: #ef4444;
color: white;
border: none;
padding: 0.875rem 1.5rem;
border-radius: 8px;
font-size: 1rem;
font-weight: 600;
cursor: pointer;
transition: all 0.2s;
box-shadow: 0 2px 4px rgba(239, 68, 68, 0.2);
}
.btn-delete:hover:not(:disabled) {
background: #dc2626;
box-shadow: 0 4px 8px rgba(239, 68, 68, 0.3);
transform: translateY(-1px);
}
.btn-delete:disabled {
background: #d1d5db;
cursor: not-allowed;
box-shadow: none;
transform: none;
}
.delete-info {
margin-top: 0.5rem;
font-size: 0.875rem;
color: #666;
text-align: center;
font-style: italic;
}
.btn-disconnect {
background: #ef4444;
color: white;
border: none;
padding: 0.5rem 1rem;
border-radius: 4px;
font-size: 0.875rem;
font-weight: 500;
cursor: pointer;
transition: background 0.2s;
}
.btn-disconnect:hover {
background: #dc2626;
}
.btn-refresh {
background: #3b82f6;
color: white;
border: none;
padding: 0.4rem 0.8rem;
border-radius: 4px;
font-size: 0.75rem;
font-weight: 500;
cursor: pointer;
transition: background 0.2s;
}
.btn-refresh:hover:not(:disabled) {
background: #2563eb;
}
.btn-refresh:disabled {
background: #d1d5db;
cursor: not-allowed;
}
.control-panel {
border: 2px solid #10b981;
}
.status-banner {
display: flex;
align-items: center;
gap: 1rem;
padding: 1rem 1.5rem;
border-radius: 6px;
margin-bottom: 1.5rem;
font-weight: 500;
font-size: 0.95rem;
}
.status-banner.initializing {
background: linear-gradient(135deg, #dbeafe 0%, #bfdbfe 100%);
color: #1e40af;
border-left: 4px solid #3b82f6;
}
.status-banner.encoding {
background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%);
color: #92400e;
border-left: 4px solid #f59e0b;
}
.status-banner.uploading {
background: linear-gradient(135deg, #e0e7ff 0%, #c7d2fe 100%);
color: #3730a3;
border-left: 4px solid #6366f1;
}
.status-banner.success {
background: linear-gradient(135deg, #d1fae5 0%, #a7f3d0 100%);
color: #065f46;
border-left: 4px solid #10b981;
}
.status-banner.warning {
background: linear-gradient(135deg, #fee2e2 0%, #fecaca 100%);
color: #991b1b;
border-left: 4px solid #ef4444;
}
.spinner {
width: 20px;
height: 20px;
border: 3px solid rgba(0, 0, 0, 0.1);
border-top-color: currentColor;
border-radius: 50%;
animation: spin 0.8s linear infinite;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
.control-horizontal {
display: flex;
flex-direction: column;
gap: 1.5rem;
}
.control-left {
display: flex;
flex-direction: column;
gap: 1rem;
}
.control-right {
display: flex;
align-items: center;
justify-content: center;
}
.input-group {
display: flex;
gap: 0.5rem;
margin-bottom: 0;
}
input[type="text"] {
flex: 1;
padding: 0.75rem;
border: 1px solid #ddd;
border-radius: 4px;
font-size: 1rem;
}
input[type="text"]:disabled {
background: #f5f5f5;
cursor: not-allowed;
}
input[type="text"]:focus {
outline: none;
border-color: #10b981;
}
button {
padding: 0.75rem 1.5rem;
border: none;
border-radius: 4px;
font-size: 1rem;
font-weight: 500;
cursor: pointer;
transition: all 0.2s;
}
.btn-set-task {
background: #3b82f6;
color: white;
min-width: 120px;
}
.btn-set-task:hover:not(:disabled) {
background: #2563eb;
}
.btn-set-task:disabled {
background: #d1d5db;
cursor: not-allowed;
}
.btn-start {
background: #10b981;
color: white;
}
.btn-start:hover:not(:disabled) {
background: #059669;
}
.btn-start:disabled {
background: #d1d5db;
cursor: not-allowed;
}
.btn-stop {
background: #ef4444;
color: white;
}
.btn-stop:hover {
background: #dc2626;
}
.btn-reset {
padding: 0.5rem 1rem;
background: #6b7280;
color: white;
font-size: 0.875rem;
}
.btn-reset:hover {
background: #4b5563;
}
.status {
display: flex;
align-items: center;
gap: 0.75rem;
padding: 1rem;
border-radius: 4px;
margin-bottom: 1rem;
}
.status.recording {
background: #fee2e2;
color: #991b1b;
}
.status.recording.recording-active {
display: flex;
flex-direction: column;
gap: 1rem;
background: #dc2626;
color: white;
padding: 1.5rem;
border: 4px solid #991b1b;
box-shadow: 0 4px 12px rgba(220, 38, 38, 0.4);
font-weight: 700;
font-size: 1rem;
}
.status.recording.recording-active .indicator {
width: 20px;
height: 20px;
background: #fef2f2;
animation: pulse-strong 1s ease-in-out infinite;
}
@keyframes pulse-strong {
0%, 100% {
opacity: 1;
transform: scale(1);
}
50% {
opacity: 0.7;
transform: scale(1.1);
}
}
.status.recording.recording-active .time-display {
display: flex;
flex-direction: column;
gap: 0.5rem;
font-size: 1.5rem;
font-weight: 700;
color: white;
}
.fps-display {
font-size: 1rem;
font-weight: 500;
opacity: 0.95;
}
.fps-warning {
color: #fef2f2;
animation: pulse-warning 1s ease-in-out infinite;
}
@keyframes pulse-warning {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
.status.recording.recording-active .btn-stop {
align-self: stretch;
}
.ramp-up-countdown {
display: flex;
justify-content: center;
margin-bottom: 1rem;
}
.countdown-box {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
padding: 2rem 3rem;
background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%);
border: 4px solid #f59e0b;
border-radius: 16px;
box-shadow: 0 6px 20px rgba(245, 158, 11, 0.4);
min-width: 280px;
animation: pulse-warm 1.5s ease-in-out infinite;
}
@keyframes pulse-warm {
0%, 100% {
box-shadow: 0 6px 20px rgba(245, 158, 11, 0.4);
}
50% {
box-shadow: 0 6px 25px rgba(245, 158, 11, 0.6);
}
}
.countdown-label {
font-size: 1rem;
color: #92400e;
text-transform: uppercase;
letter-spacing: 1.5px;
font-weight: 800;
margin-bottom: 1rem;
text-align: center;
}
.countdown-value {
font-size: 4.5rem;
font-weight: 900;
color: #d97706;
font-family: 'Courier New', monospace;
line-height: 1;
text-shadow: 2px 2px 6px rgba(0, 0, 0, 0.15);
margin-bottom: 0.5rem;
}
.countdown-subtitle {
font-size: 0.875rem;
color: #78350f;
font-weight: 600;
font-style: italic;
text-align: center;
margin-top: 0.5rem;
}
.status.idle {
background: #f3f4f6;
color: #374151;
}
.indicator {
width: 12px;
height: 12px;
border-radius: 50%;
background: #ef4444;
animation: pulse 1.5s ease-in-out infinite;
}
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
.counter {
display: flex;
flex-direction: column;
align-items: center;
gap: 0.75rem;
padding: 1.5rem;
background: linear-gradient(135deg, #f9fafb 0%, #f3f4f6 100%);
border-radius: 8px;
border: 2px solid #e5e7eb;
min-width: 200px;
}
.counter-label {
font-size: 0.75rem;
color: #6b7280;
text-transform: uppercase;
letter-spacing: 0.5px;
font-weight: 600;
}
.counter-value {
font-size: 3rem;
font-weight: 700;
color: #10b981;
line-height: 1;
}
.time-display {
font-size: 1.5rem;
font-weight: 600;
font-family: 'Courier New', monospace;
}
.error-box {
padding: 1rem;
background: #fee2e2;
color: #991b1b;
border-radius: 4px;
border-left: 4px solid #ef4444;
font-size: 0.875rem;
}
.config-section {
margin-bottom: 1.5rem;
}
.config-section:last-child {
margin-bottom: 0;
}
.config-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 1rem;
}
label {
display: flex;
flex-direction: column;
gap: 0.5rem;
font-size: 0.875rem;
color: #374151;
font-weight: 500;
}
select {
padding: 0.5rem;
border: 1px solid #ddd;
border-radius: 4px;
font-size: 0.875rem;
background: white;
}
select:disabled {
background: #f5f5f5;
cursor: not-allowed;
}
/* Camera Layout */
.camera-layout {
display: flex;
flex-direction: column;
gap: 1.5rem;
}
.camera-base {
width: 100%;
}
.camera-wrist-container {
display: grid;
grid-template-columns: repeat(2, 1fr);
gap: 1.5rem;
}
.camera-wrist {
width: 100%;
}
.camera {
border: 1px solid #e5e7eb;
border-radius: 4px;
overflow: hidden;
}
.camera h3 {
padding: 0.75rem;
background: #f9fafb;
border-bottom: 1px solid #e5e7eb;
margin: 0;
}
.camera img {
width: 100%;
height: auto;
display: block;
background: #000;
min-height: 300px;
object-fit: cover;
}
.camera-placeholder {
text-align: center;
padding: 4rem 2rem;
background: #f9fafb;
border-radius: 4px;
border: 2px dashed #d1d5db;
}
.camera-placeholder p {
margin: 0.5rem 0;
font-size: 1rem;
color: #6b7280;
}
.camera-placeholder p:first-child {
font-size: 1.25rem;
font-weight: 500;
color: #374151;
}
.hint {
margin-top: 0.5rem;
font-size: 0.75rem;
color: #6b7280;
display: flex;
align-items: center;
gap: 0.5rem;
flex-wrap: wrap;
}
-857
View File
@@ -1,857 +0,0 @@
import { useState, useEffect, useCallback, useRef } from 'react';
import './App.css';
const API_BASE = 'http://localhost:8000/api';
function App() {
// State
const [task, setTask] = useState('');
const [isRecording, setIsRecording] = useState(false);
const [isInitializing, setIsInitializing] = useState(false);
const [isEncoding, setIsEncoding] = useState(false);
const [isUploading, setIsUploading] = useState(false);
const [robotsReady, setRobotsReady] = useState(false);
const [elapsedTime, setElapsedTime] = useState(0);
const [currentFps, setCurrentFps] = useState(0);
const [loopFps, setLoopFps] = useState(0);
const [episodeCount, setEpisodeCount] = useState(0);
const [error, setError] = useState(null);
const [statusMessage, setStatusMessage] = useState('Ready');
const [uploadStatus, setUploadStatus] = useState(null);
const [rampUpRemaining, setRampUpRemaining] = useState(0);
const [movingToZero, setMovingToZero] = useState(false);
const [configExpanded, setConfigExpanded] = useState(false);
const [latestRepoId, setLatestRepoId] = useState(null);
// Configuration
const [config, setConfig] = useState({
leader_type: 'openarms', // 'openarms' or 'openarms_mini'
leader_left: 'can0',
leader_right: 'can1',
follower_left: 'can2',
follower_right: 'can3',
left_wrist: '/dev/video0',
right_wrist: '/dev/video1',
base: '/dev/video4'
});
// Available options
const [availableCameras, setAvailableCameras] = useState([]);
const [availableUsbPorts, setAvailableUsbPorts] = useState([]);
const canInterfaces = ['can0', 'can1', 'can2', 'can3'];
const statusIntervalRef = useRef(null);
const hasInitializedRef = useRef(false);
const loadConfig = () => {
try {
const saved = localStorage.getItem('openarms_config');
if (saved) {
const loadedConfig = JSON.parse(saved);
setConfig(prev => ({ ...prev, ...loadedConfig }));
}
} catch (e) {
console.error('Load config error:', e);
}
};
const saveConfig = (newConfig) => {
try {
localStorage.setItem('openarms_config', JSON.stringify(newConfig || config));
} catch (e) {
console.error('Save config error:', e);
}
};
// Fetch status periodically
const fetchStatus = async () => {
try {
const response = await fetch(`${API_BASE}/status`);
const data = await response.json();
setIsRecording(data.is_recording);
setIsInitializing(data.is_initializing);
setIsEncoding(data.is_encoding);
setIsUploading(data.is_uploading);
setRobotsReady(data.robots_ready);
setElapsedTime(data.elapsed_time);
setCurrentFps(data.current_fps || 0);
setLoopFps(data.loop_fps || 0);
setEpisodeCount(data.episode_count);
setError(data.error);
setStatusMessage(data.status_message || 'Ready');
setUploadStatus(data.upload_status);
setRampUpRemaining(data.ramp_up_remaining || 0);
setMovingToZero(data.moving_to_zero || false);
// Track the latest repo_id from the backend
if (data.latest_repo_id) {
setLatestRepoId(data.latest_repo_id);
}
if (data.config) {
// Only merge server config if we don't have a saved config (first load)
if (!localStorage.getItem('openarms_config')) {
setConfig(prev => {
const merged = { ...data.config, ...prev };
localStorage.setItem('openarms_config', JSON.stringify(merged));
return merged;
});
}
}
} catch (e) {
console.error('Failed to fetch status:', e);
}
};
const setupRobots = async () => {
// Show warning to verify camera positions
const confirmed = window.confirm(
'⚠️ IMPORTANT: Before connecting robots, please verify:\n\n' +
'📹 Check that cameras are correctly positioned:\n' +
' • LEFT wrist camera is actually on the LEFT arm\n' +
' • RIGHT wrist camera is actually on the RIGHT arm\n' +
' • BASE camera is actually the BASE/overhead camera\n\n' +
'Incorrect camera positioning will result in invalid training data!\n\n' +
'Click OK to continue with robot setup, or Cancel to review configuration.'
);
if (!confirmed) {
return; // User cancelled, don't proceed
}
setError(null);
try {
const response = await fetch(`${API_BASE}/robots/setup`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(config)
});
if (!response.ok) {
const data = await response.json();
throw new Error(data.detail || 'Failed to setup robots');
}
await response.json();
saveConfig(config);
} catch (e) {
setError(`Robot setup failed: ${e.message}`);
}
};
// Disconnect robots
const disconnectRobots = async () => {
try {
await fetch(`${API_BASE}/robots/disconnect`, { method: 'POST' });
setRobotsReady(false);
} catch (e) {
console.error('Failed to disconnect robots:', e);
}
};
// Discover cameras
const discoverCameras = async () => {
try {
const response = await fetch(`${API_BASE}/cameras/discover`);
const data = await response.json();
const cameras = data.cameras || [];
setAvailableCameras(cameras);
// Get list of valid camera IDs
const validCameraIds = cameras.map(cam => String(cam.id));
// Auto-fix config if current values are invalid or not set
const updated = { ...config };
let changed = false;
// Auto-fix invalid camera config
if (!config.left_wrist || !validCameraIds.includes(config.left_wrist)) {
if (cameras.length >= 1) {
updated.left_wrist = String(cameras[0].id);
changed = true;
}
}
if (!config.right_wrist || !validCameraIds.includes(config.right_wrist)) {
if (cameras.length >= 2) {
updated.right_wrist = String(cameras[1].id);
changed = true;
}
}
if (!config.base || !validCameraIds.includes(config.base)) {
if (cameras.length >= 3) {
updated.base = String(cameras[2].id);
changed = true;
}
}
if (changed) {
setConfig(updated);
saveConfig(updated);
}
if (cameras.length === 0) {
setError('No cameras detected! Please connect cameras and refresh.');
}
} catch (e) {
console.error('Failed to discover cameras:', e);
setError(`Camera discovery failed: ${e.message}`);
}
};
// Discover USB ports
const discoverUsbPorts = async () => {
try {
const response = await fetch(`${API_BASE}/usb/discover`);
const data = await response.json();
const ports = data.ports || [];
setAvailableUsbPorts(ports);
// Auto-fix config if OpenArms Mini is selected and ports are invalid
if (config.leader_type === 'openarms_mini') {
const updated = { ...config };
let changed = false;
if (ports.length >= 1 && !ports.includes(config.leader_left)) {
updated.leader_left = ports[0];
changed = true;
}
if (ports.length >= 2 && !ports.includes(config.leader_right)) {
updated.leader_right = ports[1];
changed = true;
}
if (changed) {
setConfig(updated);
saveConfig(updated);
}
}
if (ports.length === 0) {
console.warn('No USB ports detected for OpenArms Mini');
}
} catch (e) {
console.error('Failed to discover USB ports:', e);
}
};
// Set task only (for pedal use)
const setTaskOnly = async () => {
if (!task.trim()) {
setError('Please enter a task description');
return;
}
setError(null);
try {
const response = await fetch(`${API_BASE}/recording/set-task`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ task, ...config })
});
if (!response.ok) {
const data = await response.json();
throw new Error(data.detail || 'Failed to set task');
}
const result = await response.json();
setStatusMessage(result.message || `Task set: ${task}`);
saveConfig(config);
// Clear success message after 3 seconds
setTimeout(() => {
if (!isRecording && !isInitializing) {
setStatusMessage('Ready');
}
}, 3000);
} catch (e) {
setError(e.message);
}
};
// Start recording
const startRecording = async () => {
if (!task.trim()) {
setError('Please enter a task description');
return;
}
setError(null);
try {
const response = await fetch(`${API_BASE}/recording/start`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ task, ...config })
});
if (!response.ok) {
const data = await response.json();
throw new Error(data.detail || 'Failed to start recording');
}
await response.json();
saveConfig(config);
} catch (e) {
setError(e.message);
}
};
// Stop recording
const stopRecording = async () => {
try {
const response = await fetch(`${API_BASE}/recording/stop`, {
method: 'POST'
});
if (!response.ok) {
const data = await response.json();
throw new Error(data.detail || 'Failed to stop recording');
}
const data = await response.json();
setError(null);
// Update latest repo_id after recording
if (data.dataset_name) {
setLatestRepoId(`lerobot-data-collection/${data.dataset_name}`);
}
} catch (e) {
setError(e.message);
}
};
const deleteLatestEpisode = async () => {
if (!latestRepoId) {
setError('No episode to delete');
return;
}
const confirmed = window.confirm(
`WARNING: This will permanently delete the repository:\n\n${latestRepoId}\n\nThis action cannot be undone. Continue?`
);
if (!confirmed) {
return;
}
try {
const response = await fetch(`${API_BASE}/recording/delete-latest`, { method: 'POST' });
if (!response.ok) {
const data = await response.json();
throw new Error(data.detail || 'Failed to delete episode');
}
const data = await response.json();
setLatestRepoId(null);
setEpisodeCount(Math.max(0, episodeCount - 1));
setStatusMessage(`Deleted: ${data.deleted_repo}`);
setTimeout(() => {
if (!isRecording && !isInitializing) {
setStatusMessage('Ready');
}
}, 3000);
} catch (e) {
setError(`Delete failed: ${e.message}`);
}
};
// Reset counter
const resetCounter = async () => {
try {
await fetch(`${API_BASE}/counter/reset`, { method: 'POST' });
setEpisodeCount(0);
} catch (e) {
console.error('Failed to reset counter:', e);
}
};
// Move robot to zero position
const moveToZero = async () => {
setError(null);
try {
const response = await fetch(`${API_BASE}/robots/move-to-zero`, { method: 'POST' });
if (!response.ok) {
const data = await response.json();
throw new Error(data.detail || 'Failed to move to zero position');
}
await response.json();
} catch (e) {
setError(`Move to zero failed: ${e.message}`);
}
};
// Format time as MM:SS
const formatTime = (seconds) => {
const mins = Math.floor(seconds / 60);
const secs = Math.floor(seconds % 60);
return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`;
};
// Update config and save
const updateConfig = (key, value) => {
const updated = { ...config, [key]: value };
setConfig(updated);
saveConfig(updated);
};
// Initialize on mount only
useEffect(() => {
// Prevent double-initialization in development
if (hasInitializedRef.current) {
return;
}
hasInitializedRef.current = true;
loadConfig();
discoverCameras();
discoverUsbPorts();
fetchStatus();
statusIntervalRef.current = setInterval(fetchStatus, 1000);
return () => {
if (statusIntervalRef.current) {
clearInterval(statusIntervalRef.current);
}
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []); // Run only once on mount
// Discover USB ports when leader type changes to Mini
useEffect(() => {
if (config.leader_type === 'openarms_mini') {
discoverUsbPorts();
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [config.leader_type]);
return (
<main>
<header>
<h1>OpenArms Recording</h1>
</header>
<div className="container">
{/* Left Column: Configuration and Recording Control */}
<div className="left-column">
{/* Configuration Panel */}
<section className="panel config-panel">
<div
className="config-header"
onClick={() => setConfigExpanded(!configExpanded)}
role="button"
tabIndex={0}
onKeyDown={(e) => e.key === 'Enter' && setConfigExpanded(!configExpanded)}
>
<h2> Configuration</h2>
<span className="toggle-icon">{configExpanded ? '▼' : '▶'}</span>
</div>
{configExpanded && (
<div className="config-content">
{/* Robot Setup */}
<div className="config-section">
<h3>🤖 Robot Setup</h3>
<div className="robot-setup">
{robotsReady ? (
<div className="robot-status ready">
<span> Robots Ready - Recording will start instantly</span>
<button onClick={disconnectRobots} className="btn-disconnect">
Disconnect Robots
</button>
</div>
) : (
<div className="robot-status not-ready">
<span> Robots not initialized - Recording will take ~10 seconds</span>
<button
onClick={setupRobots}
disabled={isRecording || isInitializing}
className="btn-setup"
>
🚀 Setup Robots
</button>
</div>
)}
</div>
</div>
{/* Leader Type Selection */}
<div className="config-section">
<h3>🎮 Leader Type</h3>
<div className="config-grid">
<label style={{gridColumn: '1 / -1'}}>
Leader Arm Type
<select
value={config.leader_type}
onChange={(e) => updateConfig('leader_type', e.target.value)}
disabled={isRecording || robotsReady}
>
<option value="openarms">OpenArms (CAN Bus - Damiao Motors)</option>
<option value="openarms_mini">OpenArms Mini (USB - Feetech Motors)</option>
</select>
</label>
</div>
</div>
{/* Leader Interfaces (CAN or USB based on type) */}
<div className="config-section">
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: '0.5rem' }}>
<h3>
{config.leader_type === 'openarms_mini'
? `Leader Ports (USB/Serial) ${availableUsbPorts.length > 0 ? `(${availableUsbPorts.length} detected)` : ''}`
: 'Leader Interfaces (CAN)'}
</h3>
{config.leader_type === 'openarms_mini' && (
<button
onClick={discoverUsbPorts}
className="btn-refresh"
disabled={isRecording || robotsReady}
>
🔄 Refresh
</button>
)}
</div>
<div className="config-grid">
<label>
Leader Left
<select
value={config.leader_left}
onChange={(e) => updateConfig('leader_left', e.target.value)}
disabled={isRecording || robotsReady}
>
{config.leader_type === 'openarms_mini' ? (
availableUsbPorts.length > 0 ? (
availableUsbPorts.map((port) => (
<option key={port} value={port}>{port}</option>
))
) : (
<option value="">No USB ports detected</option>
)
) : (
canInterfaces.map((iface) => (
<option key={iface} value={iface}>{iface}</option>
))
)}
</select>
</label>
<label>
Leader Right
<select
value={config.leader_right}
onChange={(e) => updateConfig('leader_right', e.target.value)}
disabled={isRecording || robotsReady}
>
{config.leader_type === 'openarms_mini' ? (
availableUsbPorts.length > 0 ? (
availableUsbPorts.map((port) => (
<option key={port} value={port}>{port}</option>
))
) : (
<option value="">No USB ports detected</option>
)
) : (
canInterfaces.map((iface) => (
<option key={iface} value={iface}>{iface}</option>
))
)}
</select>
</label>
</div>
</div>
{/* Follower CAN Interfaces */}
<div className="config-section">
<h3>Follower Interfaces (CAN)</h3>
<div className="config-grid">
<label>
Follower Left
<select
value={config.follower_left}
onChange={(e) => updateConfig('follower_left', e.target.value)}
disabled={isRecording || robotsReady}
>
{canInterfaces.map((iface) => (
<option key={iface} value={iface}>{iface}</option>
))}
</select>
</label>
<label>
Follower Right
<select
value={config.follower_right}
onChange={(e) => updateConfig('follower_right', e.target.value)}
disabled={isRecording || robotsReady}
>
{canInterfaces.map((iface) => (
<option key={iface} value={iface}>{iface}</option>
))}
</select>
</label>
</div>
</div>
{/* Camera Configuration */}
<div className="config-section">
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: '0.5rem' }}>
<h3>Cameras {availableCameras.length > 0 && `(${availableCameras.length} detected)`}</h3>
<button
onClick={discoverCameras}
className="btn-refresh"
disabled={isRecording || robotsReady}
>
🔄 Refresh
</button>
</div>
<div className="config-grid">
<label>
Left Wrist
<select
value={config.left_wrist}
onChange={(e) => updateConfig('left_wrist', e.target.value)}
disabled={isRecording || robotsReady}
>
{availableCameras.map((cam) => (
<option key={cam.id} value={String(cam.id)}>
{cam.name || `Camera @ ${cam.id}`}
</option>
))}
</select>
</label>
<label>
Right Wrist
<select
value={config.right_wrist}
onChange={(e) => updateConfig('right_wrist', e.target.value)}
disabled={isRecording || robotsReady}
>
{availableCameras.map((cam) => (
<option key={cam.id} value={String(cam.id)}>
{cam.name || `Camera @ ${cam.id}`}
</option>
))}
</select>
</label>
<label>
Base Camera
<select
value={config.base}
onChange={(e) => updateConfig('base', e.target.value)}
disabled={isRecording || robotsReady}
>
{availableCameras.map((cam) => (
<option key={cam.id} value={String(cam.id)}>
{cam.name || `Camera @ ${cam.id}`}
</option>
))}
</select>
</label>
</div>
</div>
</div>
)}
</section>
{/* Control Panel */}
<section className="panel control-panel">
<h2>🎬 Recording Control</h2>
{/* Status Banner - Always show important statuses */}
{isInitializing && (
<div className="status-banner initializing">
<div className="spinner"></div>
<span>{statusMessage}</span>
</div>
)}
{isEncoding && (
<div className="status-banner encoding">
<div className="spinner"></div>
<span>📹 {statusMessage}</span>
</div>
)}
{isUploading && (
<div className="status-banner uploading">
<div className="spinner"></div>
<span> {statusMessage}</span>
</div>
)}
{uploadStatus && !isRecording && !isEncoding && !isUploading && (
<div className={`status-banner ${uploadStatus.startsWith('✓') ? 'success' : 'warning'}`}>
<span>{uploadStatus}</span>
</div>
)}
<div className="control-horizontal">
{/* Task Input and Status */}
<div className="control-left">
<div className="input-group">
<input
type="text"
value={task}
onChange={(e) => setTask(e.target.value)}
placeholder="Task description (e.g., 'pick and place')"
disabled={isRecording || isInitializing || isEncoding || isUploading}
onKeyPress={(e) => {
if (e.key === 'Enter' && robotsReady) {
setTaskOnly();
}
}}
/>
<button
onClick={setTaskOnly}
disabled={isRecording || isInitializing || isEncoding || isUploading || !robotsReady}
className="btn-set-task"
title={!robotsReady ? 'Please setup robots first' : 'Store task for pedal use (Enter key)'}
>
💾 Set Task
</button>
<button
onClick={startRecording}
disabled={isRecording || isInitializing || isEncoding || isUploading || !robotsReady}
className="btn-start"
title={!robotsReady ? 'Please setup robots first' : ''}
>
{isInitializing
? '⏳ Initializing...'
: isRecording
? '⏺ Recording...'
: robotsReady
? '⏺ Start Recording'
: '⏺ Setup Robots First'}
</button>
</div>
{/* Ramp-up Countdown */}
{isRecording && rampUpRemaining > 0 && (
<div className="ramp-up-countdown">
<div className="countdown-box">
<div className="countdown-label"> WARMING UP - PID RAMP-UP</div>
<div className="countdown-value">{rampUpRemaining.toFixed(1)}s</div>
<div className="countdown-subtitle">Recording will start automatically...</div>
</div>
</div>
)}
{/* Recording Status - Only show after ramp-up */}
{isRecording && rampUpRemaining <= 0 && (
<div className="status recording recording-active">
<div className="indicator"></div>
<div className="time-display">
<span>{formatTime(elapsedTime)}</span>
<span className="fps-display">
Loop: {loopFps.toFixed(1)} Hz
{loopFps > 0 && loopFps < 29 && <span className="fps-warning"> </span>}
</span>
<span className="fps-display">Recording: {currentFps.toFixed(1)} FPS</span>
</div>
<button onClick={stopRecording} className="btn-stop">
Stop
</button>
</div>
)}
</div>
{/* Episode Counter */}
<div className="control-right">
<div className="counter">
<div className="counter-label">Episodes Recorded</div>
<div className="counter-value">{episodeCount}</div>
<button onClick={resetCounter} className="btn-reset">
Reset
</button>
</div>
</div>
</div>
{/* Delete Latest Episode Button */}
{!isRecording && !isInitializing && latestRepoId && (
<div className="delete-episode-section">
<button
onClick={deleteLatestEpisode}
className="btn-delete"
title="Delete the latest recorded episode from HuggingFace Hub"
>
Delete Latest Episode
</button>
<div className="delete-info">Will delete: {latestRepoId}</div>
</div>
)}
{/* Move to Zero Button */}
{robotsReady && !isRecording && !isInitializing && (
<div className="zero-position-section">
<button
onClick={moveToZero}
disabled={movingToZero}
className="btn-zero-large"
title="Move both leader and follower robots to zero position (2s)"
>
{movingToZero ? '⏳ Moving to Zero Position...' : '🎯 Move to Zero Position (Leader + Follower)'}
</button>
</div>
)}
{/* Error Display */}
{error && (
<div className="error-box">
{error}
</div>
)}
</section>
</div>
{/* Right Column: Camera Feeds */}
<div className="right-column">
<section className="panel cameras">
<h2>📹 Camera Views</h2>
{robotsReady || isRecording || isInitializing ? (
<div className="camera-layout">
{/* Base camera - full width */}
<div className="camera camera-base">
<h3>Base Camera</h3>
<img src={`${API_BASE}/camera/stream/base`} alt="Base Camera" />
</div>
{/* Wrist cameras - side by side */}
<div className="camera-wrist-container">
<div className="camera camera-wrist">
<h3>Left Wrist</h3>
<img src={`${API_BASE}/camera/stream/left_wrist`} alt="Left Wrist Camera" />
</div>
<div className="camera camera-wrist">
<h3>Right Wrist</h3>
<img src={`${API_BASE}/camera/stream/right_wrist`} alt="Right Wrist Camera" />
</div>
</div>
</div>
) : (
<div className="camera-placeholder">
<p>📷 Camera feeds will appear when robots are set up</p>
<p className="hint">Click "Setup Robots" above to preview camera feeds</p>
</div>
)}
</section>
</div>
</div>
</main>
);
}
export default App;
-41
View File
@@ -1,41 +0,0 @@
# OpenArms Web Recording Interface
A web interface for recording OpenArms datasets.
## Installation
```bash
cd examples/openarms_web_interface
npm install
```
## Usage
**Start everything with one command:**
```bash
./launch.sh
```
This will:
- Start the FastAPI backend on port 8000
- Start the React frontend on port 5173
- Show live logs from both services
Then open your browser to: **http://localhost:5173**
**Stop with:** `Ctrl+C`
---
## Workflow
1. **Configure CAN interfaces** and **camera paths** in the dropdowns
2. Click **"Setup Robots"** to initialize (once at start)
3. Enter a **task description**
4. Click **"Start Recording"** to begin an episode
5. Click **"Stop Recording"** when done
6. Dataset is automatically encoded and uploaded to HuggingFace Hub as **private**
7. Repeat steps 3-6 for more episodes (no need to re-setup robots!)
---
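## Scripting the backend (optional)

The same HTTP endpoints the React app calls can be driven without the browser. Below is a minimal sketch, assuming the FastAPI backend started by `launch.sh` is reachable at `http://localhost:8000` and that the `requests` package is installed (it is not a listed dependency of this example); the endpoint paths and payload shape mirror what the frontend sends, while the task string and sleep duration are purely illustrative.

```python
import time

import requests  # assumption: install separately, not part of this example's dependencies

API_BASE = "http://localhost:8000/api"

# Same config shape the UI sends to /robots/setup and /recording/start.
config = {
    "leader_type": "openarms",
    "leader_left": "can0", "leader_right": "can1",
    "follower_left": "can2", "follower_right": "can3",
    "left_wrist": "/dev/video0", "right_wrist": "/dev/video1", "base": "/dev/video4",
}

# Connect leader/follower arms and cameras once.
requests.post(f"{API_BASE}/robots/setup", json=config).raise_for_status()

# Record one episode: start, wait, stop (the backend encodes and uploads on stop).
requests.post(f"{API_BASE}/recording/start", json={"task": "pick and place", **config}).raise_for_status()
time.sleep(30)  # illustrative episode length
requests.post(f"{API_BASE}/recording/stop").raise_for_status()

# Poll status to confirm encoding/upload finished.
print(requests.get(f"{API_BASE}/status").json())
```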
@@ -1,12 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>OpenArms Recording Interface</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/main.jsx"></script>
</body>
</html>
-142
View File
@@ -1,142 +0,0 @@
#!/bin/bash
# OpenArms Web Interface Launcher
# Starts Rerun viewer, FastAPI backend, and React frontend
set -e
# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color
# Get script directory
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"
echo -e "${BLUE}╔════════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ OpenArms Web Recording Interface ║${NC}"
echo -e "${BLUE}╚════════════════════════════════════════╝${NC}"
echo ""
# Function to cleanup on exit
cleanup() {
echo ""
echo -e "${YELLOW}Shutting down services...${NC}"
# Kill all child processes
pkill -P $$ 2>/dev/null || true
# Kill specific services by port
lsof -ti:8000 | xargs kill -9 2>/dev/null || true # Backend
lsof -ti:5173 | xargs kill -9 2>/dev/null || true # Frontend
lsof -ti:9876 | xargs kill -9 2>/dev/null || true # Rerun (if spawned)
echo -e "${GREEN}✓ Services stopped${NC}"
exit 0
}
# Register cleanup on script exit
trap cleanup EXIT INT TERM
# Check if required commands exist
command -v rerun >/dev/null 2>&1 || {
echo -e "${RED}✗ Error: 'rerun' not found. Please install: pip install rerun-sdk${NC}"
exit 1
}
command -v python >/dev/null 2>&1 || {
echo -e "${RED}✗ Error: 'python' not found${NC}"
exit 1
}
command -v npm >/dev/null 2>&1 || {
echo -e "${RED}✗ Error: 'npm' not found${NC}"
exit 1
}
# Check if node_modules exists
if [ ! -d "node_modules" ]; then
echo -e "${YELLOW}⚠ node_modules not found. Running npm install...${NC}"
npm install
echo -e "${GREEN}✓ Dependencies installed${NC}"
echo ""
fi
echo -e "${GREEN}Starting services...${NC}"
echo ""
# 1. Start FastAPI backend (Rerun will start when recording begins)
echo -e "${BLUE}[1/2]${NC} Starting FastAPI backend on port 8000..."
cd "$SCRIPT_DIR"
# Use Python from current environment (if lerobot env is active, it will use that)
# Otherwise, check if we need to use conda run
if [[ "$CONDA_DEFAULT_ENV" == "lerobot" ]]; then
# Already in lerobot environment
echo -e "${GREEN}✓ Using active lerobot environment${NC}"
PYTHON_CMD="python"
elif command -v conda >/dev/null 2>&1 && conda env list | grep -q "^lerobot "; then
# lerobot env exists but not active - use conda run
echo -e "${YELLOW}Using conda run with lerobot environment...${NC}"
PYTHON_CMD="conda run -n lerobot --no-capture-output python"
else
# Fall back to system python
echo -e "${YELLOW}⚠ Warning: lerobot environment not found, using system python${NC}"
PYTHON_CMD="python"
fi
$PYTHON_CMD web_record_server.py > /tmp/openarms_backend.log 2>&1 &
BACKEND_PID=$!
sleep 3
if ps -p $BACKEND_PID > /dev/null; then
echo -e "${GREEN}✓ Backend started${NC} (PID: $BACKEND_PID)"
echo -e " URL: ${BLUE}http://localhost:8000${NC}"
else
echo -e "${RED}✗ Failed to start backend${NC}"
echo -e "${YELLOW}Check logs: tail -f /tmp/openarms_backend.log${NC}"
exit 1
fi
echo ""
# 2. Start React frontend
echo -e "${BLUE}[2/2]${NC} Starting React frontend on port 5173..."
cd "$SCRIPT_DIR"
npm run dev > /tmp/openarms_frontend.log 2>&1 &
FRONTEND_PID=$!
sleep 3
if ps -p $FRONTEND_PID > /dev/null; then
echo -e "${GREEN}✓ Frontend started${NC} (PID: $FRONTEND_PID)"
echo -e " URL: ${BLUE}http://localhost:5173${NC}"
else
echo -e "${RED}✗ Failed to start frontend${NC}"
echo -e "${YELLOW}Check logs: tail -f /tmp/openarms_frontend.log${NC}"
exit 1
fi
echo ""
# Display status
echo -e "${GREEN}╔════════════════════════════════════════╗${NC}"
echo -e "${GREEN}║ All services running! 🚀 ║${NC}"
echo -e "${GREEN}╚════════════════════════════════════════╝${NC}"
echo ""
echo -e "🔧 ${BLUE}Backend:${NC} http://localhost:8000"
echo -e "🌐 ${BLUE}Frontend:${NC} http://localhost:5173"
echo -e "📊 ${BLUE}Rerun:${NC} Will spawn automatically when recording starts"
echo ""
echo -e "${YELLOW}Open your browser to:${NC} ${BLUE}http://localhost:5173${NC}"
echo ""
echo -e "${YELLOW}Logs:${NC}"
echo -e " • Backend: tail -f /tmp/openarms_backend.log"
echo -e " • Frontend: tail -f /tmp/openarms_frontend.log"
echo ""
echo -e "${RED}Press Ctrl+C to stop all services${NC}"
echo ""
# Keep script running and wait for any service to exit
wait
-7
View File
@@ -1,7 +0,0 @@
import { createRoot } from 'react-dom/client'
import App from './App.jsx'
createRoot(document.getElementById('root')).render(
<App />
)
File diff suppressed because it is too large
@@ -1,21 +0,0 @@
{
"name": "openarms-web-interface",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview"
},
"dependencies": {
"react": "^18.3.1",
"react-dom": "^18.3.1"
},
"devDependencies": {
"@types/react": "^18.3.12",
"@types/react-dom": "^18.3.1",
"@vitejs/plugin-react": "^4.3.4",
"vite": "^6.0.1"
}
}
@@ -1,17 +0,0 @@
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'
// https://vite.dev/config/
export default defineConfig({
plugins: [react()],
server: {
port: 5173,
strictPort: false,
host: true,
open: false
},
build: {
outDir: 'dist',
sourcemap: true
}
})
File diff suppressed because it is too large
+47
View File
@@ -0,0 +1,47 @@
# Voice Assistant Examples
Voice-enabled robot assistant examples using speech-to-text (STT) and text-to-speech (TTS).
## Overview
These examples demonstrate how to build a voice interface for robot control:
1. **Hold SPACE** → Push-to-talk recording starts
2. **Release SPACE** → Recording stops
3. **STT (Whisper)** → Converts speech to text (high-level task prompt)
4. **Pi0.5** → Generates robot response/utterance
5. **TTS (Kokoro)** → Speaks the response back
## Requirements
```bash
pip install torch transformers sounddevice numpy pynput "kokoro>=0.9.2"
```
## Usage
### With Pi0.5 Model
```bash
python examples/voice_assistant/voice_assistant_pi05.py \
--pretrained_path path/to/pi05/checkpoint
```
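`--pretrained_path` is optional: if it is omitted, or if the checkpoint fails to load, the script falls back to a randomly initialized Pi0.5 (it prints a warning), which is only useful for checking that the microphone, STT, and TTS plumbing works:
```bash
# Audio-pipeline smoke test with random Pi0.5 weights (responses will be meaningless)
python examples/voice_assistant/voice_assistant_pi05.py
```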
## How It Works
### Pi0.5 Voice Integration
Pi0.5 can generate robot utterances as part of its subtask prediction. The flow:
1. **High-level prompt**: User voice command is transcribed and formatted as a task prompt
2. **Subtask generation**: Pi0.5 autoregressively generates a response
3. **Utterance extraction**: If the response contains `<utterance>...</utterance>` tags, the content is extracted (see the sketch below)
4. **TTS output**: The response is spoken back to the user
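The snippet below is a minimal sketch of steps 1 and 3 (prompt formatting and utterance extraction), mirroring what the full script further down does; the policy call in between is omitted and the example command is made up:
```python
import re

def extract_utterance(generated_text: str) -> str:
    """Return the content of <utterance>...</utterance> if present, else the raw text."""
    match = re.search(r"<utterance>(.*?)</utterance>", generated_text, re.DOTALL)
    return match.group(1).strip() if match else generated_text.strip()

# Step 1: the transcribed voice command becomes a high-level prompt
user_text = "pick up the red cube"
prompt = f"User request: {user_text}\nRobot response:"

# Step 3: pull the spoken part out of whatever Pi0.5 generated
print(extract_utterance("<utterance>Sure, picking up the red cube now.</utterance>"))
```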
## Configuration Options
| Option | Default | Description |
|--------|---------|-------------|
| `--pretrained_path` | None | Path to Pi0.5 checkpoint |
| `--max_record_seconds` | 30.0 | Maximum recording duration in seconds |
| `--max_response_tokens` | 100 | Max tokens in generated response |
@@ -0,0 +1,336 @@
#!/usr/bin/env python
"""
Voice Assistant with Pi0.5: Microphone → STT → Pi0.5 → TTS → Speaker
This example demonstrates how to use Pi0.5 as a conversational robot assistant:
1. Hold SPACE to record your voice command
2. Speech-to-text (Whisper) converts speech to text
3. Text is fed as a high-level prompt to Pi0.5
4. Pi0.5 generates a response (robot utterance)
5. Text-to-speech (Kokoro) speaks the response back
Requirements:
pip install torch transformers sounddevice numpy pynput "kokoro>=0.9.2"
Usage:
python examples/voice_assistant/voice_assistant_pi05.py \
--pretrained_path lerobot/pi0.5-base
"""
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
import argparse
import re
import subprocess
import threading
import time
import numpy as np
import sounddevice as sd
import torch
from pynput import keyboard
from transformers import AutoTokenizer, WhisperForConditionalGeneration, WhisperProcessor
from lerobot.policies.pi05.configuration_pi05 import PI05Config
from lerobot.policies.pi05.modeling_pi05 import PI05Pytorch
SAMPLE_RATE = 16000
def get_device():
if torch.cuda.is_available():
return torch.device("cuda")
elif torch.backends.mps.is_available():
return torch.device("mps")
return torch.device("cpu")
class Pi05VoiceAssistant:
"""Voice assistant using Pi0.5 for generating robot utterances."""
def __init__(
self,
pretrained_path: str | None = None,
max_response_tokens: int = 100,
max_record_seconds: float = 30.0,
):
self.device = get_device()
self.dtype = torch.float32 if self.device.type == "mps" else torch.bfloat16
self.max_response_tokens = max_response_tokens
self.max_record_seconds = max_record_seconds
# Push-to-talk state
self._recording = False
self._audio_chunks: list[np.ndarray] = []
self._stream: sd.InputStream | None = None
print(f"Using device: {self.device}")
self._load_models(pretrained_path)
def _load_models(self, pretrained_path: str | None):
print("Loading STT (Whisper tiny)...")
self.stt_processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
self.stt_model = WhisperForConditionalGeneration.from_pretrained(
"openai/whisper-tiny.en", torch_dtype=self.dtype
).to(self.device)
print("Loading Pi0.5 model...")
self._load_pi05(pretrained_path)
print("Loading tokenizer...")
self.tokenizer = AutoTokenizer.from_pretrained("google/paligemma-3b-pt-224")
self._load_tts()
print("Ready!\n")
def _load_pi05(self, pretrained_path: str | None):
"""Load Pi0.5 model for utterance generation."""
config = PI05Config()
config.dtype = "float32" if self.device.type == "mps" else "bfloat16"
self.pi05_model = PI05Pytorch(config)
if pretrained_path:
try:
from safetensors.torch import load_file
state_dict = load_file(f"{pretrained_path}/model.safetensors")
self.pi05_model.load_state_dict(state_dict, strict=False)
print(f"✓ Loaded Pi0.5 weights from {pretrained_path}")
except Exception as e:
print(f"Warning: Could not load pretrained weights: {e}")
print("Using randomly initialized model for demo purposes")
self.pi05_model = self.pi05_model.to(self.device)
self.pi05_model.eval()
def _load_tts(self):
try:
print("Loading TTS (Kokoro 82M)...")
from kokoro import KPipeline
self.tts_pipeline = KPipeline(lang_code="a") # American English
self.tts_voice = "af_heart"
self.tts_type = "kokoro"
print("Kokoro loaded!")
except Exception as e:
print(f"Kokoro not available ({e})")
print("Using macOS `say` for TTS")
self.tts_pipeline = None
self.tts_type = "system"
def _audio_callback(self, indata, frames, time_info, status):
"""Callback for audio stream - collects chunks while recording."""
if self._recording:
self._audio_chunks.append(indata.copy())
def _start_recording(self):
"""Start recording audio."""
if self._recording:
return
self._recording = True
self._audio_chunks = []
print("🎤 Recording... (release SPACE to stop)")
def _stop_recording(self) -> np.ndarray | None:
"""Stop recording and return the audio."""
if not self._recording:
return None
self._recording = False
if not self._audio_chunks:
return None
audio = np.concatenate(self._audio_chunks, axis=0).flatten()
duration = len(audio) / SAMPLE_RATE
volume = np.abs(audio).max()
print(f"Recorded {duration:.1f}s, volume: {volume:.4f}")
if volume < 0.001:
print("⚠️ Very low audio - check microphone permissions!")
return None
return audio
def wait_for_spacebar(self) -> np.ndarray | None:
"""Wait for spacebar press, record while held, return audio on release."""
audio_result = None
recording_done = threading.Event()
def on_press(key):
if key == keyboard.Key.space:
self._start_recording()
def on_release(key):
nonlocal audio_result
if key == keyboard.Key.space and self._recording:
audio_result = self._stop_recording()
recording_done.set()
return False # Stop listener
# Start audio stream
self._stream = sd.InputStream(
samplerate=SAMPLE_RATE,
channels=1,
dtype="float32",
callback=self._audio_callback,
blocksize=int(SAMPLE_RATE * 0.1), # 100ms blocks
)
with self._stream:
print("\n⏳ Press and hold SPACE to speak...")
with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
# Wait for recording to complete or timeout
recording_done.wait(timeout=self.max_record_seconds)
if self._recording:
audio_result = self._stop_recording()
return audio_result
def transcribe(self, audio: np.ndarray) -> str:
start = time.perf_counter()
inputs = self.stt_processor(audio, sampling_rate=SAMPLE_RATE, return_tensors="pt")
input_features = inputs.input_features.to(self.device, dtype=self.dtype)
tokens = self.stt_model.generate(input_features)
text = self.stt_processor.batch_decode(tokens, skip_special_tokens=True)[0]
print(f"STT: {time.perf_counter() - start:.2f}s")
return text.strip()
def _create_dummy_images(self, batch_size: int = 1) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
"""Create placeholder images for Pi0.5 when no camera is available."""
image_shape = (batch_size, 3, 224, 224)
dummy_image = torch.zeros(image_shape, dtype=torch.float32, device=self.device)
dummy_mask = torch.ones(batch_size, dtype=torch.bool, device=self.device)
return [dummy_image], [dummy_mask]
def _tokenize_prompt(self, text: str) -> tuple[torch.Tensor, torch.Tensor]:
"""Tokenize the user prompt for Pi0.5."""
prompt = f"User request: {text}\nRobot response:"
tokenized = self.tokenizer(
[prompt],
max_length=200,
truncation=True,
padding="max_length",
return_tensors="pt",
)
tokens = tokenized["input_ids"].to(self.device)
masks = tokenized["attention_mask"].to(self.device, dtype=torch.bool)
return tokens, masks
def generate_response(self, user_text: str) -> str:
"""Generate robot utterance using Pi0.5's language generation."""
start = time.perf_counter()
images, img_masks = self._create_dummy_images()
tokens, masks = self._tokenize_prompt(user_text)
with torch.no_grad():
generated_tokens = self.pi05_model._generate_subtask_tokens(
images=images,
img_masks=img_masks,
tokens=tokens,
masks=masks,
tokenizer=self.tokenizer,
max_length=self.max_response_tokens,
device=self.device,
)
# Decode generated tokens
valid_tokens = generated_tokens[0][generated_tokens[0] != 0]
response = self.tokenizer.decode(valid_tokens, skip_special_tokens=True)
# Extract utterance if marked with special tokens
response = self._extract_utterance(response)
print(f"Pi0.5: {time.perf_counter() - start:.2f}s")
return response.strip()
def _extract_utterance(self, text: str) -> str:
"""Extract utterance from between <utterance> tokens if present."""
pattern = r"<utterance>(.*?)</utterance>"
match = re.search(pattern, text, re.DOTALL)
if match:
return match.group(1).strip()
return text
def speak(self, text: str):
start = time.perf_counter()
if self.tts_type == "kokoro":
generator = self.tts_pipeline(text, voice=self.tts_voice)
audio_chunks = [audio for _, _, audio in generator]
if audio_chunks:
audio = np.concatenate(audio_chunks)
sd.play(audio, 24000)
sd.wait()
else:
subprocess.run(["say", text], check=True)
print(f"TTS: {time.perf_counter() - start:.2f}s")
def run(self):
print("=" * 50)
print("Pi0.5 Voice Assistant")
print("=" * 50)
print("• Hold SPACE to record your voice command")
print("• Release SPACE when done speaking")
print("• Press Ctrl+C to exit")
print("=" * 50)
while True:
try:
audio = self.wait_for_spacebar()
if audio is None:
print("(no audio captured)\n")
continue
user_text = self.transcribe(audio)
if not user_text:
print("(no speech detected)\n")
continue
print(f"You: {user_text}")
response = self.generate_response(user_text)
print(f"Robot: {response}\n")
self.speak(response)
except KeyboardInterrupt:
print("\nGoodbye!")
break
def main():
parser = argparse.ArgumentParser(description="Pi0.5 Voice Assistant")
parser.add_argument(
"--pretrained_path",
type=str,
default=None,
help="Path to pretrained Pi0.5 model (optional)",
)
parser.add_argument(
"--max_response_tokens",
type=int,
default=100,
help="Maximum tokens in generated response",
)
parser.add_argument(
"--max_record_seconds",
type=float,
default=30.0,
help="Maximum recording duration in seconds",
)
args = parser.parse_args()
assistant = Pi05VoiceAssistant(
pretrained_path=args.pretrained_path,
max_response_tokens=args.max_response_tokens,
max_record_seconds=args.max_record_seconds,
)
assistant.run()
if __name__ == "__main__":
main()
+27
View File
@@ -0,0 +1,27 @@
{
"repo_id": "local",
"vocab_size": 1024,
"scale": 10.0,
"encoded_dims": "0:7",
"encoded_dim_ranges": [
[
0,
7
]
],
"total_encoded_dims": 7,
"delta_dims": null,
"delta_dim_list": null,
"use_delta_transform": false,
"state_key": "observation.state",
"normalization_mode": "QUANTILES",
"action_horizon": 10,
"num_training_chunks": 25065,
"compression_stats": {
"compression_ratio": 3.464660463274599,
"mean_token_length": 20.204,
"p99_token_length": 36.00999999999999,
"min_token_length": 5.0,
"max_token_length": 38.0
}
}
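For what it's worth, the reported `compression_ratio` appears to be the raw chunk size divided by the mean token length: each chunk holds `action_horizon × total_encoded_dims = 10 × 7 = 70` values, and `70 / 20.204 ≈ 3.4647`, which matches the value above. A quick check with the numbers copied from the JSON:
```python
action_horizon = 10
total_encoded_dims = 7
mean_token_length = 20.204

raw_values_per_chunk = action_horizon * total_encoded_dims  # 70
print(raw_values_per_chunk / mean_token_length)             # ~3.4647, matches compression_ratio
```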
@@ -0,0 +1,158 @@
import logging
from typing import ClassVar
import numpy as np
from scipy.fft import dct
from scipy.fft import idct
from tokenizers import ByteLevelBPETokenizer
from tokenizers.trainers import BpeTrainer
from transformers import PreTrainedTokenizerFast
from transformers.processing_utils import ProcessorMixin
class UniversalActionProcessor(ProcessorMixin):
attributes: ClassVar[list[str]] = ["bpe_tokenizer"]
bpe_tokenizer_class: str = "AutoTokenizer"
def __init__(
self,
bpe_tokenizer: PreTrainedTokenizerFast,
scale: float = 10,
vocab_size: int = 1024,
min_token: int = 0,
*,
action_dim: int | None = None,
time_horizon: int | None = None,
):
self.scale = scale
self.vocab_size = vocab_size
self.min_token = min_token
# Action horizon and dimension needed during decoding. These can be specified
# in three ways (in order of priority):
# 1. passed in as kwargs to decode()
# 2. in the constructor
# 3. cached from the last time decode() was called
self.time_horizon = time_horizon
self.action_dim = action_dim
self.called_time_horizon = time_horizon
self.called_action_dim = action_dim
super().__init__(bpe_tokenizer)
def __call__(self, action_chunk: np.array) -> np.array:
assert action_chunk.ndim <= 3, "Only 3 dimensions supported: [batch, timesteps, action_dim]"
if action_chunk.ndim == 2:
action_chunk = action_chunk[None, ...]
# Cache the time horizon and action dimension for decoding
self.called_time_horizon = action_chunk.shape[-2]
self.called_action_dim = action_chunk.shape[-1]
dct_coeff = dct(action_chunk, axis=1, norm="ortho")
dct_coeff = np.around(dct_coeff * self.scale)
tokens = []
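# Shift each rounded coefficient by min_token so it is non-negative, then map it to a character,
# letting the BPE tokenizer compress the whole chunk as an ordinary string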
for elem in dct_coeff:
token_str = "".join(map(chr, np.maximum(elem.flatten() - self.min_token, 0).astype(int)))
tokens.append(self.bpe_tokenizer(token_str)["input_ids"])
return tokens
def decode(
self,
tokens: list[list[int]],
*,
time_horizon: int | None = None,
action_dim: int | None = None,
) -> np.array:
self.time_horizon = time_horizon or self.time_horizon or self.called_time_horizon
self.action_dim = action_dim or self.action_dim or self.called_action_dim
# Cache the time horizon and action dimension for the next call
self.called_time_horizon = self.time_horizon
self.called_action_dim = self.action_dim
assert (
self.time_horizon is not None and self.action_dim is not None
), "Tokenizer not initialized, call encode() once or pass in time_horizon and action_dim."
decoded_actions = []
for token in tokens:
try:
decoded_tokens = self.bpe_tokenizer.decode(token)
decoded_dct_coeff = np.array(list(map(ord, decoded_tokens))) + self.min_token
decoded_dct_coeff = decoded_dct_coeff.reshape(-1, self.action_dim)
assert (
decoded_dct_coeff.shape
== (
self.time_horizon,
self.action_dim,
)
), f"Decoded DCT coefficients have shape {decoded_dct_coeff.shape}, expected ({self.time_horizon}, {self.action_dim})"
except Exception as e:
print(f"Error decoding tokens: {e}")
print(f"Tokens: {token}")
decoded_dct_coeff = np.zeros((self.time_horizon, self.action_dim))
decoded_actions.append(idct(decoded_dct_coeff / self.scale, axis=0, norm="ortho"))
return np.stack(decoded_actions)
@classmethod
def fit(
cls,
action_data: list[np.array],
scale: float = 10,
vocab_size: int = 1024,
*,
time_horizon: int | None = None,
action_dim: int | None = None,
) -> "UniversalActionProcessor":
# Run DCT over all inputs
dct_tokens = [dct(a, axis=0, norm="ortho").flatten() for a in action_data]
# Quantize and find min token
max_token = int(np.around(np.concatenate(dct_tokens) * scale).max())
min_token = int(np.around(np.concatenate(dct_tokens) * scale).min())
min_vocab_size = max_token - min_token
assert (
min_vocab_size <= vocab_size
), f"Vocab size {vocab_size} is too small for the range of tokens {min_vocab_size}"
if min_vocab_size + 100 > vocab_size:
logging.warning(
f"Initial alphabet size {min_vocab_size} is almost as large as the vocab"
f"size {vocab_size}, consider increasing vocab size"
)
# Make token iterator for BPE training
def _token_iter():
for tokens in dct_tokens:
rounded_tokens = np.around(tokens * scale) - min_token
rounded_tokens = rounded_tokens.astype(int)
string = "".join(map(chr, rounded_tokens))
yield string
# Train BPE tokenizer
bpe = ByteLevelBPETokenizer()
# Set up the entire range of possible tokens as the initial alphabet
alphabet = [chr(i) for i in range(max_token - min_token + 1)]
trainer = BpeTrainer(
vocab_size=vocab_size,
min_frequency=2,
show_progress=True,
special_tokens=[],
initial_alphabet=alphabet,
max_token_length=10000,
)
# Train the inner tokenizer (don't use ByteLevelBPETokenizer.train_from_iterator()
# because it doesn't support custom alphabets)
bpe._tokenizer.train_from_iterator(_token_iter(), trainer=trainer)
return cls(
PreTrainedTokenizerFast(tokenizer_object=bpe, clean_up_tokenization_spaces=False),
scale=scale,
vocab_size=vocab_size,
min_token=min_token,
time_horizon=time_horizon,
action_dim=action_dim,
)
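As a rough sanity check, the processor above can be fit and round-tripped on synthetic data. The snippet below is a hypothetical smoke test, not part of the repo; the shapes and values are made up, and the reconstruction is lossy because the DCT coefficients are rounded before tokenization:
```python
import numpy as np

rng = np.random.default_rng(0)
# 100 action chunks of shape (time_horizon=10, action_dim=7)
chunks = [rng.uniform(-1.0, 1.0, size=(10, 7)) for _ in range(100)]

processor = UniversalActionProcessor.fit(
    chunks, scale=10, vocab_size=1024, time_horizon=10, action_dim=7
)

tokens = processor(chunks[0])         # list containing one list of BPE token ids
recovered = processor.decode(tokens)  # array of shape (1, 10, 7)
print(len(tokens[0]), np.abs(recovered[0] - chunks[0]).max())
```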
@@ -0,0 +1,11 @@
{
"action_dim": 7,
"auto_map": {
"AutoProcessor": "processing_action_tokenizer.UniversalActionProcessor"
},
"min_token": -32,
"processor_class": "UniversalActionProcessor",
"scale": 10.0,
"time_horizon": 10,
"vocab_size": 1024
}
@@ -0,0 +1 @@
{}
File diff suppressed because it is too large
@@ -0,0 +1,11 @@
{
"added_tokens_decoder": {},
"auto_map": {
"AutoProcessor": "processing_action_tokenizer.UniversalActionProcessor"
},
"clean_up_tokenization_spaces": false,
"extra_special_tokens": {},
"model_max_length": 1000000000000000019884624838656,
"processor_class": "UniversalActionProcessor",
"tokenizer_class": "PreTrainedTokenizerFast"
}
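Both config files map `AutoProcessor` to `processing_action_tokenizer.UniversalActionProcessor` via `auto_map`, so (assuming these files sit together in a local folder or Hub repo) the processor can presumably be loaded through the standard custom-code path; the path below is a placeholder:
```python
import numpy as np
from transformers import AutoProcessor

# "path/to/fast_tokenizer" stands in for wherever these files are stored
processor = AutoProcessor.from_pretrained("path/to/fast_tokenizer", trust_remote_code=True)

action_chunk = np.zeros((10, 7))   # (time_horizon, action_dim), dummy values
tokens = processor(action_chunk)
actions = processor.decode(tokens)
```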
-10
View File
@@ -1,10 +0,0 @@
from huggingface_hub import HfApi, list_datasets
api = HfApi()
datasets = list_datasets(author="lerobot-data-collection")
print('"[', end="")
i=0
for dataset in datasets:
if "three-folds-dataset" in dataset.id:
print("'" + dataset.id + "',", end="")
print(']"',)
-4
View File
@@ -102,10 +102,8 @@ grpcio-dep = ["grpcio==1.73.1", "protobuf==6.31.0"] # TODO: Bumb dependency (com
# Motors
feetech = ["feetech-servo-sdk>=1.0.0,<2.0.0"]
dynamixel = ["dynamixel-sdk>=3.7.31,<3.9.0"]
damiao = ["python-can>=4.2.0,<5.0.0"]
# Robots
openarms = ["lerobot[damiao]"]
gamepad = ["lerobot[pygame-dep]", "hidapi>=0.14.0,<0.15.0"]
hopejr = ["lerobot[feetech]", "lerobot[pygame-dep]"]
lekiwi = ["lerobot[feetech]", "pyzmq>=26.2.1,<28.0.0"]
@@ -155,7 +153,6 @@ metaworld = ["metaworld==3.0.0"]
# All
all = [
"lerobot[dynamixel]",
"lerobot[openarms]",
"lerobot[gamepad]",
"lerobot[hopejr]",
"lerobot[lekiwi]",
@@ -266,7 +263,6 @@ default.extend-ignore-identifiers-re = [
"ein",
"thw",
"inpt",
"ROBOTIS",
]
# TODO: Uncomment when ready to use
+1 -1
View File
@@ -26,4 +26,4 @@ DEFAULT_OBS_QUEUE_TIMEOUT = 2
SUPPORTED_POLICIES = ["act", "smolvla", "diffusion", "tdmpc", "vqbet", "pi0", "pi05"]
# TODO: Add all other robots
SUPPORTED_ROBOTS = ["so100_follower", "so101_follower", "bi_so100_follower", "omx_follower"]
SUPPORTED_ROBOTS = ["so100_follower", "so101_follower", "bi_so100_follower"]
@@ -54,7 +54,6 @@ from lerobot.robots import ( # noqa: F401
bi_so100_follower,
koch_follower,
make_robot_from_config,
omx_follower,
so100_follower,
so101_follower,
)
-1
View File
@@ -56,7 +56,6 @@ class TrainPipelineConfig(HubMixin):
steps: int = 100_000
eval_freq: int = 20_000
log_freq: int = 200
tolerance_s: float = 1e-4
save_checkpoint: bool = True
# Checkpoint is saved every `save_freq` training iterations and after the last training step.
save_freq: int = 20_000
-2
View File
@@ -98,7 +98,6 @@ def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDatas
image_transforms=image_transforms,
revision=cfg.dataset.revision,
video_backend=cfg.dataset.video_backend,
tolerance_s=cfg.tolerance_s,
)
else:
dataset = StreamingLeRobotDataset(
@@ -109,7 +108,6 @@ def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDatas
image_transforms=image_transforms,
revision=cfg.dataset.revision,
max_num_shards=cfg.num_workers,
tolerance_s=cfg.tolerance_s,
)
else:
raise NotImplementedError("The MultiLeRobotDataset isn't supported for now.")
+8 -21
View File
@@ -23,13 +23,11 @@ from pathlib import Path
import datasets
import numpy as np
import os
import packaging.version
import pandas as pd
import PIL.Image
import pyarrow as pa
import pyarrow.parquet as pq
from concurrent.futures import ProcessPoolExecutor
import torch
import torch.utils
from huggingface_hub import HfApi, snapshot_download
@@ -60,6 +58,7 @@ from lerobot.datasets.utils import (
load_nested_dataset,
load_stats,
load_tasks,
load_tasks_high_level,
update_chunk_file_indices,
validate_episode_buffer,
validate_frame,
@@ -163,6 +162,7 @@ class LeRobotDatasetMetadata:
self.info = load_info(self.root)
check_version_compatibility(self.repo_id, self._version, CODEBASE_VERSION)
self.tasks = load_tasks(self.root)
# self.tasks_high_level = load_tasks_high_level(self.root)
self.episodes = load_episodes(self.root)
self.stats = load_stats(self.root)
@@ -1052,6 +1052,12 @@ class LeRobotDataset(torch.utils.data.Dataset):
# Add task as a string
task_idx = item["task_index"].item()
item["task"] = self.meta.tasks.iloc[task_idx].name
# Optionally add high level task index
if "task_index_high_level" in self.features:
high_level_task_idx = item["task_index_high_level"].item()
item["robot_utterance"] = self.meta.tasks_high_level.iloc[high_level_task_idx]["robot_utterance"]
item["user_prompt"] = self.meta.tasks_high_level.iloc[high_level_task_idx]["user_prompt"]
return item
def __repr__(self):
@@ -1201,9 +1207,6 @@ class LeRobotDataset(torch.utils.data.Dataset):
use_batched_encoding = self.batch_encoding_size > 1
if has_video_keys and not use_batched_encoding:
video_paths = self._encode_multiple_temporary_episode_videos(self.meta.video_keys, episode_index)
for (video_key, video_path) in zip(self.meta.video_keys, video_paths):
ep_metadata.update(self._save_episode_video(video_key, episode_index, video_path))
num_cameras = len(self.meta.video_keys)
if parallel_encoding and num_cameras > 1:
# TODO(Steven): Ideally we would like to control the number of threads per encoding such that:
@@ -1533,22 +1536,6 @@ class LeRobotDataset(torch.utils.data.Dataset):
"""
return _encode_video_worker(video_key, episode_index, self.root, self.fps)
def _encode_multiple_temporary_episode_videos(self, video_keys, episode_index):
temp_paths = []
img_dirs = []
for video_key in video_keys:
temp_paths.append(Path(tempfile.mkdtemp(dir=self.root)) / f"{video_key}_{episode_index:03d}.mp4")
img_dirs.append(self._get_image_file_dir(episode_index, video_key))
fps = [self.fps]*len(video_keys)
with ProcessPoolExecutor(max_workers=len(video_keys)) as executor:
executor.map(encode_video_frames,img_dirs,temp_paths,fps)
for img_dir in img_dirs:
shutil.rmtree(img_dir)
return temp_paths
@classmethod
def create(
cls,
+4
View File
@@ -60,6 +60,7 @@ VIDEO_DIR = "videos"
CHUNK_FILE_PATTERN = "chunk-{chunk_index:03d}/file-{file_index:03d}"
DEFAULT_TASKS_PATH = "meta/tasks.parquet"
DEFAULT_TASKS_HIGH_LEVEL_PATH = "meta/tasks_high_level.parquet"
DEFAULT_EPISODES_PATH = EPISODES_DIR + "/" + CHUNK_FILE_PATTERN + ".parquet"
DEFAULT_DATA_PATH = DATA_DIR + "/" + CHUNK_FILE_PATTERN + ".parquet"
DEFAULT_VIDEO_PATH = VIDEO_DIR + "/{video_key}/" + CHUNK_FILE_PATTERN + ".mp4"
@@ -352,6 +353,9 @@ def load_tasks(local_dir: Path) -> pandas.DataFrame:
tasks = pd.read_parquet(local_dir / DEFAULT_TASKS_PATH)
return tasks
def load_tasks_high_level(local_dir: Path) -> pandas.DataFrame:
tasks = pd.read_parquet(local_dir / DEFAULT_TASKS_HIGH_LEVEL_PATH)
return tasks
def write_episodes(episodes: Dataset, local_dir: Path) -> None:
"""Write episode metadata to a parquet file in the LeRobot v3.0 format.
+1 -4
View File
@@ -310,7 +310,7 @@ def encode_video_frames(
crf: int | None = 30,
fast_decode: int = 0,
log_level: int | None = av.logging.ERROR,
overwrite: bool = True,
overwrite: bool = False,
preset: int | None = None,
) -> None:
"""More info on ffmpeg arguments tuning on `benchmark/video/README.md`"""
@@ -355,9 +355,6 @@ def encode_video_frames(
if crf is not None:
video_options["crf"] = str(crf)
#TEMPORARY FIX
video_options["preset"] = "12"
if fast_decode:
key = "svtav1-params" if vcodec == "libsvtav1" else "tune"
value = f"fast-decode={fast_decode}" if vcodec == "libsvtav1" else "fastdecode"
+1 -8
View File
@@ -14,11 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .motors_bus import (
Motor,
MotorCalibration,
MotorNormMode,
MotorsBus, # Backward compatibility (alias for SerialMotorsBus)
MotorsBusBase,
SerialMotorsBus,
)
from .motors_bus import Motor, MotorCalibration, MotorNormMode, MotorsBus
-18
View File
@@ -1,18 +0,0 @@
#!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .damiao import DamiaoMotorsBus
from .tables import *
-905
View File
@@ -1,905 +0,0 @@
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(pepijn): add license of: https://github.com/cmjang/DM_Control_Python?tab=MIT-1-ov-file#readme
import logging
import time
from contextlib import contextmanager
from copy import deepcopy
from functools import cached_property
from typing import Dict, List, Optional, Tuple, Union
import can
import numpy as np
from lerobot.motors import Motor, MotorCalibration, MotorNormMode, MotorsBusBase
from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
from lerobot.utils.utils import enter_pressed, move_cursor_up
from .tables import (
AVAILABLE_BAUDRATES,
CAN_CMD_DISABLE,
CAN_CMD_ENABLE,
CAN_CMD_REFRESH,
CAN_CMD_SET_ZERO,
CAN_PARAM_ID,
DEFAULT_BAUDRATE,
DEFAULT_TIMEOUT_MS,
MODEL_RESOLUTION,
MOTOR_LIMIT_PARAMS,
NORMALIZED_DATA,
MotorType,
)
logger = logging.getLogger(__name__)
NameOrID = Union[str, int]
Value = Union[int, float]
class DamiaoMotorsBus(MotorsBusBase):
"""
The Damiao implementation for a MotorsBus using CAN bus communication.
This class uses python-can for CAN bus communication with Damiao motors.
For more info, see:
- python-can documentation: https://python-can.readthedocs.io/en/stable/
- Seedstudio documentation: https://wiki.seeedstudio.com/damiao_series/
- DM_Control_Python repo: https://github.com/cmjang/DM_Control_Python
"""
# CAN-specific settings
available_baudrates = deepcopy(AVAILABLE_BAUDRATES)
default_baudrate = DEFAULT_BAUDRATE
default_timeout = DEFAULT_TIMEOUT_MS
# Motor configuration
model_resolution_table = deepcopy(MODEL_RESOLUTION)
normalized_data = deepcopy(NORMALIZED_DATA)
def __init__(
self,
port: str,
motors: dict[str, Motor],
calibration: dict[str, MotorCalibration] | None = None,
can_interface: str = "auto",
use_can_fd: bool = True,
bitrate: int = 1000000,
data_bitrate: int | None = 5000000,
):
"""
Initialize the Damiao motors bus.
Args:
port: CAN interface name (e.g., "can0" for Linux, "/dev/cu.usbmodem*" for macOS)
motors: Dictionary mapping motor names to Motor objects
calibration: Optional calibration data
can_interface: CAN interface type - "auto" (default), "socketcan" (Linux), or "slcan" (macOS/serial)
use_can_fd: Whether to use CAN FD mode (default: True for OpenArms)
bitrate: Nominal bitrate in bps (default: 1000000 = 1 Mbps)
data_bitrate: Data bitrate for CAN FD in bps (default: 5000000 = 5 Mbps), ignored if use_can_fd is False
"""
super().__init__(port, motors, calibration)
self.port = port
self.can_interface = can_interface
self.use_can_fd = use_can_fd
self.bitrate = bitrate
self.data_bitrate = data_bitrate
self.canbus = None
self._is_connected = False
# Map motor names to CAN IDs
self._motor_can_ids = {}
self._recv_id_to_motor = {}
# Store motor types and recv IDs
self._motor_types = {}
for name, motor in self.motors.items():
if hasattr(motor, "motor_type"):
self._motor_types[name] = motor.motor_type
else:
# Default to DM4310 if not specified
self._motor_types[name] = MotorType.DM4310
# Map recv_id to motor name for filtering responses
if hasattr(motor, "recv_id"):
self._recv_id_to_motor[motor.recv_id] = name
@property
def is_connected(self) -> bool:
"""Check if the CAN bus is connected."""
return self._is_connected and self.canbus is not None
def connect(self, handshake: bool = True) -> None:
"""
Open the CAN bus and initialize communication.
Args:
handshake: If True, ping all motors to verify they're present
"""
if self.is_connected:
raise DeviceAlreadyConnectedError(
f"{self.__class__.__name__}('{self.port}') is already connected."
)
try:
# Auto-detect interface type based on port name
if self.can_interface == "auto":
if self.port.startswith("/dev/"):
# Serial device (macOS/Windows)
self.can_interface = "slcan"
logger.info(f"Auto-detected slcan interface for port {self.port}")
else:
# Network interface (Linux)
self.can_interface = "socketcan"
logger.info(f"Auto-detected socketcan interface for port {self.port}")
# Connect to CAN bus
if self.can_interface == "socketcan":
# Linux SocketCAN with CAN FD support
if self.use_can_fd and self.data_bitrate is not None:
self.canbus = can.interface.Bus(
channel=self.port,
interface="socketcan",
bitrate=self.bitrate,
data_bitrate=self.data_bitrate,
fd=True
)
logger.info(f"Connected to {self.port} with CAN FD (bitrate={self.bitrate}, data_bitrate={self.data_bitrate})")
else:
self.canbus = can.interface.Bus(
channel=self.port,
interface="socketcan",
bitrate=self.bitrate
)
logger.info(f"Connected to {self.port} with CAN 2.0 (bitrate={self.bitrate})")
elif self.can_interface == "slcan":
# Serial Line CAN (macOS, Windows, or USB adapters)
# Note: SLCAN typically doesn't support CAN FD
self.canbus = can.interface.Bus(
channel=self.port,
interface="slcan",
bitrate=self.bitrate
)
logger.info(f"Connected to {self.port} with SLCAN (bitrate={self.bitrate})")
else:
# Generic interface (vector, pcan, etc.)
if self.use_can_fd and self.data_bitrate is not None:
self.canbus = can.interface.Bus(
channel=self.port,
interface=self.can_interface,
bitrate=self.bitrate,
data_bitrate=self.data_bitrate,
fd=True
)
else:
self.canbus = can.interface.Bus(
channel=self.port,
interface=self.can_interface,
bitrate=self.bitrate
)
self._is_connected = True
if handshake:
self._handshake()
logger.debug(f"{self.__class__.__name__} connected via {self.can_interface}.")
except Exception as e:
self._is_connected = False
raise ConnectionError(f"Failed to connect to CAN bus: {e}")
def _handshake(self) -> None:
"""Verify all motors are present by refreshing their status."""
for motor_name in self.motors:
self._refresh_motor(motor_name)
time.sleep(0.01) # Small delay between motors
def disconnect(self, disable_torque: bool = True) -> None:
"""
Close the CAN bus connection.
Args:
disable_torque: If True, disable torque on all motors before disconnecting
"""
if not self.is_connected:
raise DeviceNotConnectedError(
f"{self.__class__.__name__}('{self.port}') is not connected."
)
if disable_torque:
try:
self.disable_torque()
except Exception as e:
logger.warning(f"Failed to disable torque during disconnect: {e}")
if self.canbus:
self.canbus.shutdown()
self.canbus = None
self._is_connected = False
logger.debug(f"{self.__class__.__name__} disconnected.")
def configure_motors(self) -> None:
"""Configure all motors with default settings."""
# Damiao motors don't require much configuration in MIT mode
# Just ensure they're enabled
for motor in self.motors:
self._enable_motor(motor)
time.sleep(0.01)
def _enable_motor(self, motor: NameOrID) -> None:
"""Enable a single motor."""
motor_id = self._get_motor_id(motor)
recv_id = self._get_motor_recv_id(motor)
data = [0xFF] * 7 + [CAN_CMD_ENABLE]
msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False)
self.canbus.send(msg)
self._recv_motor_response(expected_recv_id=recv_id)
def _disable_motor(self, motor: NameOrID) -> None:
"""Disable a single motor."""
motor_id = self._get_motor_id(motor)
recv_id = self._get_motor_recv_id(motor)
data = [0xFF] * 7 + [CAN_CMD_DISABLE]
msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False)
self.canbus.send(msg)
self._recv_motor_response(expected_recv_id=recv_id)
def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
"""Enable torque on selected motors."""
motors = self._get_motors_list(motors)
for motor in motors:
for _ in range(num_retry + 1):
try:
self._enable_motor(motor)
break
except Exception as e:
if _ == num_retry:
raise e
time.sleep(0.01)
def disable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
"""Disable torque on selected motors."""
motors = self._get_motors_list(motors)
for motor in motors:
for _ in range(num_retry + 1):
try:
self._disable_motor(motor)
break
except Exception as e:
if _ == num_retry:
raise e
time.sleep(0.01)
@contextmanager
def torque_disabled(self, motors: str | list[str] | None = None):
"""
Context manager that guarantees torque is re-enabled.
This helper is useful to temporarily disable torque when configuring motors.
Examples:
>>> with bus.torque_disabled():
... # Safe operations here with torque disabled
... pass
"""
self.disable_torque(motors)
try:
yield
finally:
self.enable_torque(motors)
def set_zero_position(self, motors: str | list[str] | None = None) -> None:
"""Set current position as zero for selected motors."""
motors = self._get_motors_list(motors)
for motor in motors:
motor_id = self._get_motor_id(motor)
recv_id = self._get_motor_recv_id(motor)
data = [0xFF] * 7 + [CAN_CMD_SET_ZERO]
msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False)
self.canbus.send(msg)
self._recv_motor_response(expected_recv_id=recv_id)
time.sleep(0.01)
def _refresh_motor(self, motor: NameOrID) -> Optional[can.Message]:
"""Refresh motor status and return the response."""
motor_id = self._get_motor_id(motor)
recv_id = self._get_motor_recv_id(motor)
data = [motor_id & 0xFF, (motor_id >> 8) & 0xFF, CAN_CMD_REFRESH, 0, 0, 0, 0, 0]
msg = can.Message(arbitration_id=CAN_PARAM_ID, data=data, is_extended_id=False)
self.canbus.send(msg)
return self._recv_motor_response(expected_recv_id=recv_id)
def _recv_motor_response(self, expected_recv_id: Optional[int] = None, timeout: float = 0.001) -> Optional[can.Message]:
"""
Receive a response from a motor.
Args:
expected_recv_id: If provided, only return messages from this CAN ID
timeout: Timeout in seconds (default: 1ms for high-speed operation)
Returns:
CAN message if received, None otherwise
"""
try:
start_time = time.time()
messages_seen = []
while time.time() - start_time < timeout:
msg = self.canbus.recv(timeout=0.0001) # 100us timeout for fast polling
if msg:
messages_seen.append(f"0x{msg.arbitration_id:02X}")
# If no filter specified, return any message
if expected_recv_id is None:
return msg
# Otherwise, only return if it matches the expected recv_id
if msg.arbitration_id == expected_recv_id:
return msg
else:
logger.debug(f"Ignoring message from CAN ID 0x{msg.arbitration_id:02X}, expected 0x{expected_recv_id:02X}")
# Only log warnings if we're in debug mode to reduce overhead
if logger.isEnabledFor(logging.DEBUG):
if messages_seen:
logger.debug(f"Received {len(messages_seen)} message(s) from IDs {set(messages_seen)}, but expected 0x{expected_recv_id:02X}")
else:
logger.debug(f"No CAN messages received (expected from 0x{expected_recv_id:02X})")
except Exception as e:
logger.debug(f"Failed to receive CAN message: {e}")
return None
def _recv_all_responses(self, expected_recv_ids: list[int], timeout: float = 0.002) -> dict[int, can.Message]:
"""
Efficiently receive responses from multiple motors at once.
Uses the OpenArms pattern: collect all available messages within timeout.
Args:
expected_recv_ids: List of CAN IDs we expect responses from
timeout: Total timeout in seconds (default: 2ms)
Returns:
Dictionary mapping recv_id to CAN message
"""
responses = {}
expected_set = set(expected_recv_ids)
start_time = time.time()
try:
while len(responses) < len(expected_recv_ids) and (time.time() - start_time) < timeout:
msg = self.canbus.recv(timeout=0.0002) # 200us poll timeout (increased from 100us for better reliability)
if msg and msg.arbitration_id in expected_set:
responses[msg.arbitration_id] = msg
if len(responses) == len(expected_recv_ids):
break # Got all responses, exit early
except Exception as e:
logger.debug(f"Error receiving responses: {e}")
return responses
def _mit_control(
self,
motor: NameOrID,
kp: float,
kd: float,
position_degrees: float,
velocity_deg_per_sec: float,
torque: float,
) -> None:
"""
Send MIT control command to a motor.
Args:
motor: Motor name or ID
kp: Position gain
kd: Velocity gain
position_degrees: Target position (degrees)
velocity_deg_per_sec: Target velocity (degrees/s)
torque: Target torque (N·m)
"""
motor_id = self._get_motor_id(motor)
motor_name = self._get_motor_name(motor)
motor_type = self._motor_types.get(motor_name, MotorType.DM4310)
# Convert degrees to radians for motor control
position_rad = np.radians(position_degrees)
velocity_rad_per_sec = np.radians(velocity_deg_per_sec)
# Get motor limits
pmax, vmax, tmax = MOTOR_LIMIT_PARAMS[motor_type]
# Encode parameters
kp_uint = self._float_to_uint(kp, 0, 500, 12)
kd_uint = self._float_to_uint(kd, 0, 5, 12)
q_uint = self._float_to_uint(position_rad, -pmax, pmax, 16)
dq_uint = self._float_to_uint(velocity_rad_per_sec, -vmax, vmax, 12)
tau_uint = self._float_to_uint(torque, -tmax, tmax, 12)
# Pack data
data = [0] * 8
data[0] = (q_uint >> 8) & 0xFF
data[1] = q_uint & 0xFF
data[2] = dq_uint >> 4
data[3] = ((dq_uint & 0xF) << 4) | ((kp_uint >> 8) & 0xF)
data[4] = kp_uint & 0xFF
data[5] = kd_uint >> 4
data[6] = ((kd_uint & 0xF) << 4) | ((tau_uint >> 8) & 0xF)
data[7] = tau_uint & 0xFF
msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False)
self.canbus.send(msg)
recv_id = self._get_motor_recv_id(motor)
self._recv_motor_response(expected_recv_id=recv_id)
def _mit_control_batch(
self,
commands: Dict[NameOrID, Tuple[float, float, float, float, float]],
) -> None:
"""
Send MIT control commands to multiple motors in batch (optimized).
Sends all commands first, then collects responses. Much faster than sequential.
Args:
commands: Dict mapping motor name/ID to (kp, kd, position_deg, velocity_deg/s, torque)
Example: {'joint_1': (10.0, 0.5, 45.0, 0.0, 0.0), ...}
"""
if not commands:
return
expected_recv_ids = []
# Step 1: Send all MIT control commands (no waiting)
for motor, (kp, kd, position_degrees, velocity_deg_per_sec, torque) in commands.items():
motor_id = self._get_motor_id(motor)
motor_name = self._get_motor_name(motor)
motor_type = self._motor_types.get(motor_name, MotorType.DM4310)
# Convert degrees to radians
position_rad = np.radians(position_degrees)
velocity_rad_per_sec = np.radians(velocity_deg_per_sec)
# Get motor limits
pmax, vmax, tmax = MOTOR_LIMIT_PARAMS[motor_type]
# Encode parameters
kp_uint = self._float_to_uint(kp, 0, 500, 12)
kd_uint = self._float_to_uint(kd, 0, 5, 12)
q_uint = self._float_to_uint(position_rad, -pmax, pmax, 16)
dq_uint = self._float_to_uint(velocity_rad_per_sec, -vmax, vmax, 12)
tau_uint = self._float_to_uint(torque, -tmax, tmax, 12)
# Pack data
data = [0] * 8
data[0] = (q_uint >> 8) & 0xFF
data[1] = q_uint & 0xFF
data[2] = dq_uint >> 4
data[3] = ((dq_uint & 0xF) << 4) | ((kp_uint >> 8) & 0xF)
data[4] = kp_uint & 0xFF
data[5] = kd_uint >> 4
data[6] = ((kd_uint & 0xF) << 4) | ((tau_uint >> 8) & 0xF)
data[7] = tau_uint & 0xFF
# Send command
msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False)
self.canbus.send(msg)
# Track expected response
recv_id = self._get_motor_recv_id(motor)
expected_recv_ids.append(recv_id)
# Step 2: Collect all responses at once
self._recv_all_responses(expected_recv_ids, timeout=0.002)
def _float_to_uint(self, x: float, x_min: float, x_max: float, bits: int) -> int:
"""Convert float to unsigned integer for CAN transmission."""
x = max(x_min, min(x_max, x)) # Clamp to range
span = x_max - x_min
data_norm = (x - x_min) / span
return int(data_norm * ((1 << bits) - 1))
def _uint_to_float(self, x: int, x_min: float, x_max: float, bits: int) -> float:
"""Convert unsigned integer from CAN to float."""
span = x_max - x_min
data_norm = float(x) / ((1 << bits) - 1)
return data_norm * span + x_min
def _decode_motor_state(self, data: bytes, motor_type: MotorType) -> Tuple[float, float, float, int, int]:
"""
Decode motor state from CAN data.
Returns:
Tuple of (position_degrees, velocity_deg_per_sec, torque, temp_mos, temp_rotor)
"""
if len(data) < 8:
raise ValueError("Invalid motor state data")
# Extract encoded values
q_uint = (data[1] << 8) | data[2]
dq_uint = (data[3] << 4) | (data[4] >> 4)
tau_uint = ((data[4] & 0x0F) << 8) | data[5]
t_mos = data[6]
t_rotor = data[7]
# Get motor limits
pmax, vmax, tmax = MOTOR_LIMIT_PARAMS[motor_type]
# Decode to physical values (radians)
position_rad = self._uint_to_float(q_uint, -pmax, pmax, 16)
velocity_rad_per_sec = self._uint_to_float(dq_uint, -vmax, vmax, 12)
torque = self._uint_to_float(tau_uint, -tmax, tmax, 12)
# Convert to degrees
position_degrees = np.degrees(position_rad)
velocity_deg_per_sec = np.degrees(velocity_rad_per_sec)
return position_degrees, velocity_deg_per_sec, torque, t_mos, t_rotor
def read(
self,
data_name: str,
motor: str,
*,
normalize: bool = True,
num_retry: int = 0,
) -> Value:
"""Read a value from a single motor. Positions are always in degrees."""
if not self.is_connected:
raise DeviceNotConnectedError(f"{self} is not connected.")
# Refresh motor to get latest state
msg = self._refresh_motor(motor)
if msg is None:
motor_id = self._get_motor_id(motor)
recv_id = self._get_motor_recv_id(motor)
raise ConnectionError(
f"No response from motor '{motor}' (send ID: 0x{motor_id:02X}, recv ID: 0x{recv_id:02X}). "
f"Check that: 1) Motor is powered (24V), 2) CAN wiring is correct, "
f"3) Motor IDs are configured correctly using Damiao Debugging Tools"
)
motor_type = self._motor_types.get(motor, MotorType.DM4310)
position_degrees, velocity_deg_per_sec, torque, t_mos, t_rotor = self._decode_motor_state(msg.data, motor_type)
# Return requested data (already in degrees for position/velocity)
if data_name == "Present_Position":
value = position_degrees
elif data_name == "Present_Velocity":
value = velocity_deg_per_sec
elif data_name == "Present_Torque":
value = torque
elif data_name == "Temperature_MOS":
value = t_mos
elif data_name == "Temperature_Rotor":
value = t_rotor
else:
raise ValueError(f"Unknown data_name: {data_name}")
# For Damiao, positions are always in degrees, no normalization needed
# We keep the normalize parameter for compatibility but don't use it
return value
def write(
self,
data_name: str,
motor: str,
value: Value,
*,
normalize: bool = True,
num_retry: int = 0,
) -> None:
"""Write a value to a single motor. Positions are always in degrees."""
if not self.is_connected:
raise DeviceNotConnectedError(f"{self} is not connected.")
# Value is expected to be in degrees for positions
if data_name == "Goal_Position":
# Use MIT control with position in degrees
self._mit_control(motor, 10.0, 0.5, value, 0, 0)
else:
raise ValueError(f"Writing {data_name} not supported in MIT mode")
def sync_read(
self,
data_name: str,
motors: str | list[str] | None = None,
*,
normalize: bool = True,
num_retry: int = 0,
) -> Dict[str, Value]:
"""
Read the same value from multiple motors simultaneously.
Uses batched operations: sends all refresh commands, then collects all responses.
This is MUCH faster than sequential reads (OpenArms pattern).
"""
motors = self._get_motors_list(motors)
result = {}
# Step 1: Send refresh commands to ALL motors first (no waiting)
for motor in motors:
motor_id = self._get_motor_id(motor)
data = [motor_id & 0xFF, (motor_id >> 8) & 0xFF, CAN_CMD_REFRESH, 0, 0, 0, 0, 0]
msg = can.Message(arbitration_id=CAN_PARAM_ID, data=data, is_extended_id=False)
self.canbus.send(msg)
# Step 2: Collect all responses at once (batch receive)
expected_recv_ids = [self._get_motor_recv_id(motor) for motor in motors]
responses = self._recv_all_responses(expected_recv_ids, timeout=0.01) # 10ms total timeout
# Step 3: Parse responses
for motor in motors:
try:
recv_id = self._get_motor_recv_id(motor)
msg = responses.get(recv_id)
if msg is None:
logger.warning(f"No response from motor '{motor}' (recv ID: 0x{recv_id:02X})")
result[motor] = 0.0
continue
motor_type = self._motor_types.get(motor, MotorType.DM4310)
position_degrees, velocity_deg_per_sec, torque, t_mos, t_rotor = self._decode_motor_state(msg.data, motor_type)
# Return requested data
if data_name == "Present_Position":
value = position_degrees
elif data_name == "Present_Velocity":
value = velocity_deg_per_sec
elif data_name == "Present_Torque":
value = torque
elif data_name == "Temperature_MOS":
value = t_mos
elif data_name == "Temperature_Rotor":
value = t_rotor
else:
raise ValueError(f"Unknown data_name: {data_name}")
result[motor] = value
except Exception as e:
logger.warning(f"Failed to read {data_name} from {motor}: {e}")
result[motor] = 0.0
return result
def sync_read_all_states(
self,
motors: str | list[str] | None = None,
*,
num_retry: int = 0,
) -> Dict[str, Dict[str, Value]]:
"""
Read ALL motor states (position, velocity, torque) from multiple motors in ONE refresh cycle.
This is 3x faster than calling sync_read() three times separately.
Returns:
Dictionary mapping motor names to state dicts with keys: 'position', 'velocity', 'torque'
Example: {'joint_1': {'position': 45.2, 'velocity': 1.3, 'torque': 0.5}, ...}
"""
motors = self._get_motors_list(motors)
result = {}
# Step 1: Send refresh commands to ALL motors first (with small delays to reduce bus congestion)
for motor in motors:
motor_id = self._get_motor_id(motor)
data = [motor_id & 0xFF, (motor_id >> 8) & 0xFF, CAN_CMD_REFRESH, 0, 0, 0, 0, 0]
msg = can.Message(arbitration_id=CAN_PARAM_ID, data=data, is_extended_id=False)
self.canbus.send(msg)
time.sleep(0.0001) # 100us delay between commands to reduce bus congestion
# Step 2: Collect all responses at once (batch receive)
expected_recv_ids = [self._get_motor_recv_id(motor) for motor in motors]
responses = self._recv_all_responses(expected_recv_ids, timeout=0.015) # 15ms timeout (increased for reliability)
# Step 3: Parse responses and extract ALL state values
for motor in motors:
try:
recv_id = self._get_motor_recv_id(motor)
msg = responses.get(recv_id)
if msg is None:
logger.warning(f"No response from motor '{motor}' (recv ID: 0x{recv_id:02X})")
result[motor] = {"position": 0.0, "velocity": 0.0, "torque": 0.0}
continue
motor_type = self._motor_types.get(motor, MotorType.DM4310)
position_degrees, velocity_deg_per_sec, torque, t_mos, t_rotor = self._decode_motor_state(msg.data, motor_type)
# Return all state values in one dict
result[motor] = {
"position": position_degrees,
"velocity": velocity_deg_per_sec,
"torque": torque,
"temp_mos": t_mos,
"temp_rotor": t_rotor,
}
except Exception as e:
logger.warning(f"Failed to read state from {motor}: {e}")
result[motor] = {"position": 0.0, "velocity": 0.0, "torque": 0.0}
return result
def sync_write(
self,
data_name: str,
values: Dict[str, Value],
*,
normalize: bool = True,
num_retry: int = 0,
) -> None:
"""
Write different values to multiple motors simultaneously. Positions are always in degrees.
Uses batched operations: sends all commands first, then collects responses (OpenArms pattern).
"""
if data_name == "Goal_Position":
# Step 1: Send all MIT control commands first (no waiting)
for motor, value_degrees in values.items():
motor_id = self._get_motor_id(motor)
motor_name = self._get_motor_name(motor)
motor_type = self._motor_types.get(motor_name, MotorType.DM4310)
# Convert degrees to radians
position_rad = np.radians(value_degrees)
# Default gains for position control
kp, kd = 10.0, 0.5
# Get motor limits and encode parameters
pmax, vmax, tmax = MOTOR_LIMIT_PARAMS[motor_type]
kp_uint = self._float_to_uint(kp, 0, 500, 12)
kd_uint = self._float_to_uint(kd, 0, 5, 12)
q_uint = self._float_to_uint(position_rad, -pmax, pmax, 16)
dq_uint = self._float_to_uint(0, -vmax, vmax, 12)
tau_uint = self._float_to_uint(0, -tmax, tmax, 12)
# Pack data
data = [0] * 8
data[0] = (q_uint >> 8) & 0xFF
data[1] = q_uint & 0xFF
data[2] = dq_uint >> 4
data[3] = ((dq_uint & 0xF) << 4) | ((kp_uint >> 8) & 0xF)
data[4] = kp_uint & 0xFF
data[5] = kd_uint >> 4
data[6] = ((kd_uint & 0xF) << 4) | ((tau_uint >> 8) & 0xF)
data[7] = tau_uint & 0xFF
msg = can.Message(arbitration_id=motor_id, data=data, is_extended_id=False)
self.canbus.send(msg)
time.sleep(0.0001) # 100us delay between commands to reduce bus congestion
# Step 2: Collect all responses at once
expected_recv_ids = [self._get_motor_recv_id(motor) for motor in values.keys()]
self._recv_all_responses(expected_recv_ids, timeout=0.015) # 15ms timeout (increased for reliability)
else:
# Fall back to individual writes for other data types
for motor, value in values.items():
self.write(data_name, motor, value, normalize=normalize, num_retry=num_retry)
def read_calibration(self) -> dict[str, MotorCalibration]:
"""Read calibration data from motors."""
# Damiao motors don't store calibration internally
# Return existing calibration or empty dict
return self.calibration if self.calibration else {}
def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None:
"""Write calibration data to motors."""
# Damiao motors don't store calibration internally
# Just cache it in memory
if cache:
self.calibration = calibration_dict
def record_ranges_of_motion(
self, motors: NameOrID | list[NameOrID] | None = None, display_values: bool = True
) -> tuple[dict[NameOrID, Value], dict[NameOrID, Value]]:
"""
Interactively record the min/max values of each motor in degrees.
Move the joints by hand (with torque disabled) while the method streams live positions.
Press Enter to finish.
"""
if motors is None:
motors = list(self.motors.keys())
elif isinstance(motors, (str, int)):
motors = [motors]
# Disable torque for manual movement
self.disable_torque(motors)
time.sleep(0.1)
# Get initial positions (already in degrees)
start_positions = self.sync_read("Present_Position", motors, normalize=False)
mins = start_positions.copy()
maxes = start_positions.copy()
print("\nMove joints through their full range of motion. Press ENTER when done.")
user_pressed_enter = False
while not user_pressed_enter:
positions = self.sync_read("Present_Position", motors, normalize=False)
for motor in motors:
if motor in positions:
mins[motor] = min(positions[motor], mins.get(motor, positions[motor]))
maxes[motor] = max(positions[motor], maxes.get(motor, positions[motor]))
if display_values:
print("\n" + "=" * 50)
print(f"{'MOTOR':<20} | {'MIN (deg)':>12} | {'POS (deg)':>12} | {'MAX (deg)':>12}")
print("-" * 50)
for motor in motors:
if motor in positions:
print(f"{motor:<20} | {mins[motor]:>12.1f} | {positions[motor]:>12.1f} | {maxes[motor]:>12.1f}")
if enter_pressed():
user_pressed_enter = True
if display_values and not user_pressed_enter:
# Move cursor up to overwrite the previous output
move_cursor_up(len(motors) + 4)
time.sleep(0.05)
# Re-enable torque
self.enable_torque(motors)
# Validate ranges
for motor in motors:
if motor in mins and motor in maxes:
if abs(maxes[motor] - mins[motor]) < 5.0: # At least 5 degrees of range
raise ValueError(f"Motor {motor} has insufficient range of motion (< 5 degrees)")
return mins, maxes
def _get_motors_list(self, motors: str | list[str] | None) -> list[str]:
"""Convert motor specification to list of motor names."""
if motors is None:
return list(self.motors.keys())
elif isinstance(motors, str):
return [motors]
elif isinstance(motors, list):
return motors
else:
raise TypeError(f"Invalid motors type: {type(motors)}")
def _get_motor_id(self, motor: NameOrID) -> int:
"""Get CAN ID for a motor."""
if isinstance(motor, str):
if motor in self.motors:
return self.motors[motor].id
else:
raise ValueError(f"Unknown motor: {motor}")
else:
return motor
def _get_motor_name(self, motor: NameOrID) -> str:
"""Get motor name from name or ID."""
if isinstance(motor, str):
return motor
else:
for name, m in self.motors.items():
if m.id == motor:
return name
raise ValueError(f"Unknown motor ID: {motor}")
def _get_motor_recv_id(self, motor: NameOrID) -> Optional[int]:
"""Get motor recv_id from name or ID."""
motor_name = self._get_motor_name(motor)
motor_obj = self.motors.get(motor_name)
if motor_obj and hasattr(motor_obj, "recv_id"):
return motor_obj.recv_id
return None
@cached_property
def is_calibrated(self) -> bool:
"""Check if motors are calibrated."""
return bool(self.calibration)
-209
View File
@@ -1,209 +0,0 @@
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration tables for Damiao motors."""
from enum import IntEnum
from typing import Dict, List, Tuple
# Motor type definitions
class MotorType(IntEnum):
DM3507 = 0
DM4310 = 1
DM4310_48V = 2
DM4340 = 3
DM4340_48V = 4
DM6006 = 5
DM8006 = 6
DM8009 = 7
DM10010L = 8
DM10010 = 9
DMH3510 = 10
DMH6215 = 11
DMG6220 = 12
# Control modes
class ControlMode(IntEnum):
MIT = 1
POS_VEL = 2
VEL = 3
TORQUE_POS = 4
# Motor variable IDs (RID)
class MotorVariable(IntEnum):
UV_VALUE = 0
KT_VALUE = 1
OT_VALUE = 2
OC_VALUE = 3
ACC = 4
DEC = 5
MAX_SPD = 6
MST_ID = 7
ESC_ID = 8
TIMEOUT = 9
CTRL_MODE = 10
DAMP = 11
INERTIA = 12
HW_VER = 13
SW_VER = 14
SN = 15
NPP = 16
RS = 17
LS = 18
FLUX = 19
GR = 20
PMAX = 21
VMAX = 22
TMAX = 23
I_BW = 24
KP_ASR = 25
KI_ASR = 26
KP_APR = 27
KI_APR = 28
OV_VALUE = 29
GREF = 30
DETA = 31
V_BW = 32
IQ_C1 = 33
VL_C1 = 34
CAN_BR = 35
SUB_VER = 36
U_OFF = 50
V_OFF = 51
K1 = 52
K2 = 53
M_OFF = 54
DIR = 55
P_M = 80
XOUT = 81
# Motor limit parameters [PMAX, VMAX, TMAX]
# PMAX: Maximum position (rad)
# VMAX: Maximum velocity (rad/s)
# TMAX: Maximum torque (N·m)
MOTOR_LIMIT_PARAMS = {
MotorType.DM3507: (12.5, 30, 10),
MotorType.DM4310: (12.5, 30, 10),
MotorType.DM4310_48V: (12.5, 50, 10),
MotorType.DM4340: (12.5, 8, 28),
MotorType.DM4340_48V: (12.5, 10, 28),
MotorType.DM6006: (12.5, 45, 20),
MotorType.DM8006: (12.5, 45, 40),
MotorType.DM8009: (12.5, 45, 54),
MotorType.DM10010L: (12.5, 25, 200),
MotorType.DM10010: (12.5, 20, 200),
MotorType.DMH3510: (12.5, 280, 1),
MotorType.DMH6215: (12.5, 45, 10),
MotorType.DMG6220: (12.5, 45, 10),
}
# Motor model names
MODEL_NAMES = {
MotorType.DM3507: "dm3507",
MotorType.DM4310: "dm4310",
MotorType.DM4310_48V: "dm4310_48v",
MotorType.DM4340: "dm4340",
MotorType.DM4340_48V: "dm4340_48v",
MotorType.DM6006: "dm6006",
MotorType.DM8006: "dm8006",
MotorType.DM8009: "dm8009",
MotorType.DM10010L: "dm10010l",
MotorType.DM10010: "dm10010",
MotorType.DMH3510: "dmh3510",
MotorType.DMH6215: "dmh6215",
MotorType.DMG6220: "dmg6220",
}
# Motor resolution table (encoder counts per revolution)
MODEL_RESOLUTION = {
"dm3507": 65536,
"dm4310": 65536,
"dm4310_48v": 65536,
"dm4340": 65536,
"dm4340_48v": 65536,
"dm6006": 65536,
"dm8006": 65536,
"dm8009": 65536,
"dm10010l": 65536,
"dm10010": 65536,
"dmh3510": 65536,
"dmh6215": 65536,
"dmg6220": 65536,
}
# CAN baudrates supported by Damiao motors
AVAILABLE_BAUDRATES = [
125000, # 0: 125 kbps
200000, # 1: 200 kbps
250000, # 2: 250 kbps
500000, # 3: 500 kbps
1000000, # 4: 1 mbps (default for OpenArms)
2000000, # 5: 2 mbps
2500000, # 6: 2.5 mbps
3200000, # 7: 3.2 mbps
4000000, # 8: 4 mbps
5000000, # 9: 5 mbps
]
DEFAULT_BAUDRATE = 1000000 # 1 Mbps is standard for OpenArms
# Default timeout in milliseconds
DEFAULT_TIMEOUT_MS = 1000
# Data that should be normalized
NORMALIZED_DATA = ["Present_Position", "Goal_Position"]
# OpenArms specific configurations
# Based on: https://docs.openarm.dev/software/setup/configure-test
# OpenArms has 7 DOF per arm (14 total for dual arm)
OPENARMS_ARM_MOTOR_IDS = {
"joint_1": {"send": 0x01, "recv": 0x11}, # J1 - Shoulder pan
"joint_2": {"send": 0x02, "recv": 0x12}, # J2 - Shoulder lift
"joint_3": {"send": 0x03, "recv": 0x13}, # J3 - Elbow flex
"joint_4": {"send": 0x04, "recv": 0x14}, # J4 - Wrist flex
"joint_5": {"send": 0x05, "recv": 0x15}, # J5 - Wrist roll
"joint_6": {"send": 0x06, "recv": 0x16}, # J6 - Wrist pitch
"joint_7": {"send": 0x07, "recv": 0x17}, # J7 - Wrist rotation
}
OPENARMS_GRIPPER_MOTOR_IDS = {
"gripper": {"send": 0x08, "recv": 0x18}, # J8 - Gripper
}
# Default motor types for OpenArms
OPENARMS_DEFAULT_MOTOR_TYPES = {
"joint_1": MotorType.DM8009, # Shoulder pan - high torque
"joint_2": MotorType.DM8009, # Shoulder lift - high torque
"joint_3": MotorType.DM4340, # Shoulder rotation
"joint_4": MotorType.DM4340, # Elbow flex
"joint_5": MotorType.DM4310, # Wrist roll
"joint_6": MotorType.DM4310, # Wrist pitch
"joint_7": MotorType.DM4310, # Wrist rotation
"gripper": MotorType.DM4310, # Gripper
}
# MIT control parameter ranges
MIT_KP_RANGE = (0.0, 500.0)
MIT_KD_RANGE = (0.0, 5.0)
# CAN frame command IDs
CAN_CMD_ENABLE = 0xFC
CAN_CMD_DISABLE = 0xFD
CAN_CMD_SET_ZERO = 0xFE
CAN_CMD_REFRESH = 0xCC
CAN_CMD_QUERY_PARAM = 0x33
CAN_CMD_WRITE_PARAM = 0x55
CAN_CMD_SAVE_PARAM = 0xAA
# CAN ID for parameter operations
CAN_PARAM_ID = 0x7FF
+2 -2
View File
@@ -24,7 +24,7 @@ from enum import Enum
from lerobot.motors.encoding_utils import decode_twos_complement, encode_twos_complement
from ..motors_bus import Motor, MotorCalibration, NameOrID, SerialMotorsBus, Value, get_address
from ..motors_bus import Motor, MotorCalibration, MotorsBus, NameOrID, Value, get_address
from .tables import (
AVAILABLE_BAUDRATES,
MODEL_BAUDRATE_TABLE,
@@ -100,7 +100,7 @@ def _split_into_byte_chunks(value: int, length: int) -> list[int]:
return data
class DynamixelMotorsBus(SerialMotorsBus):
class DynamixelMotorsBus(MotorsBus):
"""
The Dynamixel implementation for a MotorsBus. It relies on the python dynamixel sdk to communicate with
the motors. For more info, see the Dynamixel SDK Documentation:
+3 -3
View File
@@ -19,7 +19,7 @@ from pprint import pformat
from lerobot.motors.encoding_utils import decode_sign_magnitude, encode_sign_magnitude
from ..motors_bus import Motor, MotorCalibration, NameOrID, SerialMotorsBus, Value, get_address
from ..motors_bus import Motor, MotorCalibration, MotorsBus, NameOrID, Value, get_address
from .tables import (
FIRMWARE_MAJOR_VERSION,
FIRMWARE_MINOR_VERSION,
@@ -96,7 +96,7 @@ def patch_setPacketTimeout(self, packet_length): # noqa: N802
self.packet_timeout = (self.tx_time_per_byte * packet_length) + (self.tx_time_per_byte * 3.0) + 50
class FeetechMotorsBus(SerialMotorsBus):
class FeetechMotorsBus(MotorsBus):
"""
The FeetechMotorsBus class allows to efficiently read and write to the attached motors. It relies on the
python feetech sdk to communicate with the motors, which is itself based on the dynamixel sdk.
@@ -165,7 +165,7 @@ class FeetechMotorsBus(SerialMotorsBus):
def _handshake(self) -> None:
self._assert_motors_exist()
#self._assert_same_firmware()
self._assert_same_firmware()
def _find_single_motor(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
if self.protocol_version == 0:
+4 -96
View File
@@ -19,8 +19,6 @@
# TODO(aliberts): Add block noqa when feature below is available
# https://github.com/astral-sh/ruff/issues/3711
from __future__ import annotations
import abc
import logging
from contextlib import contextmanager
@@ -43,92 +41,6 @@ Value: TypeAlias = int | float
logger = logging.getLogger(__name__)
class MotorsBusBase(abc.ABC):
"""
Base class for all motor bus implementations.
This is a minimal interface that all motor buses must implement, regardless of their
communication protocol (serial, CAN, etc.).
"""
def __init__(
self,
port: str,
motors: dict[str, Motor],
calibration: dict[str, MotorCalibration] | None = None,
):
self.port = port
self.motors = motors
self.calibration = calibration if calibration else {}
@abc.abstractmethod
def connect(self, handshake: bool = True) -> None:
"""Establish connection to the motors."""
pass
@abc.abstractmethod
def disconnect(self, disable_torque: bool = True) -> None:
"""Disconnect from the motors."""
pass
@property
@abc.abstractmethod
def is_connected(self) -> bool:
"""Check if connected to the motors."""
pass
@abc.abstractmethod
def read(self, data_name: str, motor: str, *, normalize: bool = True, num_retry: int = 0) -> Value:
"""Read a value from a single motor."""
pass
@abc.abstractmethod
def write(
self, data_name: str, motor: str, value: Value, *, normalize: bool = True, num_retry: int = 0
) -> None:
"""Write a value to a single motor."""
pass
@abc.abstractmethod
def sync_read(
self, data_name: str, motors: str | list[str] | None = None, *, normalize: bool = True
) -> dict[str, Value]:
"""Read a value from multiple motors."""
pass
@abc.abstractmethod
def sync_write(
self,
data_name: str,
values: Value | dict[str, Value],
motors: str | list[str] | None = None,
*,
normalize: bool = True,
) -> None:
"""Write values to multiple motors."""
pass
@abc.abstractmethod
def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
"""Enable torque on selected motors."""
pass
@abc.abstractmethod
def disable_torque(self, motors: int | str | list[str] | None = None, num_retry: int = 0) -> None:
"""Disable torque on selected motors."""
pass
@abc.abstractmethod
def read_calibration(self) -> dict[str, MotorCalibration]:
"""Read calibration parameters from the motors."""
pass
@abc.abstractmethod
def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None:
"""Write calibration parameters to the motors."""
pass
def get_ctrl_table(model_ctrl_table: dict[str, dict], model: str) -> dict[str, tuple[int, int]]:
ctrl_table = model_ctrl_table.get(model)
if ctrl_table is None:
@@ -291,15 +203,15 @@ class GroupSyncWrite(Protocol):
def txPacket(self): ...
class SerialMotorsBus(MotorsBusBase):
class MotorsBus(abc.ABC):
"""
A SerialMotorsBus allows to efficiently read and write to motors connected via serial communication.
A MotorsBus allows to efficiently read and write to the attached motors.
It represents several motors daisy-chained together and connected through a serial port.
There are currently two implementations of this class:
There are currently two implementations of this abstract class:
- DynamixelMotorsBus
- FeetechMotorsBus
This class is specifically for serial-based motor protocols (Dynamixel, Feetech, etc.).
Note: This class may evolve in the future should we add support for other types of bus.
A MotorsBus subclass instance requires a port (e.g. `FeetechMotorsBus(port="/dev/tty.usbmodem575E0031751"`)).
To find the port, you can run our utility script:
@@ -1300,7 +1212,3 @@ class SerialMotorsBus(MotorsBusBase):
for id_, value in ids_values.items():
data = self._serialize_data(value, length)
self.sync_writer.addParam(id_, data)
# Backward compatibility alias
MotorsBus = SerialMotorsBus
@@ -23,8 +23,6 @@ from lerobot.optim.schedulers import CosineDecayWithWarmupSchedulerConfig
from lerobot.policies.rtc.configuration_rtc import RTCConfig
from lerobot.utils.constants import OBS_IMAGES
DEFAULT_IMAGE_SIZE = 224
@PreTrainedConfig.register_subclass("pi0")
@dataclass
@@ -53,10 +51,7 @@ class PI0Config(PreTrainedConfig):
# Real-Time Chunking (RTC) configuration
rtc_config: RTCConfig | None = None
image_resolution: tuple[int, int] = (
DEFAULT_IMAGE_SIZE,
DEFAULT_IMAGE_SIZE,
) # see openpi `preprocessing_pytorch.py`
image_resolution: tuple[int, int] = (224, 224) # see openpi `preprocessing_pytorch.py`
# Add empty images. Used to add empty cameras when no image features are present.
empty_cameras: int = 0
+13 -14
View File
@@ -41,7 +41,7 @@ else:
PaliGemmaForConditionalGeneration = None
from lerobot.configs.policies import PreTrainedConfig
from lerobot.policies.pi0.configuration_pi0 import DEFAULT_IMAGE_SIZE, PI0Config
from lerobot.policies.pi0.configuration_pi0 import PI0Config
from lerobot.policies.pretrained import PreTrainedPolicy, T
from lerobot.policies.rtc.modeling_rtc import RTCProcessor
from lerobot.utils.constants import (
@@ -337,7 +337,6 @@ class PaliGemmaWithExpertModel(
action_expert_config,
use_adarms=None,
precision: Literal["bfloat16", "float32"] = "bfloat16",
image_size: int = DEFAULT_IMAGE_SIZE,
):
if use_adarms is None:
use_adarms = [False, False]
@@ -357,7 +356,6 @@ class PaliGemmaWithExpertModel(
vlm_config_hf.text_config.vocab_size = 257152
vlm_config_hf.text_config.use_adarms = use_adarms[0]
vlm_config_hf.text_config.adarms_cond_dim = vlm_config.width if use_adarms[0] else None
vlm_config_hf.vision_config.image_size = image_size
vlm_config_hf.vision_config.intermediate_size = 4304
vlm_config_hf.vision_config.projection_dim = 2048
vlm_config_hf.vision_config.projector_hidden_act = "gelu_fast"
@@ -521,17 +519,11 @@ class PI0Pytorch(nn.Module): # see openpi `PI0Pytorch`
paligemma_config = get_gemma_config(config.paligemma_variant)
action_expert_config = get_gemma_config(config.action_expert_variant)
if config.image_resolution[0] != config.image_resolution[1]:
raise ValueError(
f"PaliGemma expects square image resolution, invalid resolution: {config.image_resolution}"
)
self.paligemma_with_expert = PaliGemmaWithExpertModel(
paligemma_config,
action_expert_config,
use_adarms=[False, False],
precision=config.dtype,
image_size=config.image_resolution[0],
)
self.action_in_proj = nn.Linear(config.max_action_dim, action_expert_config.width)
@@ -820,13 +812,16 @@ class PI0Pytorch(nn.Module): # see openpi `PI0Pytorch`
)
dt = -1.0 / num_steps
dt = torch.tensor(dt, dtype=torch.float32, device=device)
x_t = noise
for step in range(num_steps):
time = 1.0 + step * dt
time_tensor = torch.tensor(time, dtype=torch.float32, device=device).expand(bsize)
time = torch.tensor(1.0, dtype=torch.float32, device=device)
while time >= -dt / 2:
expanded_time = time.expand(bsize)
def denoise_step_partial_call(input_x_t, current_timestep=time_tensor):
# Define a closure function to properly capture expanded_time
# This avoids the lambda expression (E731) and loop variable binding (B023) issues
def denoise_step_partial_call(input_x_t, current_timestep=expanded_time):
return self.denoise_step(
state=state,
prefix_pad_masks=prefix_pad_masks,
@@ -851,11 +846,15 @@ class PI0Pytorch(nn.Module): # see openpi `PI0Pytorch`
else:
v_t = denoise_step_partial_call(x_t)
x_t = x_t + dt * v_t
# Euler step
x_t += dt * v_t
# Record x_t and v_t after Euler step
if self.rtc_processor is not None and self.rtc_processor.is_debug_enabled():
self.rtc_processor.track(time=time, x_t=x_t, v_t=v_t)
time += dt
return x_t
def denoise_step(
@@ -0,0 +1,196 @@
# FAST Tokenizer Training for LeRobotDataset
This directory contains tools for training a FAST (Frequency-space Action Sequence Tokenization) tokenizer on LeRobot datasets.
## Files
- **`train_fast_tokenizer.py`**: Main training script (refactored for LeRobotDataset)
- **`train_fast_tokenizer_example.md`**: Usage examples and parameter documentation
- **`MIGRATION_NOTES.md`**: Migration guide from B1K to LeRobotDataset
## Quick Start
```bash
# Basic usage
python train_fast_tokenizer.py \
--repo_id "lerobot/aloha_sim_insertion_human" \
--action_horizon 10 \
--encoded_dims "0:14"
# With delta transform
python train_fast_tokenizer.py \
--repo_id "lerobot/aloha_sim_insertion_human" \
--action_horizon 10 \
--encoded_dims "0:14" \
--delta_dims "0,1,2,3,4,5,6,7,8,9,10,11,12,13" \
--state_key "observation.state" \
--vocab_size 1024
```
## What is FAST?
FAST is a tokenizer for robotic action sequences that:
1. Applies DCT (Discrete Cosine Transform) to action chunks
2. Quantizes DCT coefficients
3. Uses BPE (Byte-Pair Encoding) to compress the quantized sequence
4. Achieves high compression ratios (e.g., 10-20x) while maintaining accuracy
This enables efficient storage and processing of long action sequences in vision-language-action models.
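As a rough, illustrative sketch of steps 1-2 (the DCT and quantization only; the BPE compression of step 3 is handled by the actual FAST tokenizer), assuming `scipy` is available and using an arbitrary `scale`:

```python
import numpy as np
from scipy.fft import dct, idct

scale = 10.0                      # quantization knob, analogous to --scale
chunk = np.random.randn(10, 14)   # [action_horizon, action_dim]; random data used only for shape

# 1. DCT along the time axis; for smooth real trajectories this concentrates
#    energy in a few low-frequency coefficients.
coeffs = dct(chunk, axis=0, norm="ortho")

# 2. Quantize the coefficients to integers; on real (smooth, normalized) actions
#    most high-frequency entries round to zero.
quantized = np.round(coeffs * scale).astype(np.int32)

# 3. FAST then flattens these integers and compresses them with BPE.
#    Decoding inverts the steps above:
recon = idct(quantized / scale, axis=0, norm="ortho")
print("max reconstruction error:", np.abs(recon - chunk).max())
```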
## Requirements
- Python 3.10+
- LeRobot dataset (either local or from HuggingFace Hub)
- transformers (for AutoProcessor)
- numpy
- torch
- tyro
## Workflow
```
LeRobotDataset → Extract Episodes → Apply Delta Transform →
Select Dimensions → Normalize (q01, q99) → Create Chunks →
Train FAST Tokenizer → Compute Stats → Save
```
## Parameters Guide
### Essential Parameters
- **`repo_id`**: HuggingFace dataset repository ID
- Example: `"lerobot/aloha_sim_insertion_human"`
- **`action_horizon`**: Length of action sequences to tokenize
- Typical: 10-16 steps
- **`encoded_dims`**: Which action dimensions to encode
- Format: `"start:end,start:end"`
- Example: `"0:7"` = dimensions 0-6
- Example: `"0:3,7:10"` = dimensions 0-2 and 7-9
### Optional Parameters
- **`delta_dims`**: Apply delta transform (action - state) to these dimensions
- Format: `"0,1,2,3,4,5"`
- Use for position-based actions (takes effect only when `use_delta_transform` is enabled), not velocity-based ones
- **`state_key`**: Dataset key containing state observations
- Default: `"observation.state"`
- **`vocab_size`**: BPE vocabulary size
- Default: 1024
- Larger = better compression but more memory
- **`scale`**: DCT quantization scale
- Default: 10.0
- Smaller = finer quantization, larger = coarser
- **`sample_fraction`**: Fraction of action chunks to use per episode
- Default: 0.1 (10%)
- Increase for small datasets, decrease for large datasets
## Output
The script creates a directory (default: `./fast_tokenizer_{repo_id}`) containing:
1. **Tokenizer files**: Can be loaded with `AutoProcessor.from_pretrained()`
2. **`metadata.json`**: Contains:
- Training configuration
- Compression statistics
- Dataset information
## Example Output
```
Loading dataset: lerobot/aloha_sim_insertion_human
Dataset loaded: 50 episodes, 5000 frames
Encoding 14 dimensions: 0:14
Delta dimensions: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
Action horizon: 10
Processing 50 episodes...
Collected 4500 action chunks
Extracted 14 encoded dimensions
Before normalization - overall stats:
Min: -2.3451, Max: 3.1234, Mean: 0.0234, Std: 0.8765
Applied quantile normalization [q01, q99] → [-1, 1]
After normalization - overall stats:
Min: -1.0000, Max: 1.0000, Mean: 0.0156, Std: 0.4321
Training FAST tokenizer on 4500 action chunks...
Action chunk shape: (4500, 10, 14)
Vocab size: 1024
DCT scale: 10.0
✓ Tokenizer training complete!
Compression Statistics:
Average compression ratio: 14.23x
Mean token length: 9.8
P99 token length: 15
Min token length: 6
Max token length: 18
✅ Saved FAST tokenizer to ./fast_tokenizer_lerobot_aloha_sim_insertion_human
```
## Using the Trained Tokenizer
```python
import numpy as np
from transformers import AutoProcessor
# Load tokenizer
tokenizer = AutoProcessor.from_pretrained(
"./fast_tokenizer_lerobot_aloha_sim_insertion_human",
trust_remote_code=True
)
# Encode action chunk [horizon, action_dim]
action_chunk = np.random.randn(10, 14) # Example
tokens = tokenizer(action_chunk[None])[0] # Returns token IDs
# Decode tokens back to actions
reconstructed = tokenizer.decode(tokens)
```
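A quick round-trip sanity check, reusing the calls shown above (the exact shape returned by `decode` may require a reshape depending on the tokenizer version, and the acceptable error depends on `--scale` and your normalization):

```python
import numpy as np

original = np.random.randn(10, 14)        # normally a normalized action chunk
tokens = tokenizer(original[None])[0]      # encode to token IDs
reconstructed = np.asarray(tokenizer.decode(tokens)).reshape(original.shape)

print(f"{len(tokens)} tokens, max reconstruction error {np.abs(reconstructed - original).max():.3f}")
```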
## Tips
1. **Start Small**: Use `--max_episodes 10` for initial testing
2. **Check Dimensions**: Verify encoded dimensions match your robot's action space
3. **Delta Transform**: Use for position-based actions, not velocity-based
4. **Normalization**: Ensure dataset has proper statistics computed
5. **Compression Ratio**: Aim for 10-20x for good balance of compression and accuracy
## Troubleshooting
**Issue**: "No normalization stats found"
- **Solution**: Compute dataset statistics first, or use raw actions
**Issue**: "Episode too short for action horizon"
- **Solution**: Reduce `--action_horizon` or filter short episodes
**Issue**: "State key not found"
- **Solution**: Check dataset features and use correct `--state_key`
**Issue**: Memory error with large datasets
- **Solution**: Reduce `--sample_fraction` or `--max_episodes`
## Citation
If you use FAST in your research, please cite:
```bibtex
@article{pertsch2025fast,
title={FAST: Efficient Action Tokenization for Vision-Language-Action Models},
author={Pertsch, Karl and Stachowicz, Kyle and Ichter, Brian and others},
journal={arXiv preprint arXiv:2501.09747},
year={2025}
}
```
@@ -22,8 +22,6 @@ from lerobot.optim.optimizers import AdamWConfig
from lerobot.optim.schedulers import CosineDecayWithWarmupSchedulerConfig
from lerobot.policies.rtc.configuration_rtc import RTCConfig
DEFAULT_IMAGE_SIZE = 224
@PreTrainedConfig.register_subclass("pi05")
@dataclass
@@ -39,6 +37,11 @@ class PI05Config(PreTrainedConfig):
# Shorter state and action vectors will be padded to these dimensions
max_state_dim: int = 32
max_action_dim: int = 32
max_action_tokens: int = 32
fast_vocab_size: int = 2048
# FAST-only mode: train with only discrete action token prediction (no flow matching, no subtask)
fast_only: bool = False
# Flow matching parameters: see openpi `PI0Pytorch`
num_inference_steps: int = 10
@@ -52,10 +55,7 @@ class PI05Config(PreTrainedConfig):
# Real-Time Chunking (RTC) configuration
rtc_config: RTCConfig | None = None
image_resolution: tuple[int, int] = (
DEFAULT_IMAGE_SIZE,
DEFAULT_IMAGE_SIZE,
) # see openpi `preprocessing_pytorch.py`
image_resolution: tuple[int, int] = (224, 224) # see openpi `preprocessing_pytorch.py`
# Add empty images. Used to add empty cameras when no image features are present.
empty_cameras: int = 0
@@ -65,8 +65,8 @@ class PI05Config(PreTrainedConfig):
normalization_mapping: dict[str, NormalizationMode] = field(
default_factory=lambda: {
"VISUAL": NormalizationMode.IDENTITY,
"STATE": NormalizationMode.QUANTILES, # Pi0.5 uses quantiles for state
"ACTION": NormalizationMode.QUANTILES, # Pi0.5 uses quantiles for action
"STATE": NormalizationMode.MEAN_STD, # Pi0.5 uses quantiles for state
"ACTION": NormalizationMode.MEAN_STD, # Pi0.5 uses quantiles for action
}
)
+21
View File
@@ -0,0 +1,21 @@
lerobot-train \
--dataset.repo_id=lerobot \
--dataset.root=/fsx/jade_choghari/outputs/collect-data-pgen \
--output_dir=/fsx/jade_choghari/outputs/pi0test1 \
--job_name=pi0_training \
--policy.repo_id=jade_choghari/pi0-base \
--policy.path=/fsx/jade_choghari/outputs/pi0_fast_fruit1/checkpoints/last/pretrained_model \
--policy.dtype=bfloat16 \
--steps=3000 \
--save_freq=1000 \
--rename_map='{
"observation.images.base": "observation.images.base_0_rgb",
"observation.images.left_wrist": "observation.images.left_wrist_0_rgb",
"observation.images.right_wrist": "observation.images.right_wrist_0_rgb",
}' \
--batch_size=4 \
--policy.device=cuda \
# --wandb.enable=true \
# --wandb.disable_artifact=true \
# --wandb.project=pi05hi-training \
File diff suppressed because it is too large Load Diff
+44 -11
View File
@@ -33,6 +33,7 @@ from lerobot.processor import (
ProcessorStep,
ProcessorStepRegistry,
RenameObservationsProcessorStep,
ActionTokenizerProcessorStep,
TokenizerProcessorStep,
UnnormalizerProcessorStep,
)
@@ -47,13 +48,15 @@ from lerobot.utils.constants import (
@ProcessorStepRegistry.register(name="pi05_prepare_state_tokenizer_processor_step")
@dataclass
class Pi05PrepareStateTokenizerProcessorStep(ProcessorStep):
class Pi05PrepareStateAndLanguageTokenizerProcessorStep(ProcessorStep):
"""
Processor step to prepare the state and tokenize the language input.
"""
max_state_dim: int = 32
task_key: str = "task"
high_level_task_key: str = "user_prompt"
subtask_only_key: str = "subtask"
def __call__(self, transition: EnvTransition) -> EnvTransition:
transition = transition.copy()
@@ -64,6 +67,8 @@ class Pi05PrepareStateTokenizerProcessorStep(ProcessorStep):
tasks = transition.get(TransitionKey.COMPLEMENTARY_DATA, {}).get(self.task_key)
if tasks is None:
raise ValueError("No task found in complementary data")
high_level_tasks = transition.get(TransitionKey.COMPLEMENTARY_DATA, {}).get(self.high_level_task_key)
# TODO: check if this is necessary
state = deepcopy(state)
@@ -76,16 +81,42 @@ class Pi05PrepareStateTokenizerProcessorStep(ProcessorStep):
state_np = state.cpu().numpy()
discretized_states = np.digitize(state_np, bins=np.linspace(-1, 1, 256 + 1)[:-1]) - 1
full_prompts = []
# Clean high level tasks first (if available)
cleaned_high_level_tasks = []
if high_level_tasks is not None:
for high_level_task in high_level_tasks:
cleaned_high_level_tasks.append(high_level_task.strip().replace("_", " ").replace("\n", " "))
# Process low level tasks with state information
low_level_prompts = []
subtask_only_prompts = [] # Store only the subtask text for prediction
for i, task in enumerate(tasks):
cleaned_text = task.strip().replace("_", " ").replace("\n", " ")
state_str = " ".join(map(str, discretized_states[i]))
full_prompt = f"Task: {cleaned_text}, State: {state_str};\nAction: "
full_prompts.append(full_prompt)
# Store only the subtask text (used as prediction target)
subtask_only_prompts.append(cleaned_text)
if cleaned_high_level_tasks:
cleaned_high_level_task = cleaned_high_level_tasks[i]
full_prompt = f"High level task: {cleaned_high_level_task}; State: {state_str}; Subtask: {cleaned_text}"
else:
full_prompt = f"Task: {cleaned_text}, State: {state_str};\n" #remove Action by jade
transition[TransitionKey.COMPLEMENTARY_DATA][self.task_key] = full_prompts
# Normalize state to [-1, 1] range if needed (assuming it's already normalized by normalizer processor step!!)
# Discretize into 256 bins (see openpi `PaligemmaTokenizer.tokenize()`)
low_level_prompts.append(full_prompt)
transition[TransitionKey.COMPLEMENTARY_DATA][self.task_key] = low_level_prompts
transition[TransitionKey.COMPLEMENTARY_DATA][self.subtask_only_key] = subtask_only_prompts
# Process high level tasks without state information (if available)
if high_level_tasks is not None:
high_level_prompts = []
for i, cleaned_high_level_task in enumerate(cleaned_high_level_tasks):
state_str = " ".join(map(str, discretized_states[i]))
full_prompt = f"High level task: {cleaned_high_level_task}; State: {state_str}; Subtask:"
high_level_prompts.append(full_prompt)
transition[TransitionKey.COMPLEMENTARY_DATA][self.high_level_task_key] = high_level_prompts
return transition
def transform_features(
@@ -128,25 +159,27 @@ def make_pi05_pre_post_processors(
Returns:
A tuple containing the configured pre-processor and post-processor pipelines.
"""
# Add remaining processors
input_steps: list[ProcessorStep] = [
RenameObservationsProcessorStep(rename_map={}), # To mimic the same processor as pretrained one
AddBatchDimensionProcessorStep(),
# NOTE: NormalizerProcessorStep MUST come before Pi05PrepareStateTokenizerProcessorStep
# NOTE: NormalizerProcessorStep MUST come before Pi05PrepareStateAndLanguageTokenizerProcessorStep
# because the tokenizer step expects normalized state in [-1, 1] range for discretization
NormalizerProcessorStep(
features={**config.input_features, **config.output_features},
norm_map=config.normalization_mapping,
stats=dataset_stats,
),
Pi05PrepareStateTokenizerProcessorStep(max_state_dim=config.max_state_dim),
Pi05PrepareStateAndLanguageTokenizerProcessorStep(max_state_dim=config.max_state_dim),
TokenizerProcessorStep(
tokenizer_name="google/paligemma-3b-pt-224",
max_length=config.tokenizer_max_length,
padding_side="right",
padding="max_length",
),
ActionTokenizerProcessorStep(
tokenizer_name="/fsx/jade_choghari/outputs/fast_tokenizer", # TODO: jade put the PI
),
DeviceProcessorStep(device=config.device),
]
@@ -156,7 +189,7 @@ def make_pi05_pre_post_processors(
),
DeviceProcessorStep(device="cpu"),
]
return (
PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
steps=input_steps,
+23
View File
@@ -0,0 +1,23 @@
export CUDA_LAUNCH_BLOCKING=1
lerobot-train \
--dataset.repo_id=local \
--dataset.root=/fsx/jade_choghari/outputs/collect-data-pgen \
--output_dir=/fsx/jade_choghari/outputs/pi0_fast_fruit2 \
--job_name=pi0_training \
--policy.repo_id=jade_choghari/pi0-base1 \
--policy.path=lerobot/pi05_base \
--policy.dtype=bfloat16 \
--steps=200000 \
--save_freq=5000 \
--rename_map='{
"observation.images.base": "observation.images.base_0_rgb",
"observation.images.left_wrist": "observation.images.left_wrist_0_rgb",
"observation.images.right_wrist": "observation.images.right_wrist_0_rgb",
}' \
--batch_size=16 \
--policy.device=cuda \
--policy.fast_only=true \
# --wandb.enable=true \
# --wandb.disable_artifact=true \
# --wandb.project=pi05hi-training \
# /fsx/jade_choghari/.cache/huggingface/lerobot/jadechoghari/collect-data
+13
View File
@@ -0,0 +1,13 @@
rm -rf /fsx/jade_choghari/outputs/pi0_multi_training
lerobot-train \
--dataset.repo_id=local \
--dataset.root=/fsx/jade_choghari/data/libero \
--output_dir=/fsx/jade_choghari/outputs/pi0_multi_training \
--job_name=pi0_multi_training \
--policy.repo_id=jadechoghari/pi0-base1 \
--policy.path=/fsx/jade_choghari/outputs/libero_training_fast_6/checkpoints/last/pretrained_model/ \
--policy.dtype=bfloat16 \
--steps=50000 \
--save_freq=5000 \
--batch_size=4 \
--policy.device=cuda \
+12
View File
@@ -0,0 +1,12 @@
python src/lerobot/policies/pi05/train_fast_tokenizer.py \
--repo_id "local" \
--root /fsx/jade_choghari/data/libero \
--action_horizon 10 \
--encoded_dims "0:7" \
--vocab_size 1024 \
--push_to_hub \
--hub_repo_id jadechoghari/fast-libero-tokenizer-quantiles \
--normalization_mode QUANTILES \
# python train_fast_tokenizer.py --repo_id my_dataset
@@ -0,0 +1,533 @@
"""Train FAST tokenizer for action encoding.
This script:
1. Loads action chunks from LeRobotDataset (with sampling)
2. Applies delta transforms and per-timestamp normalization
3. Trains FAST tokenizer on specified action dimensions
4. Saves tokenizer to assets directory
5. Reports compression statistics
"""
import json
import numpy as np
import tyro
from pathlib import Path
from transformers import AutoProcessor
import torch
from huggingface_hub import HfApi
from lerobot.configs.types import NormalizationMode
from lerobot.datasets.lerobot_dataset import LeRobotDataset
def apply_delta_transform(state: np.ndarray, actions: np.ndarray, delta_dims: list[int] | None) -> np.ndarray:
"""Apply delta transform to specified dimensions.
Args:
state: Current state [D]
actions: Future actions [D]
delta_dims: List of dimension indices to apply delta transform to
Returns:
Transformed actions [D]
"""
if delta_dims is None or len(delta_dims) == 0:
return actions
delta_actions = actions.copy()
for dim in delta_dims:
delta_actions[dim] = actions[dim] - state[dim]
return delta_actions
def apply_normalization(
data: np.ndarray,
stats: dict[str, np.ndarray],
mode: NormalizationMode,
eps: float = 1e-8,
) -> np.ndarray:
"""Apply normalization to data based on the specified mode.
Args:
data: Data to normalize [N, H, D] or [D]
stats: Dictionary of statistics (mean, std, min, max, q01, q99, q10, q90)
mode: Normalization mode to apply
eps: Small epsilon for numerical stability
Returns:
Normalized data with the same shape as input
"""
if mode == NormalizationMode.IDENTITY:
return data
if mode == NormalizationMode.MEAN_STD:
mean = stats.get("mean")
std = stats.get("std")
if mean is None or std is None:
raise ValueError("MEAN_STD mode requires 'mean' and 'std' in stats")
return (data - mean) / np.maximum(std, eps)
if mode == NormalizationMode.MIN_MAX:
min_val = stats.get("min")
max_val = stats.get("max")
if min_val is None or max_val is None:
raise ValueError("MIN_MAX mode requires 'min' and 'max' in stats")
denom = np.maximum(max_val - min_val, eps)
return 2.0 * (data - min_val) / denom - 1.0
if mode == NormalizationMode.QUANTILES:
q01 = stats.get("q01")
q99 = stats.get("q99")
if q01 is None or q99 is None:
raise ValueError("QUANTILES mode requires 'q01' and 'q99' in stats")
denom = np.maximum(q99 - q01, eps)
# Clip to quantile range then normalize to [-1, 1]
clipped = np.clip(data, q01, q99)
return 2.0 * (clipped - q01) / denom - 1.0
if mode == NormalizationMode.QUANTILE10:
q10 = stats.get("q10")
q90 = stats.get("q90")
if q10 is None or q90 is None:
raise ValueError("QUANTILE10 mode requires 'q10' and 'q90' in stats")
denom = np.maximum(q90 - q10, eps)
# Clip to quantile range then normalize to [-1, 1]
clipped = np.clip(data, q10, q90)
return 2.0 * (clipped - q10) / denom - 1.0
raise ValueError(f"Unsupported normalization mode: {mode}")
def process_episode(args):
"""Process single episode and return action chunks."""
dataset, ep_idx, action_horizon, delta_dims, sample_fraction, state_key, use_delta_transform = args
try:
# Get episode info
ep_info = dataset.meta.episodes[ep_idx]
from_idx = ep_info["dataset_from_index"]
to_idx = ep_info["dataset_to_index"]
ep_length = to_idx - from_idx
if ep_length < action_horizon:
return None
# Load all frames in episode
# If dataset has episode filtering, we need to use the mapping
states = []
actions = []
for abs_idx in range(from_idx, to_idx):
# Map absolute index to relative index if needed
if dataset._absolute_to_relative_idx is not None:
if abs_idx not in dataset._absolute_to_relative_idx:
# This episode's frames aren't in the filtered dataset
return None
rel_idx = dataset._absolute_to_relative_idx[abs_idx]
else:
rel_idx = abs_idx
frame = dataset.hf_dataset[rel_idx]
# Get state (could be from observation.state or other state key)
if state_key in frame:
state = frame[state_key].numpy() if torch.is_tensor(frame[state_key]) else np.array(frame[state_key])
else:
# If no state key, use zeros (no delta transform)
state = np.zeros_like(frame["action"].numpy() if torch.is_tensor(frame["action"]) else np.array(frame["action"]))
action = frame["action"].numpy() if torch.is_tensor(frame["action"]) else np.array(frame["action"])
states.append(state)
actions.append(action)
states = np.array(states)
actions = np.array(actions)
# Create action chunks (sliding window)
# All actions in a chunk are relative to the FIRST state in that chunk
action_chunks = []
for i in range(len(states) - action_horizon + 1):
current_state = states[i] # First state in chunk
future_absolute_actions = actions[i:i + action_horizon]
if use_delta_transform:
# Relative actions
delta_chunk = np.zeros_like(future_absolute_actions)
for t in range(action_horizon):
delta_chunk[t] = apply_delta_transform(
current_state,
future_absolute_actions[t],
delta_dims,
)
action_chunks.append(delta_chunk)
else:
# Absolute actions (NO delta)
action_chunks.append(future_absolute_actions)
if len(action_chunks) == 0:
return None
action_chunks = np.array(action_chunks)
# Sample chunks
if sample_fraction < 1.0:
n_chunks = len(action_chunks)
n_samples = max(1, int(n_chunks * sample_fraction))
episode_seed = hash(ep_idx) % (2**31)
rng = np.random.RandomState(episode_seed)
indices = rng.choice(n_chunks, size=n_samples, replace=False)
action_chunks = action_chunks[indices]
return action_chunks
except Exception as e:
print(f"Error processing episode {ep_idx}: {e}")
import traceback
traceback.print_exc()
return None
def train_fast_tokenizer(
action_chunks: np.ndarray,
vocab_size: int = 1024,
scale: float = 10.0,
) -> AutoProcessor:
"""
Train FAST tokenizer (BPE on DCT coefficients) on action chunks.
Uses the .fit() method to train a new tokenizer on the provided data.
Args:
action_chunks: Array of action chunks [N, H, D] where N=num_chunks, H=horizon, D=action_dim
vocab_size: BPE vocabulary size
scale: DCT scaling factor for quantization
Returns:
Trained FAST tokenizer
"""
print(f"Training FAST tokenizer on {len(action_chunks)} action chunks...")
print(f"Action chunk shape: {action_chunks.shape}")
print(f"Vocab size: {vocab_size}")
print(f"DCT scale: {scale}")
# Download the tokenizer source code (not pretrained weights)
# We'll train a new tokenizer on our own data
base_tokenizer = AutoProcessor.from_pretrained(
"physical-intelligence/fast",
trust_remote_code=True
)
# Convert action_chunks array to list of arrays (expected by .fit())
action_data_list = [action_chunks[i] for i in range(len(action_chunks))]
# Train the new tokenizer on our action data using .fit()
# This trains the BPE tokenizer on DCT coefficients
print("Training new tokenizer (this may take a few minutes)...")
tokenizer = base_tokenizer.fit(
action_data_list,
scale=scale,
vocab_size=vocab_size,
time_horizon=action_chunks.shape[1], # action_horizon
action_dim=action_chunks.shape[2], # encoded dimensions
)
print("✓ Tokenizer training complete!")
# Validate it works
sample_chunk = action_chunks[0]
encoded = tokenizer(sample_chunk[None])[0]
if isinstance(encoded, list):
encoded = np.array(encoded)
print(f"Sample encoding: {len(encoded)} tokens for chunk shape {sample_chunk.shape}")
return tokenizer
def compute_compression_stats(tokenizer, action_chunks: np.ndarray):
"""Compute compression statistics."""
print("\nComputing compression statistics...")
# Sample for stats (use max 1000 chunks for speed)
sample_size = min(1000, len(action_chunks))
sample_indices = np.random.RandomState(42).choice(len(action_chunks), size=sample_size, replace=False)
sample_chunks = action_chunks[sample_indices]
token_lengths = []
for chunk in sample_chunks:
encoded = tokenizer(chunk[None])[0]
if isinstance(encoded, list):
token_lengths.append(len(encoded))
else:
token_lengths.append(encoded.shape[0] if hasattr(encoded, 'shape') else len(encoded))
token_lengths = np.array(token_lengths)
# Compression ratio: (H * D) / avg_tokens
input_size = action_chunks.shape[1] * action_chunks.shape[2]
avg_tokens = np.mean(token_lengths)
compression_ratio = input_size / avg_tokens
stats = {
'compression_ratio': float(compression_ratio),
'mean_token_length': float(np.mean(token_lengths)),
'p99_token_length': float(np.percentile(token_lengths, 99)),
'min_token_length': float(np.min(token_lengths)),
'max_token_length': float(np.max(token_lengths)),
}
print(f"Compression Statistics:")
print(f" Average compression ratio: {stats['compression_ratio']:.2f}x")
print(f" Mean token length: {stats['mean_token_length']:.1f}")
print(f" P99 token length: {stats['p99_token_length']:.0f}")
print(f" Min token length: {stats['min_token_length']:.0f}")
print(f" Max token length: {stats['max_token_length']:.0f}")
return stats
def main(
repo_id: str,
root: str | None = None,
action_horizon: int = 10,
max_episodes: int | None = None,
sample_fraction: float = 0.1,
encoded_dims: str = "0:6,7:23",
delta_dims: str | None = None,
use_delta_transform: bool = False,
state_key: str = "observation.state",
normalization_mode: str = "QUANTILES",
vocab_size: int = 1024,
scale: float = 10.0,
output_dir: str | None = None,
push_to_hub: bool = False,
hub_repo_id: str | None = None,
hub_private: bool = False,
):
"""
Train FAST tokenizer for action encoding.
Args:
repo_id: LeRobot dataset repository ID
root: Root directory for dataset (default: ~/.cache/huggingface/lerobot)
action_horizon: Number of future actions in each chunk
max_episodes: Max episodes to use (None = all episodes in dataset)
sample_fraction: Fraction of chunks to sample per episode
encoded_dims: Comma-separated dimension ranges to encode (e.g., "0:6,7:23")
delta_dims: Comma-separated dimension indices for delta transform (e.g., "0,1,2,3,4,5")
use_delta_transform: Whether to apply delta transform (relative actions vs absolute actions)
state_key: Dataset key for state observations (default: "observation.state")
normalization_mode: Normalization mode (MEAN_STD, MIN_MAX, QUANTILES, QUANTILE10, IDENTITY)
vocab_size: FAST vocabulary size (BPE vocab size)
scale: DCT scaling factor (default: 10.0)
output_dir: Directory to save tokenizer (default: ./fast_tokenizer_{repo_id})
push_to_hub: Whether to push the tokenizer to Hugging Face Hub
hub_repo_id: Hub repository ID (e.g., "username/tokenizer-name"). If None, uses output_dir name
hub_private: Whether to create a private repository on the Hub
"""
# Load dataset
print(f"Loading dataset: {repo_id}")
dataset = LeRobotDataset(repo_id=repo_id, root=root)
print(f"Dataset loaded: {dataset.num_episodes} episodes, {dataset.num_frames} frames")
# Parse normalization mode
try:
norm_mode = NormalizationMode(normalization_mode)
except ValueError:
raise ValueError(
f"Invalid normalization_mode: {normalization_mode}. "
f"Must be one of: {', '.join([m.value for m in NormalizationMode])}"
)
print(f"Normalization mode: {norm_mode.value}")
# Parse encoded dimensions
encoded_dim_ranges = []
for range_str in encoded_dims.split(','):
start, end = map(int, range_str.strip().split(':'))
encoded_dim_ranges.append((start, end))
total_encoded_dims = sum(end - start for start, end in encoded_dim_ranges)
print(f"Encoding {total_encoded_dims} dimensions: {encoded_dims}")
# Parse delta dimensions
delta_dim_list = None
if delta_dims is not None and delta_dims.strip():
delta_dim_list = [int(d.strip()) for d in delta_dims.split(',')]
print(f"Delta dimensions: {delta_dim_list}")
else:
print("No delta dimensions specified")
print(f"Use delta transform: {use_delta_transform}")
if use_delta_transform and (delta_dim_list is None or len(delta_dim_list) == 0):
print("Warning: use_delta_transform=True but no delta_dims specified. No delta will be applied.")
print(f"Action horizon: {action_horizon}")
print(f"State key: {state_key}")
# Determine episodes to process
num_episodes = dataset.num_episodes
if max_episodes is not None:
num_episodes = min(max_episodes, num_episodes)
print(f"Processing {num_episodes} episodes...")
# Process episodes sequentially (to avoid pickling issues with dataset)
all_chunks = []
for ep_idx in range(num_episodes):
if ep_idx % 10 == 0:
print(f" Processing episode {ep_idx}/{num_episodes}...")
chunks = process_episode(
(dataset, ep_idx, action_horizon, delta_dim_list, sample_fraction, state_key, use_delta_transform)
)
if chunks is not None:
all_chunks.append(chunks)
# Concatenate all chunks
all_chunks = np.concatenate(all_chunks, axis=0)
print(f"Collected {len(all_chunks)} action chunks")
# Extract only encoded dimensions FIRST (before normalization)
encoded_chunks = []
for start, end in encoded_dim_ranges:
encoded_chunks.append(all_chunks[:, :, start:end])
encoded_chunks = np.concatenate(encoded_chunks, axis=-1) # [N, H, D_encoded]
print(f"Extracted {encoded_chunks.shape[-1]} encoded dimensions")
# Apply normalization to encoded dimensions
print(f"\nBefore normalization - overall stats:")
print(f" Min: {np.min(encoded_chunks):.4f}, Max: {np.max(encoded_chunks):.4f}")
print(f" Mean: {np.mean(encoded_chunks):.4f}, Std: {np.std(encoded_chunks):.4f}")
# Get normalization stats from dataset
norm_stats = dataset.meta.stats
if norm_stats is not None and "action" in norm_stats:
action_stats = norm_stats["action"]
# Build encoded dimension indices
encoded_dim_indices = []
for start, end in encoded_dim_ranges:
encoded_dim_indices.extend(range(start, end))
encoded_dim_indices = np.array(encoded_dim_indices)
# Extract stats for encoded dimensions only
encoded_stats = {}
for stat_name, stat_values in action_stats.items():
if isinstance(stat_values, (list, np.ndarray)):
stat_array = np.array(stat_values)
if len(stat_array) > max(encoded_dim_indices):
encoded_stats[stat_name] = stat_array[encoded_dim_indices]
if encoded_stats:
print(f"\nNormalization stats for encoded dimensions (mode: {norm_mode.value}):")
for stat_name, stat_values in encoded_stats.items():
print(f" {stat_name}: shape={stat_values.shape}, "
f"range=[{np.min(stat_values):.4f}, {np.max(stat_values):.4f}]")
# Apply normalization based on mode
try:
encoded_chunks = apply_normalization(
encoded_chunks,
encoded_stats,
norm_mode,
eps=1e-8
)
print(f"\nApplied {norm_mode.value} normalization")
except ValueError as e:
print(f"Warning: {e}. Using raw actions without normalization.")
print(f"\nAfter normalization - overall stats:")
print(f" Min: {np.min(encoded_chunks):.4f}, Max: {np.max(encoded_chunks):.4f}")
print(f" Mean: {np.mean(encoded_chunks):.4f}, Std: {np.std(encoded_chunks):.4f}")
print(f"\nPer-dimension stats (after normalization):")
for d in range(encoded_chunks.shape[-1]):
dim_data = encoded_chunks[:, :, d]
print(f" Dim {d}: min={np.min(dim_data):7.4f}, max={np.max(dim_data):7.4f}, "
f"mean={np.mean(dim_data):7.4f}, std={np.std(dim_data):7.4f}")
else:
print("Warning: Could not extract stats for encoded dimensions, using raw actions")
else:
print("Warning: No normalization stats found in dataset, using raw actions")
print(f"Encoded chunks shape: {encoded_chunks.shape}")
# Train FAST tokenizer
tokenizer = train_fast_tokenizer(
encoded_chunks,
vocab_size=vocab_size,
scale=scale,
)
# Compute compression statistics
compression_stats = compute_compression_stats(tokenizer, encoded_chunks)
# Save tokenizer
if output_dir is None:
output_dir = f"fast_tokenizer_{repo_id.replace('/', '_')}"
output_path = Path(output_dir)
output_path.mkdir(parents=True, exist_ok=True)
tokenizer.save_pretrained(output_path)
# Save metadata
metadata = {
'repo_id': repo_id,
'vocab_size': vocab_size,
'scale': scale,
'encoded_dims': encoded_dims,
'encoded_dim_ranges': encoded_dim_ranges,
'total_encoded_dims': total_encoded_dims,
'delta_dims': delta_dims,
'delta_dim_list': delta_dim_list,
'use_delta_transform': use_delta_transform,
'state_key': state_key,
'normalization_mode': norm_mode.value,
'action_horizon': action_horizon,
'num_training_chunks': len(encoded_chunks),
'compression_stats': compression_stats,
}
with open(output_path / "metadata.json", 'w') as f:
json.dump(metadata, f, indent=2)
print(f"\nSaved FAST tokenizer to {output_path}")
print(f"Metadata: {json.dumps(metadata, indent=2)}")
# Push to Hugging Face Hub if requested
if push_to_hub:
# Determine the hub repository ID
if hub_repo_id is None:
hub_repo_id = output_path.name
print(f"\nNo hub_repo_id provided, using: {hub_repo_id}")
print(f"\nPushing tokenizer to Hugging Face Hub: {hub_repo_id}")
print(f" Private: {hub_private}")
try:
# Use the tokenizer's push_to_hub method
tokenizer.push_to_hub(
repo_id=hub_repo_id,
private=hub_private,
commit_message=f"Upload FAST tokenizer trained on {repo_id}"
)
# Also upload the metadata.json file separately
api = HfApi()
api.upload_file(
path_or_fileobj=str(output_path / "metadata.json"),
path_in_repo="metadata.json",
repo_id=hub_repo_id,
repo_type="model",
commit_message="Upload tokenizer metadata"
)
print(f"Successfully pushed tokenizer to: https://huggingface.co/{hub_repo_id}")
except Exception as e:
print(f"Error pushing to hub: {e}")
print(" Make sure you're logged in with `huggingface-cli login`")
if __name__ == "__main__":
tyro.cli(main)
@@ -0,0 +1,101 @@
# Train FAST Tokenizer - Usage Examples
This script trains a FAST (Frequency-space Action Sequence Tokenization) tokenizer on LeRobotDataset action data.
## Basic Usage
```bash
python src/lerobot/policies/pi05/train_fast_tokenizer.py \
--repo_id "lerobot/aloha_sim_insertion_human" \
--action_horizon 10 \
--encoded_dims "0:7" \
--vocab_size 1024 \
--scale 10.0
```
## Parameters
### Required
- `--repo_id`: LeRobot dataset repository ID (e.g., "lerobot/aloha_sim_insertion_human")
### Optional
- `--root`: Root directory for dataset (default: ~/.cache/huggingface/lerobot)
- `--action_horizon`: Number of future actions in each chunk (default: 10)
- `--max_episodes`: Maximum number of episodes to use (default: None = all)
- `--sample_fraction`: Fraction of chunks to sample per episode (default: 0.1)
- `--encoded_dims`: Comma-separated dimension ranges to encode (default: "0:6,7:23")
- Example: "0:7" encodes dimensions 0-6
- Example: "0:3,6:9" encodes dimensions 0-2 and 6-8
- `--delta_dims`: Comma-separated dimension indices for delta transform (default: None)
- Example: "0,1,2,3,4,5" applies delta transform to first 6 dimensions
- Delta transform: action[i] - state[i] for specified dimensions
- `--state_key`: Dataset key for state observations (default: "observation.state")
- `--vocab_size`: FAST vocabulary size / BPE vocab size (default: 1024)
- `--scale`: DCT scaling factor (default: 10.0)
- `--output_dir`: Directory to save tokenizer (default: ./fast_tokenizer_{repo_id})
- `--use_delta_transform`: Apply the delta transform (relative actions); without it, `--delta_dims` has no effect (default: False)
- `--normalization_mode`: Normalization mode: MEAN_STD, MIN_MAX, QUANTILES, QUANTILE10, or IDENTITY (default: QUANTILES)
- `--push_to_hub`, `--hub_repo_id`, `--hub_private`: Optionally push the trained tokenizer (and metadata.json) to the Hugging Face Hub
## Examples
### Example 1: Train on full action space
```bash
python src/lerobot/policies/pi05/train_fast_tokenizer.py \
--repo_id "lerobot/pusht" \
--action_horizon 16 \
--encoded_dims "0:2" \
--vocab_size 512 \
--max_episodes 100
```
### Example 2: Train with delta transform
```bash
python src/lerobot/policies/pi05/train_fast_tokenizer.py \
--repo_id "lerobot/aloha_sim_insertion_human" \
--action_horizon 10 \
--encoded_dims "0:14" \
--delta_dims "0,1,2,3,4,5,6,7,8,9,10,11,12,13" \
--state_key "observation.state" \
--vocab_size 1024 \
--scale 10.0 \
--sample_fraction 0.2
```
### Example 3: Train on subset of dimensions
```bash
python src/lerobot/policies/pi05/train_fast_tokenizer.py \
--repo_id "lerobot/aloha_sim_insertion_human" \
--action_horizon 10 \
--encoded_dims "0:7" \
--vocab_size 1024 \
--output_dir "./my_tokenizer"
```
## Output
The script saves:
1. **Tokenizer files**: Trained FAST tokenizer (can be loaded with `AutoProcessor.from_pretrained()`)
2. **metadata.json**: Contains:
- Configuration parameters
- Compression statistics (compression ratio, token lengths)
- Training dataset information
## Understanding the Process
1. **Load Dataset**: Loads the LeRobotDataset from HuggingFace
2. **Extract Action Chunks**: Creates sliding windows of actions with specified horizon
3. **Apply Delta Transform**: (Optional) Computes action deltas relative to current state
4. **Select Encoded Dimensions**: Extracts only the dimensions to be encoded
5. **Normalize**: Applies quantile normalization ([q01, q99] → [-1, 1])
6. **Train Tokenizer**: Trains BPE tokenizer on DCT coefficients
7. **Compute Stats**: Reports compression ratio and token length statistics
8. **Save**: Saves tokenizer and metadata
## Notes
- **Normalization**: By default the script uses quantile normalization (q01, q99) from the dataset's statistics; other modes are available via `--normalization_mode` (see the sketch after this list)
- **Sampling**: To speed up training, you can sample a fraction of chunks per episode
- **Delta Transform**: Applied per-dimension to make actions relative to current state
- **Compression**: FAST uses DCT + BPE to compress action sequences efficiently
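For reference, the chunking (step 2) and quantile normalization (step 5) described in "Understanding the Process" amount to the following standalone sketch (array shapes are made up; the script's `process_episode` and `apply_normalization` use the dataset's precomputed stats rather than recomputing quantiles):

```python
import numpy as np

action_horizon = 10
actions = np.random.randn(200, 7)    # one episode worth of actions: [T, action_dim]

# Sliding windows of length `action_horizon` (one chunk per starting frame)
chunks = np.stack([
    actions[i:i + action_horizon]
    for i in range(len(actions) - action_horizon + 1)
])                                    # [N, action_horizon, action_dim]

# Clip to the [q01, q99] range per dimension, then map to [-1, 1]
q01 = np.quantile(actions, 0.01, axis=0)
q99 = np.quantile(actions, 0.99, axis=0)
chunks = np.clip(chunks, q01, q99)
chunks = 2.0 * (chunks - q01) / np.maximum(q99 - q01, 1e-8) - 1.0

print(chunks.shape)                   # (191, 10, 7), values in [-1, 1]
```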
+28
View File
@@ -0,0 +1,28 @@
#!/bin/bash
# FSDP training script for PI05 with aggressive memory optimization
# Use this for large models that OOM with standard DDP
accelerate launch --config_file /admin/home/jade_choghari/lerobot/fsdp_config.yaml \
$(which lerobot-train) \
--dataset.repo_id=local \
--dataset.root=/fsx/jade_choghari/data/libero \
--output_dir=/fsx/jade_choghari/outputs/libero_training_fsdp \
--job_name=libero_training_fsdp \
--policy.repo_id=jade_choghari/pi05-fast-libero-fsdp \
--policy.path=/fsx/jade_choghari/models/libero-pi-fast \
--policy.dtype=bfloat16 \
--steps=100000 \
--save_freq=10 \
--batch_size=8 \
--policy.device=cuda \
--policy.fast_only=true \
--policy.scheduler_warmup_steps=2000 \
--policy.scheduler_decay_steps=60000 \
--policy.scheduler_decay_lr=1e-5 \
--policy.gradient_checkpointing=false \
--wandb.enable=true \
--wandb.disable_artifact=true \
--wandb.project=pi05-libero-training-fsdp
+24
View File
@@ -0,0 +1,24 @@
export CUDA_LAUNCH_BLOCKING=1
lerobot-train \
--dataset.repo_id=local \
--dataset.root=/fsx/jade_choghari/data/libero \
--output_dir=/fsx/jade_choghari/outputs/libero_training_fast_4 \
--job_name=libero_training_fast \
--policy.repo_id=jade_choghari/pi05-fast-libero \
--policy.path=/fsx/jade_choghari/models/pi05-base \
--policy.dtype=bfloat16 \
--steps=100000 \
--save_freq=20000 \
--batch_size=4 \
--policy.device=cuda \
--policy.fast_only=true \
--policy.scheduler_warmup_steps=1000 \
--policy.scheduler_decay_steps=30000 \
--policy.scheduler_decay_lr=1e-5 \
--policy.gradient_checkpointing=true \
--rename_map='{
"observation.images.image1": "observation.images.base_0_rgb",
"observation.images.image2": "observation.images.left_wrist_0_rgb",
}' \
--policy.empty_cameras=1 \
# /fsx/jade_choghari/.cache/huggingface/lerobot/jadechoghari/collect-data
@@ -0,0 +1,15 @@
#!/bin/bash
#SBATCH --job-name=pi05-train
#SBATCH --time=24:00:00
#SBATCH --qos=high
#SBATCH --gres=gpu:8
#SBATCH --mem=256G
#SBATCH --partition=hopper-prod
#SBATCH --output=/fsx/jade_choghari/logs/%x-%j.out
#SBATCH --error=/fsx/jade_choghari/logs/%x-%j.err
srun \
--container-image=/fsx/michel_aractingi/docker_images/huggingface+lerobot-gpu+dev.sqsh \
--container-mounts=/fsx/jade_choghari \
--container-workdir=$HOME/lerobot \
bash /admin/home/jade_choghari/lerobot/src/lerobot/policies/pi05/train_multi.sh
+36
View File
@@ -0,0 +1,36 @@
#!/bin/bash
set -euxo pipefail
# Source YOUR Miniforge conda (mounted from FSX)
source /fsx/jade_choghari/miniforge3/etc/profile.d/conda.sh
conda activate lerobot
accelerate launch --mixed_precision=bf16 --multi_gpu --num_processes=8 \
$(which lerobot-train) \
--dataset.repo_id=local \
--dataset.root=/fsx/jade_choghari/data/libero \
--output_dir=/fsx/jade_choghari/outputs/libero_training_fast_mean_1 \
--job_name=libero_training_fast \
--policy.repo_id=jade_choghari/pi05-fast-libero \
--policy.path=/fsx/jade_choghari/models/pi05-base \
--policy.dtype=bfloat16 \
--steps=100000 \
--save_freq=20000 \
--batch_size=4 \
--policy.device=cuda \
--policy.fast_only=true \
--policy.scheduler_warmup_steps=4000 \
--policy.scheduler_decay_steps=100000 \
--policy.scheduler_decay_lr=1e-5 \
--policy.gradient_checkpointing=true \
--policy.chunk_size=10 \
--policy.n_action_steps=10 \
--policy.max_action_tokens=256 \
--rename_map='{
"observation.images.image1": "observation.images.base_0_rgb",
"observation.images.image2": "observation.images.left_wrist_0_rgb",
}' \
--policy.empty_cameras=1 \
--wandb.enable=true \
--wandb.disable_artifact=true \
--wandb.project=pi05-libero-training \
@@ -527,7 +527,6 @@ class VLAFlowMatching(nn.Module):
num_vlm_layers=self.config.num_vlm_layers,
self_attn_every_n_layers=self.config.self_attn_every_n_layers,
expert_width_multiplier=self.config.expert_width_multiplier,
device=self.config.device if self.config.device is not None else "auto",
)
self.state_proj = nn.Linear(
self.config.max_state_dim, self.vlm_with_expert.config.text_config.hidden_size
@@ -784,15 +783,18 @@ class VLAFlowMatching(nn.Module):
use_cache=self.config.use_cache,
fill_kv_cache=True,
)
num_steps = self.config.num_steps
dt = -1.0 / num_steps
dt = -1.0 / self.config.num_steps
dt = torch.tensor(dt, dtype=torch.float32, device=device)
x_t = noise
for step in range(num_steps):
time = 1.0 + step * dt
time_tensor = torch.tensor(time, dtype=torch.float32, device=device).expand(bsize)
time = torch.tensor(1.0, dtype=torch.float32, device=device)
def denoise_step_partial_call(input_x_t, current_timestep=time_tensor):
while time >= -dt / 2:
expanded_time = time.expand(bsize)
# Define a closure function to properly capture expanded_time
# This avoids the lambda expression (E731) and loop variable binding (B023) issues
def denoise_step_partial_call(input_x_t, current_timestep=expanded_time):
return self.denoise_step(
x_t=input_x_t,
prefix_pad_masks=prefix_pad_masks,
@@ -816,11 +818,15 @@ class VLAFlowMatching(nn.Module):
else:
v_t = denoise_step_partial_call(x_t)
x_t = x_t + dt * v_t
# Euler step
x_t += dt * v_t
# Record x_t and v_t after Euler step (other params are recorded in rtc_processor.denoise_step)
if self.rtc_processor is not None and self.rtc_processor.is_debug_enabled():
self.rtc_processor.track(time=time, x_t=x_t, v_t=v_t)
time += dt
return x_t
def denoise_step(
+1 -1
View File
@@ -75,7 +75,7 @@ from .policy_robot_bridge import (
RobotActionToPolicyActionProcessorStep,
)
from .rename_processor import RenameObservationsProcessorStep
from .tokenizer_processor import TokenizerProcessorStep
from .tokenizer_processor import TokenizerProcessorStep, ActionTokenizerProcessorStep
__all__ = [
"ActionProcessorStep",
+3 -1
View File
@@ -168,10 +168,12 @@ def _extract_complementary_data(batch: dict[str, Any]) -> dict[str, Any]:
"""
pad_keys = {k: v for k, v in batch.items() if "_is_pad" in k}
task_key = {"task": batch["task"]} if "task" in batch else {}
user_prompt_key = {"user_prompt": batch["user_prompt"]} if "user_prompt" in batch else {}
subtask_key = {"subtask": batch["subtask"]} if "subtask" in batch else {}
index_key = {"index": batch["index"]} if "index" in batch else {}
task_index_key = {"task_index": batch["task_index"]} if "task_index" in batch else {}
return {**pad_keys, **task_key, **index_key, **task_index_key}
return {**pad_keys, **task_key, **index_key, **task_index_key, **user_prompt_key, **subtask_key}
def create_transition(
@@ -47,7 +47,6 @@ class RenameObservationsProcessorStep(ObservationProcessorStep):
processed_obs[self.rename_map[key]] = value
else:
processed_obs[key] = value
return processed_obs
def get_config(self) -> dict[str, Any]:
File diff suppressed because it is too large Load Diff
@@ -1,21 +0,0 @@
#!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# OMX is a fully open-source robot from ROBOTIS.
# More information at: https://ai.robotis.com/omx/introduction_omx.html
from .config_omx_follower import OmxFollowerConfig
from .omx_follower import OmxFollower
@@ -1,39 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from lerobot.cameras import CameraConfig
from ..config import RobotConfig
@RobotConfig.register_subclass("omx_follower")
@dataclass
class OmxFollowerConfig(RobotConfig):
# Port to connect to the arm
port: str
disable_torque_on_disconnect: bool = True
# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
# Set this to a positive scalar to have the same value for all motors, or a dictionary that maps motor
# names to the max_relative_target value for that motor.
max_relative_target: float | dict[str, float] | None = None
# cameras
cameras: dict[str, CameraConfig] = field(default_factory=dict)
# Set to `True` for backward compatibility with previous policies/dataset
use_degrees: bool = False

Some files were not shown because too many files have changed in this diff.