make script compatible with lerobot (b536f47) (#38)

* bump openx2lerobot script

* bump agibot2lerobot script

* bump robomind2lerobot script
Qizhi Chen authored on 2025-06-12 20:13:33 +08:00, committed by GitHub
parent 462a2ace0b
commit 297b67cbc2
7 changed files with 36 additions and 80 deletions
+3 -3
@@ -174,7 +174,7 @@ python convert.py \
#### For single node
```bash
-cd agibot2lerobot && bash convert.sh
+bash convert.sh
```
#### For multi nodes
@@ -197,7 +197,7 @@ On either Node, check the ray cluster status, and start the script
```bash
ray status
-cd agibot2lerobot && bash convert.sh
+bash convert.sh
```
**Slurm-managed System**
@@ -257,7 +257,7 @@ done
sleep 10
-cd agibot2lerobot && bash convert.sh
+bash convert.sh
```
**Other Community Supported Cluster Managers**
+9 -17
@@ -25,7 +25,6 @@ from lerobot.common.datasets.utils import (
write_info,
)
from lerobot.common.datasets.video_utils import get_safe_default_codec
-from lerobot.common.robot_devices.robots.utils import Robot
from ray.runtime_env import RuntimeEnv
@@ -72,10 +71,9 @@ class AgiBotDataset(LeRobotDataset):
cls,
repo_id: str,
fps: int,
+features: dict,
root: str | Path | None = None,
-robot: Robot | None = None,
robot_type: str | None = None,
-features: dict | None = None,
use_videos: bool = True,
tolerance_s: float = 1e-4,
image_writer_processes: int = 0,
@@ -87,10 +85,9 @@ class AgiBotDataset(LeRobotDataset):
obj.meta = AgiBotDatasetMetadata.create(
repo_id=repo_id,
fps=fps,
-root=root,
-robot=robot,
robot_type=robot_type,
features=features,
+root=root,
use_videos=use_videos,
)
obj.repo_id = obj.meta.repo_id
@@ -114,7 +111,7 @@ class AgiBotDataset(LeRobotDataset):
obj.video_backend = video_backend if video_backend is not None else get_safe_default_codec()
return obj
-def add_frame(self, frame: dict) -> None:
+def add_frame(self, frame: dict, task: str, timestamp: float | None = None) -> None:
"""
This function only adds the frame to the episode_buffer. Apart from images — which are written in a
temporary directory — nothing is written to disk. To save those frames, the 'save_episode()' method
@@ -133,17 +130,14 @@ class AgiBotDataset(LeRobotDataset):
# Automatically add frame_index and timestamp to episode buffer
frame_index = self.episode_buffer["size"]
-timestamp = frame.pop("timestamp") if "timestamp" in frame else frame_index / self.fps
+if timestamp is None:
+timestamp = frame_index / self.fps
self.episode_buffer["frame_index"].append(frame_index)
self.episode_buffer["timestamp"].append(timestamp)
self.episode_buffer["task"].append(task)
# Add frame features to episode_buffer
for key, value in frame.items():
if key == "task":
# Note: we associate the task in natural language to its task index during `save_episode`
self.episode_buffer["task"].append(frame["task"])
continue
if key not in self.features:
raise ValueError(
f"An element of the frame is not in the features. '{key}' not in '{self.features.keys()}'."
@@ -246,7 +240,7 @@ def save_as_lerobot_dataset(agibot_world_config, task: tuple[Path, Path], num_th
if not save_depth:
features.pop("observation.images.head_depth")
-dataset = AgiBotDataset.create(
+dataset: AgiBotDataset = AgiBotDataset.create(
repo_id=json_file.stem,
root=local_dir,
fps=30,
@@ -268,7 +262,6 @@ def save_as_lerobot_dataset(agibot_world_config, task: tuple[Path, Path], num_th
eid,
src_path=src_path,
task_id=task_id,
-task_instruction=task_instruction,
save_depth=save_depth,
AgiBotWorld_CONFIG=agibot_world_config,
)
@@ -278,7 +271,7 @@ def save_as_lerobot_dataset(agibot_world_config, task: tuple[Path, Path], num_th
continue
for frame_data in frames:
-dataset.add_frame(frame_data)
+dataset.add_frame(frame_data, task_instruction)
try:
dataset.save_episode(videos=videos, action_config=action_config)
except Exception as e:
@@ -300,7 +293,6 @@ def save_as_lerobot_dataset(agibot_world_config, task: tuple[Path, Path], num_th
eid,
src_path=src_path,
task_id=task_id,
-task_instruction=task_instruction,
save_depth=save_depth,
AgiBotWorld_CONFIG=agibot_world_config,
)
@@ -313,7 +305,7 @@ def save_as_lerobot_dataset(agibot_world_config, task: tuple[Path, Path], num_th
continue
action_config = task_info[eid]["label_info"]["action_config"]
for frame_data in frames:
-dataset.add_frame(frame_data)
+dataset.add_frame(frame_data, task_instruction)
try:
dataset.save_episode(videos=videos, action_config=action_config)
except Exception as e:
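The `add_frame` rewrite above tracks the newer lerobot signature: the task string and an optional timestamp become explicit arguments instead of keys popped out of the frame dict. A standalone toy sketch of the bookkeeping involved (illustrative only; `fps = 30` is an assumption, and in the real method the `size` increment happens further down):

```python
# Toy reproduction of the new add_frame bookkeeping, not the real class.
fps = 30
episode_buffer = {"size": 0, "frame_index": [], "timestamp": [], "task": []}

def add_frame_bookkeeping(task: str, timestamp: float | None = None) -> None:
    frame_index = episode_buffer["size"]
    if timestamp is None:
        timestamp = frame_index / fps  # same fallback as in the diff
    episode_buffer["frame_index"].append(frame_index)
    episode_buffer["timestamp"].append(timestamp)
    episode_buffer["task"].append(task)
    episode_buffer["size"] += 1

for _ in range(3):
    add_frame_bookkeeping("pick up the cup")  # hypothetical instruction

print(episode_buffer["timestamp"])  # [0.0, 0.0333..., 0.0666...]
```

Callers therefore pass the episode-level `task_instruction` once per frame, as in the `dataset.add_frame(frame_data, task_instruction)` calls above.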
+1 -2
@@ -20,7 +20,7 @@ def load_depths(root_dir: str, camera_name: str):
def load_local_dataset(
-episode_id: int, src_path: str, task_id: int, task_instruction: str, save_depth: bool, AgiBotWorld_CONFIG: dict
+episode_id: int, src_path: str, task_id: int, save_depth: bool, AgiBotWorld_CONFIG: dict
) -> tuple[list, dict]:
"""Load local dataset and return a dict with observations and actions"""
ob_dir = Path(src_path) / f"observations/{task_id}/{episode_id}"
@@ -81,7 +81,6 @@ def load_local_dataset(
)
for key, value in action.items()
},
"task": task_instruction,
}
for i in range(num_frames)
]
+5 -14
@@ -1,5 +1,7 @@
# OpenX to LeRobot
+Open X-Embodiment assembles a dataset from 22 different robots collected through a collaboration between 21 institutions, demonstrating 527 skills (160266 tasks). (Copied from [docs](https://robotics-transformer-x.github.io/))
## 🚀 What's New in This Script
In this dataset, we have made several key improvements:
@@ -69,18 +71,7 @@ Dataset Structure of `meta/info.json`:
## Get started
> [!IMPORTANT]
-> 1.Before running the following code, modify `save_episode()` function in lerobot.
-> ```python
-> def save_episode(self, episode_data: dict | None = None, keep_images: bool | None = False) -> None:
-> ...
-> # delete images
-> if not keep_images:
-> img_dir = self.root / "images"
-> if img_dir.is_dir():
-> shutil.rmtree(self.root / "images")
-> ...
-> ```
-> 2.for `bc_z` dataset, modify `encode_video_frames()` in `lerobot/common/datasets/video_utils.py`.
+> 1.for `bc_z` dataset, modify `encode_video_frames()` in `lerobot/common/datasets/video_utils.py`.
>
> ```python
> # add the following content to line 141:
@@ -99,7 +90,7 @@ Dataset Structure of `meta/info.json`:
git clone https://github.com/Tavish9/any4lerobot.git
```
-2. Modify path in `openx2lerobot/convert.sh`:
+2. Modify path in `convert.sh`:
```bash
python openx_rlds.py \
@@ -113,7 +104,7 @@ Dataset Structure of `meta/info.json`:
3. Execute the script:
```bash
-cd openx2lerobot && bash convert.sh
+bash convert.sh
```
## Available OpenX_LeRobot Dataset
+4 -11
@@ -37,10 +37,8 @@ from pathlib import Path
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
-from huggingface_hub import HfApi
from lerobot.common.constants import HF_LEROBOT_HOME
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from oxe_utils.configs import OXE_DATASET_CONFIGS, ActionEncoding, StateEncoding
from oxe_utils.transforms import OXE_STANDARDIZATION_TRANSFORMS
@@ -149,10 +147,10 @@ def save_as_lerobot_dataset(lerobot_dataset: LeRobotDataset, raw_dataset: tf.dat
**image_dict,
"observation.state": traj["proprio"][i],
"action": traj["action"][i],
"task": traj["task"][0].decode(),
}
},
task=traj["task"][0].decode(),
)
-lerobot_dataset.save_episode(keep_images=kwargs.get("keep_images", False))
+lerobot_dataset.save_episode()
def create_lerobot_dataset(
@@ -209,7 +207,7 @@ def create_lerobot_dataset(
repo_id=repo_id,
robot_type=robot_type,
root=local_dir,
-fps=fps,
+fps=int(fps),
use_videos=use_videos,
features=features,
image_writer_threads=image_writer_threads,
@@ -287,11 +285,6 @@ def main():
default=10,
help="Number of threads per process of image writer for saving images.",
)
-parser.add_argument(
-"--keep-images",
-action="store_true",
-help="Whether to keep the cached images.",
-)
args = parser.parse_args()
create_lerobot_dataset(**vars(args))
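With `--keep-images` gone and `save_episode()` restored to its stock signature, the converted output should load with an unpatched lerobot. A hedged post-conversion sanity check (the repo id and root below are placeholders, and the exact item keys, e.g. `task`, depend on the lerobot version this commit targets):

```python
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Placeholders: substitute the repo_id and local dir used in convert.sh.
ds = LeRobotDataset("your_name/bridge_orig", root="/path/to/local_dir")

print(ds.meta.info["fps"], ds.num_episodes)  # fps is stored as an int now
frame = ds[0]  # first frame, returned as tensors
print(frame["action"].shape, frame.get("task"))
```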
+4 -4
@@ -184,7 +184,7 @@ Dataset Structure of `meta/info.json`:
> ```
> [!NOTE]
-> The conversion speed of this script is limited by the performance of the physical machine running it, including **CPU cores and memory**. We recommend using **2 CPU cores per task** for optimal performance. However, each task requires approximately 20 GiB of memory. To avoid running out of memory, you may need to increase the number of CPU cores per task depending on your systems available memory.
+> The conversion speed of this script is limited by the performance of the physical machine running it, including **CPU cores and memory**. We recommend using **2 CPU cores per task** for optimal performance. However, each task requires approximately 10 GiB of memory. To avoid running out of memory, you may need to increase the number of CPU cores per task depending on your systems available memory.
### Download source code:
@@ -210,7 +210,7 @@ python robomind_h5.py \
#### For single node
```bash
-cd robomind2lerobot && bash convert.sh
+bash convert.sh
```
#### For multi nodes
@@ -233,7 +233,7 @@ On either Node, check the ray cluster status, and start the script
```bash
ray status
-cd robomind2lerobot && bash convert.sh
+bash convert.sh
```
**Slurm-managed System**
@@ -293,7 +293,7 @@ done
sleep 10
-cd robomind2lerobot && bash convert.sh
+bash convert.sh
```
**Other Community Supported Cluster Managers**
+10 -29
@@ -5,7 +5,6 @@ import logging
import shutil
from pathlib import Path
-import datasets
import numpy as np
import pandas as pd
import ray
@@ -15,8 +14,6 @@ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatas
from lerobot.common.datasets.utils import (
check_timestamps_sync,
get_episode_data_index,
-get_hf_features_from_features,
-hf_transform_to_torch,
validate_episode_buffer,
validate_frame,
write_episode,
@@ -24,7 +21,6 @@ from lerobot.common.datasets.utils import (
write_info,
)
from lerobot.common.datasets.video_utils import get_safe_default_codec
-from lerobot.common.robot_devices.robots.utils import Robot
from ray.runtime_env import RuntimeEnv
from robomind_uitls.configs import ROBOMIND_CONFIG
from robomind_uitls.lerobot_uitls import compute_episode_stats, generate_features_from_config
@@ -81,10 +77,9 @@ class RoboMINDDataset(LeRobotDataset):
cls,
repo_id: str,
fps: int,
+features: dict,
root: str | Path | None = None,
-robot: Robot | None = None,
robot_type: str | None = None,
-features: dict | None = None,
use_videos: bool = True,
tolerance_s: float = 1e-4,
image_writer_processes: int = 0,
@@ -96,10 +91,9 @@ class RoboMINDDataset(LeRobotDataset):
obj.meta = RoboMINDDatasetMetadata.create(
repo_id=repo_id,
fps=fps,
-root=root,
-robot=robot,
robot_type=robot_type,
features=features,
+root=root,
use_videos=use_videos,
)
obj.repo_id = obj.meta.repo_id
@@ -123,16 +117,7 @@ class RoboMINDDataset(LeRobotDataset):
obj.video_backend = video_backend if video_backend is not None else get_safe_default_codec()
return obj
-def create_hf_dataset(self) -> datasets.Dataset:
-features = get_hf_features_from_features(self.features)
-ft_dict = {col: [] for col in features}
-hf_dataset = datasets.Dataset.from_dict(ft_dict, features=features, split="train")
-# TODO(aliberts): hf_dataset.set_format("torch")
-hf_dataset.set_transform(hf_transform_to_torch)
-return hf_dataset
-def add_frame(self, frame: dict) -> None:
+def add_frame(self, frame: dict, task: str, timestamp: float | None = None) -> None:
"""
This function only adds the frame to the episode_buffer. Apart from images which are written in a
temporary directory nothing is written to disk. To save those frames, the 'save_episode()' method
@@ -150,17 +135,14 @@ class RoboMINDDataset(LeRobotDataset):
# Automatically add frame_index and timestamp to episode buffer
frame_index = self.episode_buffer["size"]
-timestamp = frame.pop("timestamp") if "timestamp" in frame else frame_index / self.fps
+if timestamp is None:
+timestamp = frame_index / self.fps
self.episode_buffer["frame_index"].append(frame_index)
self.episode_buffer["timestamp"].append(timestamp)
self.episode_buffer["task"].append(task)
# Add frame features to episode_buffer
for key, value in frame.items():
if key == "task":
# Note: we associate the task in natural language to its task index during `save_episode`
self.episode_buffer["task"].append(frame["task"])
continue
if key not in self.features:
raise ValueError(
f"An element of the frame is not in the features. '{key}' not in '{self.features.keys()}'."
@@ -276,11 +258,11 @@ def save_as_lerobot_dataset(task: tuple[dict, Path, str], src_path, benchmark, e
# 1. not consistent image shape...
# 2. franka and ur image is bgr...
bgr2rgb = False
if "1_0" in benchmark:
match embodiment:
case "franka_1rgb" | "franka_3rgb" | "franka_fr3_dual" | "ur_1rgb":
if embodiment in ["franka_1rgb", "franka_3rgb", "franka_fr3_dual", "ur_1rgb"]:
bgr2rgb = True
if "1_0" in benchmark:
match embodiment:
case "tienkung_gello_1rgb":
if task_type in (
"clean_table_2_241211",
@@ -331,8 +313,7 @@ def save_as_lerobot_dataset(task: tuple[dict, Path, str], src_path, benchmark, e
status, raw_dataset, err = load_local_dataset(episode_path, config, save_depth, bgr2rgb)
if status and len(raw_dataset) >= 50:
for frame_data in raw_dataset:
-frame_data.update({"task": task_instruction})
-dataset.add_frame(frame_data)
+dataset.add_frame(frame_data, task_instruction)
dataset.save_episode(split, action_config.get(episode_path.parent.parent.name, {}))
logging.info(f"process done for {path}, len {len(raw_dataset)}")
else:
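One behavioral note on the `bgr2rgb` hunk above: the franka/ur check used to run only inside the `"1_0" in benchmark` branch, but it is now evaluated unconditionally, so BGR-to-RGB conversion also applies under other benchmark versions. That reading is an inference from the code (the nearby comment says franka and ur images are BGR), not from the commit message; a standalone sketch of the two predicates, with a placeholder benchmark name:

```python
BGR_EMBODIMENTS = ["franka_1rgb", "franka_3rgb", "franka_fr3_dual", "ur_1rgb"]

def old_bgr2rgb(benchmark: str, embodiment: str) -> bool:
    # Pre-commit: gated on the benchmark version as well.
    return "1_0" in benchmark and embodiment in BGR_EMBODIMENTS

def new_bgr2rgb(benchmark: str, embodiment: str) -> bool:
    # Post-commit: depends on the embodiment alone.
    return embodiment in BGR_EMBODIMENTS

# The two only disagree for franka/ur episodes outside *_1_0 benchmarks:
print(old_bgr2rgb("benchmark1_2", "ur_1rgb"))  # False
print(new_bgr2rgb("benchmark1_2", "ur_1rgb"))  # True
```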