#!/usr/bin/env python
# examples/so100_to_so100_EE/evaluate.py
#
# Run ACT policy inference on a SO100 follower arm in end-effector (EE) space
# and record the resulting evaluation episodes into a LeRobotDataset.

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.configs.types import FeatureType, PolicyFeature
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.utils import combine_feature_dicts
from lerobot.model.kinematics import RobotKinematics
from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.policies.factory import make_pre_post_processors
from lerobot.processor import (
    RobotAction,
    RobotObservation,
    RobotProcessorPipeline,
    make_default_teleop_action_processor,
)
from lerobot.processor.converters import (
    observation_to_transition,
    robot_action_to_transition,
    transition_to_observation,
    transition_to_robot_action,
)
from lerobot.record import record_loop
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.robot_kinematic_processor import (
    AddRobotObservationAsComplimentaryData,
    ForwardKinematicsJointsToEE,
    InverseKinematicsEEToJoints,
)
from lerobot.robots.so100_follower.so100_follower import SO100Follower
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import _init_rerun

NUM_EPISODES = 5
FPS = 30
EPISODE_TIME_SEC = 60
TASK_DESCRIPTION = "My task description"
HF_MODEL_ID = "/"  # TODO: set to "<hub_user>/<model_repo>" before running
HF_DATASET_ID = "/"  # TODO: set to "<hub_user>/<dataset_repo>" before running

# Camera and robot configuration. `use_degrees=True` so joint values are in
# degrees, matching what the kinematics processors below operate on.
camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
robot_config = SO100FollowerConfig(
    port="/dev/tty.usbmodem5A460814411",
    id="my_awesome_follower_arm",
    cameras=camera_config,
    use_degrees=True,
)

# Initialize the robot
robot = SO100Follower(robot_config)

# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo:
# https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
kinematics_solver = RobotKinematics(
    urdf_path="./src/lerobot/teleoperators/sim/so101_new_calib.urdf",
    target_frame_name="gripper_frame_link",
    joint_names=list(robot.bus.motors.keys()),
)

# Pipeline: EE-pose action -> joint-space action (inverse kinematics), seeded
# with the robot's current joint state attached as complementary data.
robot_ee_to_joints_processor = RobotProcessorPipeline[RobotAction, RobotAction](
    steps=[
        AddRobotObservationAsComplimentaryData(robot=robot),
        InverseKinematicsEEToJoints(
            kinematics=kinematics_solver,
            motor_names=list(robot.bus.motors.keys()),
            initial_guess_current_joints=True,
        ),
    ],
    to_transition=robot_action_to_transition,
    to_output=transition_to_robot_action,
)

# Pipeline: joint-space observation -> EE-pose observation (forward kinematics).
robot_joints_to_ee_pose_processor = RobotProcessorPipeline[RobotObservation, RobotObservation](
    steps=[
        ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys()))
    ],
    to_transition=observation_to_transition,
    to_output=transition_to_observation,
)


# Create the dataset: observation features come from the FK pipeline; action
# features are declared explicitly as the 6-DoF EE pose plus gripper position.
dataset = LeRobotDataset.create(
    repo_id=HF_DATASET_ID,
    fps=FPS,
    features=combine_feature_dicts(
        aggregate_pipeline_dataset_features(
            pipeline=robot_joints_to_ee_pose_processor,
            initial_features=create_initial_features(observation=robot.observation_features),
            use_videos=True,
        ),
        # User for now should be explicit on the feature keys that were used for record.
        # Alternatively, the user can pass the processor step that has the right features.
        aggregate_pipeline_dataset_features(
            pipeline=make_default_teleop_action_processor(),
            initial_features=create_initial_features(
                action={
                    f"ee.{k}": PolicyFeature(type=FeatureType.ACTION, shape=(1,))
                    for k in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]
                }
            ),
            use_videos=True,
        ),
    ),
    robot_type=robot.name,
    use_videos=True,
    image_writer_threads=4,
)

# Initialize the keyboard listener (early-exit events) and rerun visualization.
_, events = init_keyboard_listener()
# NOTE(review): session name "recording_phone" looks copy-pasted from another
# example — confirm before renaming, as it only affects the rerun UI label.
_init_rerun(session_name="recording_phone")

# Connect the robot
robot.connect()

# Load the trained policy and build its pre/post processors; normalization
# stats are taken from the freshly created dataset's metadata.
policy = ACTPolicy.from_pretrained(HF_MODEL_ID)
preprocessor, postprocessor = make_pre_post_processors(
    policy_cfg=policy,
    pretrained_path=HF_MODEL_ID,
    dataset_stats=dataset.meta.stats,
)

for episode_idx in range(NUM_EPISODES):
    log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}")

    record_loop(
        robot=robot,
        events=events,
        fps=FPS,
        policy=policy,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        dataset=dataset,
        control_time_s=EPISODE_TIME_SEC,
        single_task=TASK_DESCRIPTION,
        display_data=True,
        teleop_action_processor=make_default_teleop_action_processor(),
        robot_action_processor=robot_ee_to_joints_processor,
        robot_observation_processor=robot_joints_to_ee_pose_processor,
    )
    dataset.save_episode()

# Clean up
log_say("Stop recording")
robot.disconnect()
dataset.push_to_hub()
#!/usr/bin/env python
# examples/so100_to_so100_EE/replay.py
#
# Replay one recorded episode of end-effector (EE) actions on a SO100 follower
# arm, converting each EE pose back to joint commands via inverse kinematics.

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import time

from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotAction, RobotProcessorPipeline
from lerobot.processor.converters import robot_action_to_transition, transition_to_robot_action
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.robot_kinematic_processor import (
    AddRobotObservationAsComplimentaryData,
    InverseKinematicsEEToJoints,
)
from lerobot.robots.so100_follower.so100_follower import SO100Follower
from lerobot.utils.robot_utils import busy_wait
from lerobot.utils.utils import log_say

EPISODE_IDX = 0
HF_REPO_ID = "/"  # TODO: set to "<hub_user>/<dataset_repo>" before running

# `use_degrees=True` so joint values are in degrees, matching the IK processor.
robot_config = SO100FollowerConfig(
    port="/dev/tty.usbmodem5A460814411", id="my_awesome_follower_arm", use_degrees=True
)
robot = SO100Follower(robot_config)
robot.connect()

# Load only the single episode we want to replay.
dataset = LeRobotDataset(HF_REPO_ID, episodes=[EPISODE_IDX])
actions = dataset.hf_dataset.select_columns("action")

# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo:
# https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
kinematics_solver = RobotKinematics(
    urdf_path="./src/lerobot/teleoperators/sim/so101_new_calib.urdf",
    target_frame_name="gripper_frame_link",
    joint_names=list(robot.bus.motors.keys()),
)

# Pipeline: EE-pose action -> joint-space action (inverse kinematics).
robot_ee_to_joints_processor = RobotProcessorPipeline[RobotAction, RobotAction](
    steps=[
        AddRobotObservationAsComplimentaryData(robot=robot),
        InverseKinematicsEEToJoints(
            kinematics=kinematics_solver,
            motor_names=list(robot.bus.motors.keys()),
            initial_guess_current_joints=False,  # Because replay is open loop
        ),
    ],
    to_transition=robot_action_to_transition,
    to_output=transition_to_robot_action,
)

log_say(f"Replaying episode {EPISODE_IDX}")
for idx in range(dataset.num_frames):
    t0 = time.perf_counter()

    # Rebuild the named EE action dict from the flat stored action vector,
    # pairing each value with its feature name from the dataset metadata.
    ee_action = {
        name: float(actions[idx]["action"][i])
        for i, name in enumerate(dataset.features["action"]["names"])
    }

    joint_action = robot_ee_to_joints_processor(ee_action)
    # The value returned by send_action is not needed for open-loop replay.
    robot.send_action(joint_action)

    # Hold the loop to the dataset's native frame rate.
    busy_wait(1.0 / dataset.fps - (time.perf_counter() - t0))

robot.disconnect()