diff --git a/examples/phone_to_so100/evaluate.py b/examples/phone_to_so100/evaluate.py index 67fa86978..0951abc71 100644 --- a/examples/phone_to_so100/evaluate.py +++ b/examples/phone_to_so100/evaluate.py @@ -23,8 +23,8 @@ from lerobot.policies.act.modeling_act import ACTPolicy from lerobot.policies.factory import make_pre_post_processors from lerobot.processor import DataProcessorPipeline from lerobot.processor.converters import ( - to_output_robot_action, - to_transition_robot_observation, + observation_to_transition, + transition_to_robot_action, ) from lerobot.record import record_loop from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig @@ -75,7 +75,7 @@ robot_ee_to_joints_processor = DataProcessorPipeline( ), ], to_transition=lambda tr: tr, - to_output=to_output_robot_action, + to_output=transition_to_robot_action, ) # Build pipeline to convert joint observation to ee pose observation @@ -83,7 +83,7 @@ robot_joints_to_ee_pose_processor = DataProcessorPipeline( steps=[ ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys())) ], - to_transition=to_transition_robot_observation, + to_transition=observation_to_transition, to_output=lambda tr: tr, ) diff --git a/examples/phone_to_so100/record.py b/examples/phone_to_so100/record.py index ee72ca5d2..d952f7e52 100644 --- a/examples/phone_to_so100/record.py +++ b/examples/phone_to_so100/record.py @@ -22,9 +22,9 @@ from lerobot.datasets.utils import merge_features from lerobot.model.kinematics import RobotKinematics from lerobot.processor import DataProcessorPipeline from lerobot.processor.converters import ( - to_output_robot_action, - to_transition_robot_observation, - to_transition_teleop_action, + action_to_transition, + observation_to_transition, + transition_to_robot_action, ) from lerobot.record import record_loop from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig @@ -88,7 +88,7 @@ 
phone_to_robot_ee_pose_processor = DataProcessorPipeline( max_ee_twist_step_rad=0.50, ), ], - to_transition=to_transition_teleop_action, + to_transition=action_to_transition, to_output=lambda tr: tr, ) @@ -106,7 +106,7 @@ robot_ee_to_joints_processor = DataProcessorPipeline( ), ], to_transition=lambda tr: tr, - to_output=to_output_robot_action, + to_output=transition_to_robot_action, ) # Build pipeline to convert joint observation to ee pose observation @@ -114,7 +114,7 @@ robot_joints_to_ee_pose = DataProcessorPipeline( steps=[ ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys())) ], - to_transition=to_transition_robot_observation, + to_transition=observation_to_transition, to_output=lambda tr: tr, ) diff --git a/examples/phone_to_so100/replay.py b/examples/phone_to_so100/replay.py index fe2facb5a..2fa5c51c4 100644 --- a/examples/phone_to_so100/replay.py +++ b/examples/phone_to_so100/replay.py @@ -20,7 +20,7 @@ import time from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.model.kinematics import RobotKinematics from lerobot.processor import DataProcessorPipeline -from lerobot.processor.converters import to_output_robot_action, to_transition_teleop_action +from lerobot.processor.converters import action_to_transition, transition_to_robot_action from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig from lerobot.robots.so100_follower.robot_kinematic_processor import ( AddRobotObservationAsComplimentaryData, @@ -59,8 +59,8 @@ robot_ee_to_joints_processor = DataProcessorPipeline( initial_guess_current_joints=False, # Because replay is open loop ), ], - to_transition=to_transition_teleop_action, - to_output=to_output_robot_action, + to_transition=action_to_transition, + to_output=transition_to_robot_action, ) robot_ee_to_joints_processor.reset() diff --git a/examples/phone_to_so100/teleoperate.py b/examples/phone_to_so100/teleoperate.py index d7cc4a457..193c88986 100644 
--- a/examples/phone_to_so100/teleoperate.py +++ b/examples/phone_to_so100/teleoperate.py @@ -17,7 +17,7 @@ import time from lerobot.model.kinematics import RobotKinematics from lerobot.processor import DataProcessorPipeline -from lerobot.processor.converters import to_output_robot_action, to_transition_teleop_action +from lerobot.processor.converters import action_to_transition, transition_to_robot_action from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig from lerobot.robots.so100_follower.robot_kinematic_processor import ( AddRobotObservationAsComplimentaryData, @@ -72,8 +72,8 @@ phone_to_robot_joints_processor = DataProcessorPipeline( speed_factor=20.0, ), ], - to_transition=to_transition_teleop_action, - to_output=to_output_robot_action, + to_transition=action_to_transition, + to_output=transition_to_robot_action, ) robot.connect() diff --git a/src/lerobot/processor/converters.py b/src/lerobot/processor/converters.py index 2dec92dda..bc768952f 100644 --- a/src/lerobot/processor/converters.py +++ b/src/lerobot/processor/converters.py @@ -233,7 +233,7 @@ def create_transition( } -def to_transition_teleop_action(action: dict[str, Any]) -> EnvTransition: +def action_to_transition(action: dict[str, Any]) -> EnvTransition: """ Convert a raw teleop action dict into an EnvTransition under the ACTION TransitionKey. """ @@ -251,7 +251,7 @@ def to_transition_teleop_action(action: dict[str, Any]) -> EnvTransition: # TODO(Adil, Pepijn): Overtime we can maybe add these converters to pipeline.py itself -def to_transition_robot_observation(observation: dict[str, Any]) -> EnvTransition: +def observation_to_transition(observation: dict[str, Any]) -> EnvTransition: """ Convert a raw robot observation dict into an EnvTransition under the OBSERVATION TransitionKey. 
""" @@ -268,7 +268,7 @@ def to_transition_robot_observation(observation: dict[str, Any]) -> EnvTransitio return create_transition(observation=obs_dict, action={}) -def to_output_robot_action(transition: EnvTransition) -> dict[str, Any]: +def transition_to_robot_action(transition: EnvTransition) -> dict[str, Any]: """ Converts a EnvTransition under the ACTION TransitionKey to a dict with keys ending in '.pos' for raw robot actions. """ diff --git a/src/lerobot/record.py b/src/lerobot/record.py index d6278965f..b09f04d88 100644 --- a/src/lerobot/record.py +++ b/src/lerobot/record.py @@ -78,10 +78,10 @@ from lerobot.policies.factory import make_policy, make_pre_post_processors from lerobot.policies.pretrained import PreTrainedPolicy from lerobot.processor import DataProcessorPipeline, IdentityProcessorStep, TransitionKey from lerobot.processor.converters import ( - to_output_robot_action, - to_transition_robot_observation, - to_transition_teleop_action, + action_to_transition, + observation_to_transition, transition_to_dataset_frame, + transition_to_robot_action, ) from lerobot.processor.rename_processor import rename_stats from lerobot.robots import ( # noqa: F401 @@ -245,14 +245,14 @@ def record_loop( display_data: bool = False, ): teleop_action_processor = teleop_action_processor or DataProcessorPipeline( - steps=[IdentityProcessorStep()], to_transition=to_transition_teleop_action, to_output=lambda tr: tr + steps=[IdentityProcessorStep()], to_transition=action_to_transition, to_output=lambda tr: tr ) robot_action_processor = robot_action_processor or DataProcessorPipeline( - steps=[IdentityProcessorStep()], to_transition=lambda tr: tr, to_output=to_output_robot_action + steps=[IdentityProcessorStep()], to_transition=lambda tr: tr, to_output=transition_to_robot_action ) robot_observation_processor = robot_observation_processor or DataProcessorPipeline( steps=[IdentityProcessorStep()], - to_transition=to_transition_robot_observation, + 
to_transition=observation_to_transition, to_output=lambda tr: tr, ) diff --git a/src/lerobot/replay.py b/src/lerobot/replay.py index 7c5eda848..ba9828c1e 100644 --- a/src/lerobot/replay.py +++ b/src/lerobot/replay.py @@ -48,7 +48,7 @@ from pprint import pformat from lerobot.configs import parser from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.processor import DataProcessorPipeline, IdentityProcessorStep -from lerobot.processor.converters import to_output_robot_action, to_transition_teleop_action +from lerobot.processor.converters import action_to_transition, transition_to_robot_action from lerobot.robots import ( # noqa: F401 Robot, RobotConfig, @@ -96,8 +96,8 @@ def replay(cfg: ReplayConfig): # Initialize robot action processor with default if not provided robot_action_processor = cfg.robot_action_processor or DataProcessorPipeline( steps=[IdentityProcessorStep()], - to_transition=to_transition_teleop_action, - to_output=to_output_robot_action, # type: ignore[arg-type] + to_transition=action_to_transition, + to_output=transition_to_robot_action, # type: ignore[arg-type] ) # Reset processor diff --git a/src/lerobot/teleoperate.py b/src/lerobot/teleoperate.py index 924e22815..7aa268fb0 100644 --- a/src/lerobot/teleoperate.py +++ b/src/lerobot/teleoperate.py @@ -63,9 +63,9 @@ from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraCon from lerobot.configs import parser from lerobot.processor import DataProcessorPipeline, IdentityProcessorStep from lerobot.processor.converters import ( - to_output_robot_action, - to_transition_robot_observation, - to_transition_teleop_action, + action_to_transition, + observation_to_transition, + transition_to_robot_action, ) from lerobot.robots import ( # noqa: F401 Robot, @@ -121,16 +121,16 @@ def teleop_loop( ): # Initialize processors with defaults if not provided teleop_action_processor = teleop_action_processor or DataProcessorPipeline( - steps=[IdentityProcessorStep()], 
to_transition=to_transition_teleop_action, to_output=lambda tr: tr + steps=[IdentityProcessorStep()], to_transition=action_to_transition, to_output=lambda tr: tr ) robot_action_processor = robot_action_processor or DataProcessorPipeline( steps=[IdentityProcessorStep()], to_transition=lambda tr: tr, - to_output=to_output_robot_action, # type: ignore[arg-type] + to_output=transition_to_robot_action, # type: ignore[arg-type] ) robot_observation_processor = robot_observation_processor or DataProcessorPipeline( steps=[IdentityProcessorStep()], - to_transition=to_transition_robot_observation, + to_transition=observation_to_transition, to_output=lambda tr: tr, ) diff --git a/tests/processor/test_converters.py b/tests/processor/test_converters.py index 23acaf11e..688e4e17d 100644 --- a/tests/processor/test_converters.py +++ b/tests/processor/test_converters.py @@ -4,13 +4,13 @@ import torch from lerobot.processor import TransitionKey from lerobot.processor.converters import ( + action_to_transition, batch_to_transition, - to_output_robot_action, + observation_to_transition, to_tensor, - to_transition_robot_observation, - to_transition_teleop_action, transition_to_batch, transition_to_dataset_frame, + transition_to_robot_action, ) @@ -23,7 +23,7 @@ def test_to_transition_teleop_action_prefix_and_tensor_conversion(): "raw_img": img, # uint8 HWC to torch tensor } - tr = to_transition_teleop_action(act) + tr = action_to_transition(act) # Should be an EnvTransition-like dict with ACTION populated assert isinstance(tr, dict) @@ -61,7 +61,7 @@ def test_to_transition_robot_observation_state_vs_images_split(): "arr": np.array([1.5, 2.5]), # vector to state to torch tensor } - tr = to_transition_robot_observation(obs) + tr = observation_to_transition(obs) assert isinstance(tr, dict) assert TransitionKey.OBSERVATION in tr @@ -99,7 +99,7 @@ def test_to_output_robot_action_strips_prefix_and_filters_pos_keys_only(): } } - out = to_output_robot_action(tr) + out = 
transition_to_robot_action(tr) # Only ".pos" keys with "action." prefix are retained and stripped to base names assert set(out.keys()) == {"j1.pos", "gripper.pos"} # Values converted to float