chore(processor): rename converter function names (#1853)

* chore(processor): rename to_transition_teleop_action -> action_to_transition

* chore(processor): rename to_transition_robot_observation -> observation_to_transition

* chore(processor): rename to_output_robot_action -> transition_to_robot_action
This commit is contained in:
Steven Palma
2025-09-03 18:08:54 +02:00
committed by GitHub
parent d893bf1e30
commit 029c4a9a76
9 changed files with 40 additions and 40 deletions
+4 -4
View File
@@ -23,8 +23,8 @@ from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.policies.factory import make_pre_post_processors
from lerobot.processor import DataProcessorPipeline
from lerobot.processor.converters import (
to_output_robot_action,
to_transition_robot_observation,
observation_to_transition,
transition_to_robot_action,
)
from lerobot.record import record_loop
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
@@ -75,7 +75,7 @@ robot_ee_to_joints_processor = DataProcessorPipeline(
),
],
to_transition=lambda tr: tr,
to_output=to_output_robot_action,
to_output=transition_to_robot_action,
)
# Build pipeline to convert joint observation to ee pose observation
@@ -83,7 +83,7 @@ robot_joints_to_ee_pose_processor = DataProcessorPipeline(
steps=[
ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys()))
],
to_transition=to_transition_robot_observation,
to_transition=observation_to_transition,
to_output=lambda tr: tr,
)
+6 -6
View File
@@ -22,9 +22,9 @@ from lerobot.datasets.utils import merge_features
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import DataProcessorPipeline
from lerobot.processor.converters import (
to_output_robot_action,
to_transition_robot_observation,
to_transition_teleop_action,
action_to_transition,
observation_to_transition,
transition_to_robot_action,
)
from lerobot.record import record_loop
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
@@ -88,7 +88,7 @@ phone_to_robot_ee_pose_processor = DataProcessorPipeline(
max_ee_twist_step_rad=0.50,
),
],
to_transition=to_transition_teleop_action,
to_transition=action_to_transition,
to_output=lambda tr: tr,
)
@@ -106,7 +106,7 @@ robot_ee_to_joints_processor = DataProcessorPipeline(
),
],
to_transition=lambda tr: tr,
to_output=to_output_robot_action,
to_output=transition_to_robot_action,
)
# Build pipeline to convert joint observation to ee pose observation
@@ -114,7 +114,7 @@ robot_joints_to_ee_pose = DataProcessorPipeline(
steps=[
ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys()))
],
to_transition=to_transition_robot_observation,
to_transition=observation_to_transition,
to_output=lambda tr: tr,
)
+3 -3
View File
@@ -20,7 +20,7 @@ import time
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import DataProcessorPipeline
from lerobot.processor.converters import to_output_robot_action, to_transition_teleop_action
from lerobot.processor.converters import action_to_transition, transition_to_robot_action
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.robot_kinematic_processor import (
AddRobotObservationAsComplimentaryData,
@@ -59,8 +59,8 @@ robot_ee_to_joints_processor = DataProcessorPipeline(
initial_guess_current_joints=False, # Because replay is open loop
),
],
to_transition=to_transition_teleop_action,
to_output=to_output_robot_action,
to_transition=action_to_transition,
to_output=transition_to_robot_action,
)
robot_ee_to_joints_processor.reset()
+3 -3
View File
@@ -17,7 +17,7 @@ import time
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import DataProcessorPipeline
from lerobot.processor.converters import to_output_robot_action, to_transition_teleop_action
from lerobot.processor.converters import action_to_transition, transition_to_robot_action
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.robot_kinematic_processor import (
AddRobotObservationAsComplimentaryData,
@@ -72,8 +72,8 @@ phone_to_robot_joints_processor = DataProcessorPipeline(
speed_factor=20.0,
),
],
to_transition=to_transition_teleop_action,
to_output=to_output_robot_action,
to_transition=action_to_transition,
to_output=transition_to_robot_action,
)
robot.connect()
+3 -3
View File
@@ -233,7 +233,7 @@ def create_transition(
}
def to_transition_teleop_action(action: dict[str, Any]) -> EnvTransition:
def action_to_transition(action: dict[str, Any]) -> EnvTransition: # action_to_transition
"""
Convert a raw teleop action dict into an EnvTransition under the ACTION TransitionKey.
"""
@@ -251,7 +251,7 @@ def to_transition_teleop_action(action: dict[str, Any]) -> EnvTransition:
# TODO(Adil, Pepijn): Overtime we can maybe add these converters to pipeline.py itself
def to_transition_robot_observation(observation: dict[str, Any]) -> EnvTransition:
def observation_to_transition(observation: dict[str, Any]) -> EnvTransition:
"""
Convert a raw robot observation dict into an EnvTransition under the OBSERVATION TransitionKey.
"""
@@ -268,7 +268,7 @@ def to_transition_robot_observation(observation: dict[str, Any]) -> EnvTransitio
return create_transition(observation=obs_dict, action={})
def to_output_robot_action(transition: EnvTransition) -> dict[str, Any]:
def transition_to_robot_action(transition: EnvTransition) -> dict[str, Any]:
"""
Converts a EnvTransition under the ACTION TransitionKey to a dict with keys ending in '.pos' for raw robot actions.
"""
+6 -6
View File
@@ -78,10 +78,10 @@ from lerobot.policies.factory import make_policy, make_pre_post_processors
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.processor import DataProcessorPipeline, IdentityProcessorStep, TransitionKey
from lerobot.processor.converters import (
to_output_robot_action,
to_transition_robot_observation,
to_transition_teleop_action,
action_to_transition,
observation_to_transition,
transition_to_dataset_frame,
transition_to_robot_action,
)
from lerobot.processor.rename_processor import rename_stats
from lerobot.robots import ( # noqa: F401
@@ -245,14 +245,14 @@ def record_loop(
display_data: bool = False,
):
teleop_action_processor = teleop_action_processor or DataProcessorPipeline(
steps=[IdentityProcessorStep()], to_transition=to_transition_teleop_action, to_output=lambda tr: tr
steps=[IdentityProcessorStep()], to_transition=action_to_transition, to_output=lambda tr: tr
)
robot_action_processor = robot_action_processor or DataProcessorPipeline(
steps=[IdentityProcessorStep()], to_transition=lambda tr: tr, to_output=to_output_robot_action
steps=[IdentityProcessorStep()], to_transition=lambda tr: tr, to_output=transition_to_robot_action
)
robot_observation_processor = robot_observation_processor or DataProcessorPipeline(
steps=[IdentityProcessorStep()],
to_transition=to_transition_robot_observation,
to_transition=observation_to_transition,
to_output=lambda tr: tr,
)
+3 -3
View File
@@ -48,7 +48,7 @@ from pprint import pformat
from lerobot.configs import parser
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.processor import DataProcessorPipeline, IdentityProcessorStep
from lerobot.processor.converters import to_output_robot_action, to_transition_teleop_action
from lerobot.processor.converters import action_to_transition, transition_to_robot_action
from lerobot.robots import ( # noqa: F401
Robot,
RobotConfig,
@@ -96,8 +96,8 @@ def replay(cfg: ReplayConfig):
# Initialize robot action processor with default if not provided
robot_action_processor = cfg.robot_action_processor or DataProcessorPipeline(
steps=[IdentityProcessorStep()],
to_transition=to_transition_teleop_action,
to_output=to_output_robot_action, # type: ignore[arg-type]
to_transition=action_to_transition,
to_output=transition_to_robot_action, # type: ignore[arg-type]
)
# Reset processor
+6 -6
View File
@@ -63,9 +63,9 @@ from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraCon
from lerobot.configs import parser
from lerobot.processor import DataProcessorPipeline, IdentityProcessorStep
from lerobot.processor.converters import (
to_output_robot_action,
to_transition_robot_observation,
to_transition_teleop_action,
action_to_transition,
observation_to_transition,
transition_to_robot_action,
)
from lerobot.robots import ( # noqa: F401
Robot,
@@ -121,16 +121,16 @@ def teleop_loop(
):
# Initialize processors with defaults if not provided
teleop_action_processor = teleop_action_processor or DataProcessorPipeline(
steps=[IdentityProcessorStep()], to_transition=to_transition_teleop_action, to_output=lambda tr: tr
steps=[IdentityProcessorStep()], to_transition=action_to_transition, to_output=lambda tr: tr
)
robot_action_processor = robot_action_processor or DataProcessorPipeline(
steps=[IdentityProcessorStep()],
to_transition=lambda tr: tr,
to_output=to_output_robot_action, # type: ignore[arg-type]
to_output=transition_to_robot_action, # type: ignore[arg-type]
)
robot_observation_processor = robot_observation_processor or DataProcessorPipeline(
steps=[IdentityProcessorStep()],
to_transition=to_transition_robot_observation,
to_transition=observation_to_transition,
to_output=lambda tr: tr,
)
+6 -6
View File
@@ -4,13 +4,13 @@ import torch
from lerobot.processor import TransitionKey
from lerobot.processor.converters import (
action_to_transition,
batch_to_transition,
to_output_robot_action,
observation_to_transition,
to_tensor,
to_transition_robot_observation,
to_transition_teleop_action,
transition_to_batch,
transition_to_dataset_frame,
transition_to_robot_action,
)
@@ -23,7 +23,7 @@ def test_to_transition_teleop_action_prefix_and_tensor_conversion():
"raw_img": img, # uint8 HWC to torch tensor
}
tr = to_transition_teleop_action(act)
tr = action_to_transition(act)
# Should be an EnvTransition-like dict with ACTION populated
assert isinstance(tr, dict)
@@ -61,7 +61,7 @@ def test_to_transition_robot_observation_state_vs_images_split():
"arr": np.array([1.5, 2.5]), # vector to state to torch tensor
}
tr = to_transition_robot_observation(obs)
tr = observation_to_transition(obs)
assert isinstance(tr, dict)
assert TransitionKey.OBSERVATION in tr
@@ -99,7 +99,7 @@ def test_to_output_robot_action_strips_prefix_and_filters_pos_keys_only():
}
}
out = to_output_robot_action(tr)
out = transition_to_robot_action(tr)
# Only ".pos" keys with "action." prefix are retained and stripped to base names
assert set(out.keys()) == {"j1.pos", "gripper.pos"}
# Values converted to float