refactor(pipeline): Remove model card generation and streamline processor methods

- Eliminated the _generate_model_card method from RobotProcessor, which was responsible for generating README.md files from a template.
- Updated save_pretrained method to remove model card generation, focusing on serialization of processor definitions and parameters.
- Added default implementations for the get_config, state_dict, load_state_dict, reset, and feature_contract methods in various processor classes to enhance consistency and usability.
This commit is contained in:
Adil Zouitine
2025-08-05 10:31:09 +02:00
parent 5595887fd0
commit 8077456c00
6 changed files with 109 additions and 2042 deletions
+2 -7
View File
@@ -22,7 +22,7 @@ from gymnasium.utils.env_checker import check_env
import lerobot
from lerobot.envs.factory import make_env, make_env_config
from lerobot.processor import RobotProcessor, TransitionKey, VanillaObservationProcessor
from lerobot.envs.utils import preprocess_observation
from tests.utils import require_env
OBS_TYPES = ["state", "pixels", "pixels_agent_pos"]
@@ -48,12 +48,7 @@ def test_factory(env_name):
cfg = make_env_config(env_name)
env = make_env(cfg, n_envs=1)
obs, _ = env.reset()
# Process observation using processor
obs_processor = RobotProcessor([VanillaObservationProcessor()])
transition = (obs, None, None, None, None, None, None)
processed_transition = obs_processor(transition)
obs = processed_transition[TransitionKey.OBSERVATION]
obs = preprocess_observation(obs)
# test image keys are float32 in range [0,1]
for key in obs:
+2 -5
View File
@@ -30,6 +30,7 @@ from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.datasets.factory import make_dataset
from lerobot.datasets.utils import cycle, dataset_to_policy_features
from lerobot.envs.factory import make_env, make_env_config
from lerobot.envs.utils import preprocess_observation
from lerobot.optim.factory import make_optimizer_and_scheduler
from lerobot.policies.act.modeling_act import ACTTemporalEnsembler
from lerobot.policies.factory import (
@@ -39,7 +40,6 @@ from lerobot.policies.factory import (
)
from lerobot.policies.normalize import Normalize, Unnormalize
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.processor import RobotProcessor, TransitionKey, VanillaObservationProcessor
from lerobot.utils.random_utils import seeded_context
from tests.artifacts.policies.save_policy_to_safetensors import get_policy_stats
from tests.utils import DEVICE, require_cpu, require_env, require_x86_64_kernel
@@ -185,10 +185,7 @@ def test_policy(ds_repo_id, env_name, env_kwargs, policy_name, policy_kwargs):
observation, _ = env.reset(seed=train_cfg.seed)
# apply transform to normalize the observations
obs_processor = RobotProcessor([VanillaObservationProcessor()])
transition = (observation, None, None, None, None, None, None)
processed_transition = obs_processor(transition)
observation = processed_transition[TransitionKey.OBSERVATION]
observation = preprocess_observation(observation)
# send observation to device/gpu
observation = {key: observation[key].to(DEVICE, non_blocking=True) for key in observation}