mirror of https://github.com/huggingface/lerobot.git (synced 2026-05-15 08:39:49 +00:00)
e963e5a0c4
* refactor: RL stack refactoring — RLAlgorithm, RLTrainer, DataMixer, and SAC restructuring
* chore: clarify torch.compile disabled note in SACAlgorithm
* fix(teleop): keyboard EE teleop not registering special keys and losing intervention state
  Fixes #2345
  Co-authored-by: jpizarrom <jpizarrom@gmail.com>
* fix: remove leftover normalization calls from reward classifier predict_reward
  Fixes #2355
* fix: add thread synchronization to ReplayBuffer to prevent race condition between add() and sample() (see the sketch after this list)
* refactor: update SACAlgorithm to pass action_dim to _init_critics and fix encoder reference
* perf: remove redundant CPU→GPU→CPU transition move in learner
* fix: add kwargs in reward classifier __init__()
* fix: include IS_INTERVENTION in complementary_info sent to learner for offline replay buffer
* fix: add try/finally to control_loop to ensure image writer cleanup on exit
* fix: use string key for IS_INTERVENTION in complementary_info to avoid torch.load serialization error
* fix: skip tests that require grpc if not available
* fix(tests): ensure tensor stats comparison accounts for reshaping in normalization tests
* fix(tests): skip tests that require grpc if not available
* refactor(rl): expose public API in rl/__init__ and use relative imports in sub-packages
* fix(config): update vision encoder model name to lerobot/resnet10
* fix(sac): clarify torch.compile status
* refactor(rl): update shutdown_event type hints from 'any' to 'Any' for consistency and clarity
* refactor(sac): simplify optimizer return structure
* perf(rl): use async iterators in OnlineOfflineMixer.get_iterator
* refactor(sac): decouple algorithm hyperparameters from policy config
* update loss names in tests
* fix docstring
* remove unused type alias
* fix test for flat dict structure
* refactor(policies): rename policies/sac → policies/gaussian_actor
* refactor(rl/sac): consolidate hyperparameter ownership and clean up discrete critic
* perf(observation_processor): add CUDA support for image processing
* fix(rl): correctly wire HIL-SERL gripper penalty through processor pipeline (cherry picked from commit 9c2af818ff)
* fix(rl): add time limit processor to environment pipeline (cherry picked from commit cd105f65cb)
* fix(rl): clarify discrete gripper action mapping in GripperVelocityToJoint for SO100 (cherry picked from commit 494f469a2b)
* fix(rl): update neutral gripper action (cherry picked from commit 9c9064e5be)
* fix(rl): merge environment and action-processor info in transition processing (cherry picked from commit 30e1886b64)
* fix(rl): mirror gym_manipulator in actor (cherry picked from commit d2a046dfc5)
* fix(rl): postprocess action in actor (cherry picked from commit c2556439e5)
* fix(rl): improve action processing for discrete and continuous actions (cherry picked from commit f887ab3f6a)
* fix(rl): enhance intervention handling in actor and learner (cherry picked from commit ef8bfffbd7)
* Revert "perf(observation_processor): add CUDA support for image processing"
  This reverts commit 38b88c414c.
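One of the fixes above serializes ReplayBuffer access so that add() (called from the actor thread) and sample() (called from the learner thread) cannot race. A minimal sketch of the pattern, assuming a deque-backed buffer; the class layout and method names here are illustrative, not lerobot's actual implementation:

```python
import random
import threading
from collections import deque


class ReplayBuffer:
    """Illustrative buffer: one lock serializes add() and sample() across threads."""

    def __init__(self, capacity: int):
        self._storage = deque(maxlen=capacity)
        self._lock = threading.Lock()  # guards every read/write of _storage

    def add(self, transition) -> None:
        # Actor thread: holding the lock keeps sample() from reading
        # while the deque is being mutated.
        with self._lock:
            self._storage.append(transition)

    def sample(self, batch_size: int) -> list:
        # Learner thread: snapshot under the lock so indices stay valid
        # even if an add() is waiting to run.
        with self._lock:
            return random.sample(list(self._storage), min(batch_size, len(self._storage)))
```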
* refactor(rl): make algorithm a nested config so all SAC hyperparameters are JSON-addressable
* refactor(rl): add make_algorithm_config function for RLAlgorithmConfig instantiation
* refactor(rl): add type property to RLAlgorithmConfig for better clarity
* refactor(rl): make RLAlgorithmConfig an abstract base class for better extensibility
* refactor(tests): remove grpc import checks from test files for cleaner code
* fix(tests): gate RL tests on the `datasets` extra
* refactor: simplify docstrings for clarity and conciseness across multiple files
* fix(rl): update gripper position key and handle action absence during reset
* fix(rl): record pre-step observation so (obs, action, next.reward) align in gym_manipulator dataset
* refactor: clean up import statements
* chore: address reviewer comments
* chore: improve visual stats reshaping logic and update docstring for clarity
* refactor: enforce mandatory config_class and name attributes in RLAlgorithm
* refactor: raise NotImplementedError in abstract methods of RLAlgorithm and DataMixer
* refactor: replace build_algorithm with make_algorithm for SACAlgorithmConfig and update related tests
* refactor: add require_package calls for grpcio and gym-hil in relevant modules
* refactor(rl): move grpcio guards to runtime entry points
* feat(rl): consolidate HIL-SERL checkpoint into HF-style components (sketched below)
  Make `RLAlgorithmConfig` and `RLAlgorithm` `HubMixin`s, add abstract `state_dict()` / `load_state_dict()` for the critic ensemble, target nets and `log_alpha`, and persist them as a sibling `algorithm/` component next to `pretrained_model/`. Replace the pickled `training_state.pt` with an enriched `training_step.json` carrying `step` and `interaction_step`, so resume restores actor + critics + target nets + temperature + optimizers + RNG + counters from HF-standard files.
* refactor(rl): move actor weight-sync wire format from policy to algorithm
* refactor(rl): update type hints for learner and actor functions
* refactor(rl): hoist grpcio guard to module top in actor/learner
* chore(rl): manage import pattern in actor (#3564)
  * chore(rl): manage import pattern in actor
  * chore(rl): optional grpc imports in learner; quote grpc ServicerContext types
  Co-authored-by: Khalil Meftah <khalil.meftah@huggingface.co>
* update uv.lock
* chore(doc): update doc

Co-authored-by: jpizarrom <jpizarrom@gmail.com>
Co-authored-by: Steven Palma <imstevenpmwork@ieee.org>
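The checkpoint consolidation described above stores the algorithm state as a sibling component of the policy and replaces the pickled training_state.pt with a small JSON file. A rough sketch of the layout the message describes, assuming HubMixin-style save_pretrained() on both components; the helper name save_rl_checkpoint is an assumption for illustration:

```python
import json
from pathlib import Path


def save_rl_checkpoint(checkpoint_dir, policy, algorithm, step: int, interaction_step: int) -> None:
    root = Path(checkpoint_dir)

    # Two sibling components, each saved in HF-standard form.
    policy.save_pretrained(root / "pretrained_model")  # actor weights + config
    algorithm.save_pretrained(root / "algorithm")  # critic ensemble, target nets, log_alpha

    # training_step.json replaces the pickled training_state.pt and carries
    # the counters needed to resume where training left off.
    (root / "training_step.json").write_text(
        json.dumps({"step": step, "interaction_step": interaction_step})
    )
```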
147 lines
5.2 KiB
Python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.rewards.classifier.configuration_classifier import RewardClassifierConfig
from lerobot.rewards.classifier.modeling_classifier import ClassifierOutput
from lerobot.utils.constants import OBS_IMAGE, REWARD
from tests.utils import skip_if_package_missing
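# `skip_if_package_missing` (imported above) gates tests on optional
# dependencies such as transformers. A minimal sketch of such a decorator,
# assuming it wraps pytest.mark.skipif; the real tests.utils implementation
# may differ:
#
#     import importlib.util
#     import pytest
#
#     def skip_if_package_missing(package: str):
#         return pytest.mark.skipif(
#             importlib.util.find_spec(package) is None,
#             reason=f"{package} is not installed",
#         )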
def test_classifier_output():
    output = ClassifierOutput(
        logits=torch.tensor([1, 2, 3]),
        probabilities=torch.tensor([0.1, 0.2, 0.3]),
        hidden_states=None,
    )

    # The repr should surface all three fields verbatim.
    assert (
        f"{output}"
        == "ClassifierOutput(logits=tensor([1, 2, 3]), probabilities=tensor([0.1000, 0.2000, 0.3000]), hidden_states=None)"
    )


@skip_if_package_missing("transformers")
def test_binary_classifier_with_default_params():
    from lerobot.rewards.classifier.modeling_classifier import Classifier

    config = RewardClassifierConfig()
    config.input_features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    config.output_features = {
        REWARD: PolicyFeature(type=FeatureType.REWARD, shape=(1,)),
    }
    config.normalization_mapping = {
        "VISUAL": NormalizationMode.IDENTITY,
        "REWARD": NormalizationMode.IDENTITY,
    }
    config.num_cameras = 1
    classifier = Classifier(config)

    batch_size = 10

    # Binary labels in {0, 1}; `batch` avoids shadowing the built-in `input`.
    batch = {
        OBS_IMAGE: torch.rand((batch_size, 3, 128, 128)),
        REWARD: torch.randint(low=0, high=2, size=(batch_size,)).float(),
    }

    images, labels = classifier.extract_images_and_labels(batch)
    assert len(images) == 1
    assert images[0].shape == torch.Size([batch_size, 3, 128, 128])
    assert labels.shape == torch.Size([batch_size])

    output = classifier.predict(images)

    # Binary head: one logit/probability per sample, plus a 256-dim hidden state.
    assert output is not None
    assert output.logits.size() == torch.Size([batch_size])
    assert not torch.isnan(output.logits).any(), "Tensor contains NaN values"
    assert output.probabilities.shape == torch.Size([batch_size])
    assert not torch.isnan(output.probabilities).any(), "Tensor contains NaN values"
    assert output.hidden_states.shape == torch.Size([batch_size, 256])
    assert not torch.isnan(output.hidden_states).any(), "Tensor contains NaN values"

@skip_if_package_missing("transformers")
def test_multiclass_classifier():
    from lerobot.rewards.classifier.modeling_classifier import Classifier

    num_classes = 5
    config = RewardClassifierConfig()
    config.input_features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    config.output_features = {
        REWARD: PolicyFeature(type=FeatureType.REWARD, shape=(num_classes,)),
    }
    config.num_cameras = 1
    config.num_classes = num_classes
    classifier = Classifier(config)

    batch_size = 10

    # Soft multiclass targets; `batch` avoids shadowing the built-in `input`.
    batch = {
        OBS_IMAGE: torch.rand((batch_size, 3, 128, 128)),
        REWARD: torch.rand((batch_size, num_classes)),
    }

    images, labels = classifier.extract_images_and_labels(batch)
    assert len(images) == 1
    assert images[0].shape == torch.Size([batch_size, 3, 128, 128])
    assert labels.shape == torch.Size([batch_size, num_classes])

    output = classifier.predict(images)

    # Multiclass head: one logit/probability per class.
    assert output is not None
    assert output.logits.shape == torch.Size([batch_size, num_classes])
    assert not torch.isnan(output.logits).any(), "Tensor contains NaN values"
    assert output.probabilities.shape == torch.Size([batch_size, num_classes])
    assert not torch.isnan(output.probabilities).any(), "Tensor contains NaN values"
    assert output.hidden_states.shape == torch.Size([batch_size, 256])
    assert not torch.isnan(output.hidden_states).any(), "Tensor contains NaN values"

@skip_if_package_missing("transformers")
def test_default_device():
    from lerobot.rewards.classifier.modeling_classifier import Classifier

    config = RewardClassifierConfig()
    assert config.device is None or config.device == "cpu"

    config.input_features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    config.num_cameras = 1
    classifier = Classifier(config)
    for p in classifier.parameters():
        assert p.device == torch.device("cpu")

@skip_if_package_missing("transformers")
def test_explicit_device_setup():
    from lerobot.rewards.classifier.modeling_classifier import Classifier

    config = RewardClassifierConfig(device="cpu")
    assert config.device == "cpu"

    config.input_features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    config.num_cameras = 1
    classifier = Classifier(config)
    for p in classifier.parameters():
        assert p.device == torch.device("cpu")