Mirror of https://github.com/huggingface/lerobot.git, synced 2026-05-11 14:49:43 +00:00
8a3d64033f
* feat(rewards): add RewardModelConfig and PreTrainedRewardModel base classes
* refactor(rewards): migrate Classifier from policies/sac/reward_model/ to rewards/classifier/
* refactor(rewards): migrate SARM from policies/sarm/ to rewards/sarm/
* refactor(rewards): add rewards/factory.py and remove reward model code from policies/factory.py
* refactor(rewards): update imports and delete old reward model locations
* test(rewards): add reward model tests and update existing test imports
* fix(rewards): restore full Classifier and SARM implementations
* test(rewards): restore missing CUDA and mixed precision classifier processor tests
* refactor(lerobot_train.py): remove rabc-specific configuration and replace it with a generic sampler-weight class in lerobot_train
* refactor(lerobot_train.py): add missing sampling weight script
* linter + missing files
* add testing for sample weighter
* revert some useless changes, improve typing
* update docs
* add automatic detection of the progress path
* remove type exp
* improve comment
* fix: move rabc.py to rewards/sarm/ and update import paths
* refactor(imports): update reward model imports to new module structure
* refactor(imports): update reward model imports to reflect new module structure
* refactor(imports): conditionally import pandas based on availability
* feat(configs): add reward_model field to TrainPipelineConfig and Hub fields to RewardModelConfig
* refactor(policies): remove reward model branches from policy factory and __init__
* refactor(rewards): expand __init__ facade and fix SARMConfig __post_init__ crash
* feat(train): route reward model training through rewards/factory instead of policies/factory
* refactor(train): streamline reward model training logic
* fix(rewards): ensure FileNotFoundError is raised for missing config_file
* refactor(train): update __get_path_fields__ to include reward_model for config loading
* refactor(classifier): remove redundant input normalization in predict_reward method
* fix(train): raise ValueError for non-trainable reward models in train function
* refactor(pretrained_rm): add model card template
* refactor(tests): reward models
* refactor(sarm): update reset method and remove unused action prediction methods
* refactor(wandb): differentiate tags for reward model and policy training in cfg_to_group function
* fix(train): raise ValueError for PEFT usage in reward model training
* refactor(rewards): enhance RewardModelConfig with device handling and delta indices properties

Co-authored-by: Michel Aractingi <michel.aractingi@huggingface.co>
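The refactor above relocates the reward classifier from policies/sac/reward_model/ to the new lerobot.rewards.classifier module. Below is a minimal usage sketch of the relocated API, using only module paths and method names that appear in the test file that follows; the backbone load (the helper2424/resnet10 checkpoint mentioned in the skipped tests, pulled via transformers) and the remaining config defaults are assumptions, not verified behavior.

# Hypothetical usage sketch, not part of the file below; module paths and method
# names are taken from the tests, the backbone download is an assumption.
import torch

from lerobot.configs.types import FeatureType, PolicyFeature
from lerobot.rewards.classifier.configuration_classifier import RewardClassifierConfig
from lerobot.rewards.classifier.modeling_classifier import Classifier
from lerobot.utils.constants import OBS_IMAGE, REWARD

config = RewardClassifierConfig()
config.input_features = {OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224))}
config.output_features = {REWARD: PolicyFeature(type=FeatureType.REWARD, shape=(1,))}
config.num_cameras = 1

classifier = Classifier(config)  # assumed to load the pretrained image backbone via `transformers`
images = [torch.rand((1, 3, 128, 128))]  # one camera view, batch of one frame
output = classifier.predict(images)  # ClassifierOutput(logits=..., probabilities=..., hidden_states=...)
print(output.probabilities)  # per-frame success probability from the binary reward head

The same config object drives the multi-class case by setting config.num_classes and widening the REWARD feature shape, as the second test below shows.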
160 lines
5.7 KiB
Python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest
import torch

from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.rewards.classifier.configuration_classifier import RewardClassifierConfig
from lerobot.rewards.classifier.modeling_classifier import ClassifierOutput
from lerobot.utils.constants import OBS_IMAGE, REWARD
from tests.utils import skip_if_package_missing


def test_classifier_output():
    output = ClassifierOutput(
        logits=torch.tensor([1, 2, 3]),
        probabilities=torch.tensor([0.1, 0.2, 0.3]),
        hidden_states=None,
    )

    assert (
        f"{output}"
        == "ClassifierOutput(logits=tensor([1, 2, 3]), probabilities=tensor([0.1000, 0.2000, 0.3000]), hidden_states=None)"
    )


@skip_if_package_missing("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_binary_classifier_with_default_params():
    from lerobot.rewards.classifier.modeling_classifier import Classifier

    config = RewardClassifierConfig()
    config.input_features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    config.output_features = {
        REWARD: PolicyFeature(type=FeatureType.REWARD, shape=(1,)),
    }
    config.normalization_mapping = {
        "VISUAL": NormalizationMode.IDENTITY,
        "REWARD": NormalizationMode.IDENTITY,
    }
    config.num_cameras = 1
    classifier = Classifier(config)

    batch_size = 10

    input = {
        OBS_IMAGE: torch.rand((batch_size, 3, 128, 128)),
        REWARD: torch.randint(low=0, high=2, size=(batch_size,)).float(),
    }

    images, labels = classifier.extract_images_and_labels(input)
    assert len(images) == 1
    assert images[0].shape == torch.Size([batch_size, 3, 128, 128])
    assert labels.shape == torch.Size([batch_size])

    output = classifier.predict(images)

    assert output is not None
    assert output.logits.size() == torch.Size([batch_size])
    assert not torch.isnan(output.logits).any(), "Tensor contains NaN values"
    assert output.probabilities.shape == torch.Size([batch_size])
    assert not torch.isnan(output.probabilities).any(), "Tensor contains NaN values"
    assert output.hidden_states.shape == torch.Size([batch_size, 256])
    assert not torch.isnan(output.hidden_states).any(), "Tensor contains NaN values"


@skip_if_package_missing("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_multiclass_classifier():
    from lerobot.rewards.classifier.modeling_classifier import Classifier

    num_classes = 5
    config = RewardClassifierConfig()
    config.input_features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    config.output_features = {
        REWARD: PolicyFeature(type=FeatureType.REWARD, shape=(num_classes,)),
    }
    config.num_cameras = 1
    config.num_classes = num_classes
    classifier = Classifier(config)

    batch_size = 10

    input = {
        OBS_IMAGE: torch.rand((batch_size, 3, 128, 128)),
        REWARD: torch.rand((batch_size, num_classes)),
    }

    images, labels = classifier.extract_images_and_labels(input)
    assert len(images) == 1
    assert images[0].shape == torch.Size([batch_size, 3, 128, 128])
    assert labels.shape == torch.Size([batch_size, num_classes])

    output = classifier.predict(images)

    assert output is not None
    assert output.logits.shape == torch.Size([batch_size, num_classes])
    assert not torch.isnan(output.logits).any(), "Tensor contains NaN values"
    assert output.probabilities.shape == torch.Size([batch_size, num_classes])
    assert not torch.isnan(output.probabilities).any(), "Tensor contains NaN values"
    assert output.hidden_states.shape == torch.Size([batch_size, 256])
    assert not torch.isnan(output.hidden_states).any(), "Tensor contains NaN values"


@skip_if_package_missing("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_default_device():
    from lerobot.rewards.classifier.modeling_classifier import Classifier

    config = RewardClassifierConfig()
    assert config.device is None or config.device == "cpu"

    config.input_features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    config.num_cameras = 1
    classifier = Classifier(config)
    for p in classifier.parameters():
        assert p.device == torch.device("cpu")


@skip_if_package_missing("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_explicit_device_setup():
    from lerobot.rewards.classifier.modeling_classifier import Classifier

    config = RewardClassifierConfig(device="cpu")
    assert config.device == "cpu"

    config.input_features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    config.num_cameras = 1
    classifier = Classifier(config)
    for p in classifier.parameters():
        assert p.device == torch.device("cpu")