mirror of
https://github.com/huggingface/lerobot.git
synced 2026-05-16 09:09:48 +00:00
f0d2b37beb
* chore(dependencies): upgrade transformers + hggingface-hub + peft + scipy * chore(dependencies): bump pi0 family to transformers v5 * chore(dependencies): bump wall x to transformers v5 * chore(dependencies): bump gr00t to transformers v5 * chore(style): fix pre-commit * fix(policy): xvla forced_bos_token missing * test(rl): skip ci tests for resnet10 * Fix: full pi models support for transformer v5 (#2967) * fix(pi): remove loss truncation * fix(pi): remove state padding before tokenization * fix(pi): fix image padding value * fix from_pretrain * add transformer v5 changes * remove reference * more fixes * make it work * add support for rest of pi family * add pifast work * more changes * more changes * more cleanup * fix torch params * dtype fix * torch compile * embed mismatch fix * revert groot * more nit fixes * remove unused classes * more fixes * revert * nit * torch dtype warning fix * but back dynamic renaming * add tie embedding --------- Co-authored-by: Yufei Sun <skieyfly@gmail.com> * chore: fix XVLA in transformers v5 (#3006) * test(policies): enable wall x CI testing * style(test): pre-commit check * style(test): pre-commit * fix wall x for transformer v5 (#3008) * tv5 fix * various wall x fixes * Delete tests/policies/pi0_pi05/print_pi05_output_logits.py Signed-off-by: Jade Choghari <chogharijade@gmail.com> * sync modeling_florence2.py with chore/bump_transformers_v5 * more * more fixes * more * remove comment * more --------- Signed-off-by: Jade Choghari <chogharijade@gmail.com> * chore(dependencies): adjust dependencies versioning after transformers v5 (#3034) * chore(dependecies): adjust dependecies versioning after transformers v5 * fix(policies): remove deprecated input_embeds * fix(policies): dict _tied_weights_keys * chore(depedencies): common qwen-vl-utils * chore(dependencies): bump transformers to 5.2 * Fix policy testing for tv5 (#3032) * fix ci logger * other fix * fix mypy * change logits to torch2.10 * skip wallx| * remove logging 
--------- Co-authored-by: Steven Palma <imstevenpmwork@ieee.org> * feat(ci): log into HF to unblock some CI tests (#3007) * feat(ci): log into HF to unblock some CI tests * chore(ci): change hf call + secret name * fix(ci): temp fix for pi0 rtc test * test(policies): require_cuda for unblocked tests * test(policies): require_cuda wall_x * fic(tests): require_cuda outter most for pi0 * fix(test): return instead of yield --------- Signed-off-by: Steven Palma <imstevenpmwork@ieee.org> * style(test): fix pre-commit * chore(deps): upgrade transformers (#3050) * chore(test): use lerobot model * fix(policies): change default action tokenizer for wall x * sample on cpu * Revert "Merge branch 'chore/bump_transformers_v5' of https://github.com/huggingface/lerobot into chore/bump_transformers_v5" This reverts commitd9b76755f7, reversing changes made to89359cb0b6. * Reapply "Merge branch 'chore/bump_transformers_v5' of https://github.com/huggingface/lerobot into chore/bump_transformers_v5" This reverts commitc9914db78b. --------- Signed-off-by: Jade Choghari <chogharijade@gmail.com> Signed-off-by: Steven Palma <imstevenpmwork@ieee.org> Co-authored-by: Jade Choghari <chogharijade@gmail.com> Co-authored-by: Yufei Sun <skieyfly@gmail.com> Co-authored-by: Pepijn <pepijn@huggingface.co>
154 lines
5.4 KiB
Python
154 lines
5.4 KiB
Python
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
import pytest
import torch

from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig
from lerobot.policies.sac.reward_model.modeling_classifier import ClassifierOutput
from lerobot.utils.constants import OBS_IMAGE, REWARD
from tests.utils import require_package

|
def test_classifier_output():
    """The string form of ClassifierOutput lists logits, probabilities and hidden_states."""
    logits = torch.tensor([1, 2, 3])
    probabilities = torch.tensor([0.1, 0.2, 0.3])
    result = ClassifierOutput(logits=logits, probabilities=probabilities, hidden_states=None)

    expected = (
        "ClassifierOutput(logits=tensor([1, 2, 3]), "
        "probabilities=tensor([0.1000, 0.2000, 0.3000]), hidden_states=None)"
    )
    assert str(result) == expected
|
@require_package("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_binary_classifier_with_default_params():
    """Binary reward classifier: label extraction and prediction yield well-shaped, finite tensors."""
    from lerobot.policies.sac.reward_model.modeling_classifier import Classifier

    config = RewardClassifierConfig()
    config.input_features = {OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224))}
    config.output_features = {REWARD: PolicyFeature(type=FeatureType.REWARD, shape=(1,))}
    config.normalization_mapping = {
        "VISUAL": NormalizationMode.IDENTITY,
        "REWARD": NormalizationMode.IDENTITY,
    }
    config.num_cameras = 1
    classifier = Classifier(config)

    n = 10
    batch = {
        OBS_IMAGE: torch.rand((n, 3, 128, 128)),
        REWARD: torch.randint(low=0, high=2, size=(n,)).float(),
    }

    images, labels = classifier.extract_images_and_labels(batch)
    assert len(images) == 1
    assert images[0].shape == torch.Size([n, 3, 128, 128])
    assert labels.shape == torch.Size([n])

    prediction = classifier.predict(images)
    assert prediction is not None

    # Each output tensor must have the expected shape and contain no NaNs.
    assert prediction.logits.size() == torch.Size([n])
    assert not torch.isnan(prediction.logits).any(), "Tensor contains NaN values"
    assert prediction.probabilities.shape == torch.Size([n])
    assert not torch.isnan(prediction.probabilities).any(), "Tensor contains NaN values"
    assert prediction.hidden_states.shape == torch.Size([n, 256])
    assert not torch.isnan(prediction.hidden_states).any(), "Tensor contains NaN values"
|
@require_package("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_multiclass_classifier():
    """Multi-class reward classifier: per-class logits/probabilities have the right shape and no NaNs."""
    from lerobot.policies.sac.reward_model.modeling_classifier import Classifier

    num_classes = 5
    config = RewardClassifierConfig()
    config.input_features = {OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224))}
    config.output_features = {REWARD: PolicyFeature(type=FeatureType.REWARD, shape=(num_classes,))}
    config.num_cameras = 1
    config.num_classes = num_classes
    classifier = Classifier(config)

    n = 10
    batch = {
        OBS_IMAGE: torch.rand((n, 3, 128, 128)),
        REWARD: torch.rand((n, num_classes)),
    }

    images, labels = classifier.extract_images_and_labels(batch)
    assert len(images) == 1
    assert images[0].shape == torch.Size([n, 3, 128, 128])
    assert labels.shape == torch.Size([n, num_classes])

    prediction = classifier.predict(images)
    assert prediction is not None

    # Each output tensor must have the expected shape and contain no NaNs.
    assert prediction.logits.shape == torch.Size([n, num_classes])
    assert not torch.isnan(prediction.logits).any(), "Tensor contains NaN values"
    assert prediction.probabilities.shape == torch.Size([n, num_classes])
    assert not torch.isnan(prediction.probabilities).any(), "Tensor contains NaN values"
    assert prediction.hidden_states.shape == torch.Size([n, 256])
    assert not torch.isnan(prediction.hidden_states).any(), "Tensor contains NaN values"
|
@require_package("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_default_device():
    """A fresh RewardClassifierConfig defaults to CPU and every model parameter lives there."""
    from lerobot.policies.sac.reward_model.modeling_classifier import Classifier

    config = RewardClassifierConfig()
    assert config.device == "cpu"

    model = Classifier(config)
    cpu = torch.device("cpu")
    assert all(param.device == cpu for param in model.parameters())
|
@require_package("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_explicit_device_setup():
    """Passing device="cpu" explicitly is honored by the config and the model parameters."""
    from lerobot.policies.sac.reward_model.modeling_classifier import Classifier

    config = RewardClassifierConfig(device="cpu")
    assert config.device == "cpu"

    model = Classifier(config)
    cpu = torch.device("cpu")
    assert all(param.device == cpu for param in model.parameters())