Add peft as extra dependency, mark tests

Fast tests currently fail because of the missing dependency.
This commit is contained in:
nemo
2025-12-20 19:19:06 +01:00
parent 4bc75776f7
commit ad4a82b77e
3 changed files with 11 additions and 1 deletions
+2
View File
@@ -4,6 +4,8 @@
large pretrained models such as pre-trained policies (e.g., SmolVLA, π₀, ...) to new tasks without training all
of the model's parameters while yielding comparable performance.
Install the `lerobot[peft]` optional package to enable PEFT support.
To read about all the possible methods of adaptation, please refer to the [🤗 PEFT docs](https://huggingface.co/docs/peft/index).
## Training SmolVLA
+3 -1
View File
@@ -139,6 +139,7 @@ hilserl = ["lerobot[transformers-dep]", "gym-hil>=0.1.13,<0.2.0", "lerobot[grpci
# Features
async = ["lerobot[grpcio-dep]", "matplotlib>=3.10.3,<4.0.0"]
peft = ["lerobot[transformers-dep]", "peft>=0.18.0"]
# Development
dev = ["pre-commit>=3.7.0,<5.0.0", "debugpy>=1.8.1,<1.9.0", "lerobot[grpcio-dep]", "grpcio-tools==1.73.1"]
@@ -174,7 +175,8 @@ all = [
"lerobot[phone]",
"lerobot[libero]",
"lerobot[metaworld]",
"lerobot[sarm]"
"lerobot[sarm]",
"lerobot[peft]",
]
[project.scripts]
+6
View File
@@ -5,6 +5,8 @@ from unittest.mock import MagicMock, patch
import pytest
from safetensors.torch import load_file
from .utils import require_package
def run_command(cmd, module, args):
module = importlib.import_module(f"lerobot.scripts.{module}")
@@ -29,6 +31,7 @@ def resolve_model_id_for_peft_training(policy_type):
@pytest.mark.parametrize("policy_type", ["smolvla"])
@require_package("peft")
def test_peft_training_push_to_hub_works(policy_type, tmp_path):
"""Ensure that push to hub stores PEFT only the adapter, not the full model weights."""
output_dir = tmp_path / f"output_{policy_type}"
@@ -67,6 +70,7 @@ def test_peft_training_push_to_hub_works(policy_type, tmp_path):
@pytest.mark.parametrize("policy_type", ["smolvla"])
@require_package("peft")
def test_peft_training_works(policy_type, tmp_path):
"""Check whether the standard case of fine-tuning a (partially) pre-trained policy with PEFT works."""
output_dir = tmp_path / f"output_{policy_type}"
@@ -115,6 +119,7 @@ def test_peft_training_works(policy_type, tmp_path):
@pytest.mark.parametrize("policy_type", ["smolvla"])
@require_package("peft")
def test_peft_training_params_are_fewer(policy_type, tmp_path):
"""Check whether the standard case of fine-tuning a (partially) pre-trained policy with PEFT works."""
output_dir = tmp_path / f"output_{policy_type}"
@@ -165,6 +170,7 @@ def dummy_make_robot_from_config(*args, **kwargs):
@pytest.mark.parametrize("policy_type", ["smolvla"])
@require_package("peft")
def test_peft_record_loads_policy(policy_type, tmp_path):
"""Train a policy with PEFT and attempt to load it with `lerobot-record`."""
from peft import PeftModel