mirror of
https://github.com/huggingface/lerobot.git
synced 2026-05-11 14:49:43 +00:00
Add peft as extra dependency, mark tests
Fast tests currently fail because of the missing dependency.
This commit is contained in:
@@ -4,6 +4,8 @@
|
||||
large pretrained models such as pre-trained policies (e.g., SmolVLA, π₀, ...) to new tasks without training all
|
||||
of the model's parameters while yielding comparable performance.
|
||||
|
||||
Install the `lerobot[peft]` optional package to enable PEFT support.
|
||||
|
||||
To read about all the possible methods of adaptation, please refer to the [🤗 PEFT docs](https://huggingface.co/docs/peft/index).
|
||||
|
||||
## Training SmolVLA
|
||||
|
||||
+3
-1
@@ -139,6 +139,7 @@ hilserl = ["lerobot[transformers-dep]", "gym-hil>=0.1.13,<0.2.0", "lerobot[grpci
|
||||
|
||||
# Features
|
||||
async = ["lerobot[grpcio-dep]", "matplotlib>=3.10.3,<4.0.0"]
|
||||
peft = ["lerobot[transformers-dep]", "peft>=0.18.0"]
|
||||
|
||||
# Development
|
||||
dev = ["pre-commit>=3.7.0,<5.0.0", "debugpy>=1.8.1,<1.9.0", "lerobot[grpcio-dep]", "grpcio-tools==1.73.1"]
|
||||
@@ -174,7 +175,8 @@ all = [
|
||||
"lerobot[phone]",
|
||||
"lerobot[libero]",
|
||||
"lerobot[metaworld]",
|
||||
"lerobot[sarm]"
|
||||
"lerobot[sarm]",
|
||||
"lerobot[peft]",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,6 +5,8 @@ from unittest.mock import MagicMock, patch
|
||||
import pytest
|
||||
from safetensors.torch import load_file
|
||||
|
||||
from .utils import require_package
|
||||
|
||||
|
||||
def run_command(cmd, module, args):
|
||||
module = importlib.import_module(f"lerobot.scripts.{module}")
|
||||
@@ -29,6 +31,7 @@ def resolve_model_id_for_peft_training(policy_type):
|
||||
|
||||
|
||||
@pytest.mark.parametrize("policy_type", ["smolvla"])
|
||||
@require_package("peft")
|
||||
def test_peft_training_push_to_hub_works(policy_type, tmp_path):
|
||||
"""Ensure that push to hub stores PEFT only the adapter, not the full model weights."""
|
||||
output_dir = tmp_path / f"output_{policy_type}"
|
||||
@@ -67,6 +70,7 @@ def test_peft_training_push_to_hub_works(policy_type, tmp_path):
|
||||
|
||||
|
||||
@pytest.mark.parametrize("policy_type", ["smolvla"])
|
||||
@require_package("peft")
|
||||
def test_peft_training_works(policy_type, tmp_path):
|
||||
"""Check whether the standard case of fine-tuning a (partially) pre-trained policy with PEFT works."""
|
||||
output_dir = tmp_path / f"output_{policy_type}"
|
||||
@@ -115,6 +119,7 @@ def test_peft_training_works(policy_type, tmp_path):
|
||||
|
||||
|
||||
@pytest.mark.parametrize("policy_type", ["smolvla"])
|
||||
@require_package("peft")
|
||||
def test_peft_training_params_are_fewer(policy_type, tmp_path):
|
||||
"""Check whether the standard case of fine-tuning a (partially) pre-trained policy with PEFT works."""
|
||||
output_dir = tmp_path / f"output_{policy_type}"
|
||||
@@ -165,6 +170,7 @@ def dummy_make_robot_from_config(*args, **kwargs):
|
||||
|
||||
|
||||
@pytest.mark.parametrize("policy_type", ["smolvla"])
|
||||
@require_package("peft")
|
||||
def test_peft_record_loads_policy(policy_type, tmp_path):
|
||||
"""Train a policy with PEFT and attempt to load it with `lerobot-record`."""
|
||||
from peft import PeftModel
|
||||
|
||||
Reference in New Issue
Block a user