Store policy config alongside PEFT checkpoint

Before this change, the PEFT-wrapped policy did not save the policy's config
alongside the adapter config / weights, which prevented us from changing the
policy config. Now the policy config is saved in both full training and PEFT
training.

This change makes loading the PEFT policy adapter much easier as well.
This commit is contained in:
nemo
2025-06-22 19:46:10 +02:00
parent 7fd8b4c773
commit dc67b2ff3f
3 changed files with 27 additions and 33 deletions
+14
View File
@@ -79,6 +79,20 @@ def test_save_checkpoint(mock_save_training_state, tmp_path, optimizer):
mock_save_training_state.assert_called_once()
@patch("lerobot.common.utils.train_utils.save_training_state")
def test_save_checkpoint_peft(mock_save_training_state, tmp_path, optimizer):
    """PEFT checkpointing persists the adapter, the train config, and the policy config."""
    # Fake policy whose nested config exposes its own save_pretrained.
    policy = Mock()
    policy.config = Mock()
    policy.config.save_pretrained = Mock()
    # Training config flagged for PEFT mode.
    cfg = Mock(use_peft=True)

    save_checkpoint(tmp_path, 10, cfg, policy, optimizer)

    # Every artifact (adapter weights, train config, policy config) plus the
    # training state must each be written exactly once.
    for saver in (
        policy.save_pretrained,
        cfg.save_pretrained,
        policy.config.save_pretrained,
        mock_save_training_state,
    ):
        saver.assert_called_once()
def test_save_training_state(tmp_path, optimizer, scheduler):
save_training_state(tmp_path, 10, optimizer, scheduler)
assert (tmp_path / TRAINING_STATE_DIR).is_dir()