Mirror of https://github.com/huggingface/lerobot.git, synced 2026-05-17 17:50:09 +00:00.
Store policy config alongside PEFT checkpoint
Before this change, the PEFT-wrapped policy did not save the policy's config alongside the adapter config / weights, which prevented us from changing the policy config. Now the policy config is saved in both full training and PEFT training. This change also makes loading the PEFT policy adapter much easier.
This commit is contained in:
@@ -79,6 +79,20 @@ def test_save_checkpoint(mock_save_training_state, tmp_path, optimizer):
|
||||
mock_save_training_state.assert_called_once()
|
||||
|
||||
|
||||
@patch("lerobot.common.utils.train_utils.save_training_state")
def test_save_checkpoint_peft(mock_save_training_state, tmp_path, optimizer):
    """Saving a checkpoint with PEFT enabled persists the adapter weights,
    the training config, AND the policy config, then records training state.

    Regression test for the change that added saving the policy config
    alongside the PEFT adapter artifacts.
    """
    # Mock() auto-creates child mocks on attribute access, so there is no
    # need to explicitly assign policy.config or policy.config.save_pretrained.
    policy = Mock()
    cfg = Mock()
    cfg.use_peft = True  # route save_checkpoint through the PEFT branch

    save_checkpoint(tmp_path, 10, cfg, policy, optimizer)

    # Adapter weights, training config, and policy config must each be saved once.
    policy.save_pretrained.assert_called_once()
    cfg.save_pretrained.assert_called_once()
    policy.config.save_pretrained.assert_called_once()
    # Training state (optimizer etc.) is saved via the patched helper.
    mock_save_training_state.assert_called_once()
|
||||
|
||||
|
||||
def test_save_training_state(tmp_path, optimizer, scheduler):
    """Saving the training state creates the training-state directory."""
    step = 10
    save_training_state(tmp_path, step, optimizer, scheduler)

    state_dir = tmp_path / TRAINING_STATE_DIR
    assert state_dir.is_dir()
|
||||
|
||||
Reference in New Issue
Block a user