mirror of
https://github.com/huggingface/lerobot.git
synced 2026-05-11 14:49:43 +00:00
8a3d64033f
* feat(rewards): add RewardModelConfig and PreTrainedRewardModel base classes * refactor(rewards): migrate Classifier from policies/sac/reward_model/ to rewards/classifier/ * refactor(rewards): migrate SARM from policies/sarm/ to rewards/sarm/ * refactor(rewards): add rewards/factory.py and remove reward model code from policies/factory.py * refactor(rewards): update imports and delete old reward model locations * test(rewards): add reward model tests and update existing test imports * fix(rewards): restore full Classifier and SARM implementations * test(rewards): restore missing CUDA and mixed precision classifier processor tests * refactor(lerobot_train.py): remove rabc specific configuration and replace it with a generic samplerweight class in lerobot_train * refactor(lerobot_train.py): add missing sampling weight script * linter + missing files * add testing for sampl weighter * revert some useless changes, improve typing * update docs * add automatic detection of the progress path * remove type exp * improve comment * fix: move rabc.py to rewards/sarm/ and update import paths * refactor(imports): update reward model imports to new module structure * refactor(imports): update reward model imports to reflect new module structure * refactor(imports): conditionally import pandas based on availability * feat(configs): add reward_model field to TrainPipelineConfig and Hub fields to RewardModelConfig * refactor(policies): remove reward model branches from policy factory and __init__ * refactor(rewards): expand __init__ facade and fix SARMConfig __post_init__ crash * feat(train): route reward model training through rewards/factory instead of policies/factory * refactor(train): streamline reward model training logic * fix(rewards): ensure FileNotFoundError is raised for missing config_file * refactor(train): update __get_path_fields__ to include reward_model for config loading * refactor(classifier): remove redundant input normalization in predict_reward method * fix(train): raise ValueError for non-trainable reward models in train function * refactor(pretrained_rm): add model card template * refactor(tests): reward models * refactor(sarm): update reset method and remove unused action prediction methods * refactor(wandb): differentiate tags for reward model and policy training in cfg_to_group function * fix(train): raise ValueError for PEFT usage in reward model training * refactor(rewards): enhance RewardModelConfig with device handling and delta indices properties --------- Co-authored-by: Michel Aractingi <michel.aractingi@huggingface.co>
67 lines
2.1 KiB
Python
67 lines
2.1 KiB
Python
import torch
|
|
|
|
from lerobot.datasets import LeRobotDataset
|
|
from lerobot.rewards import RewardClassifierConfig, make_reward_model, make_reward_pre_post_processors
|
|
|
|
|
|
def main():
|
|
# Device to use for training
|
|
device = "mps" # or "cuda", or "cpu"
|
|
|
|
# Load the dataset used for training
|
|
repo_id = "lerobot/example_hil_serl_dataset"
|
|
dataset = LeRobotDataset(repo_id)
|
|
|
|
# Configure the policy to extract features from the image frames
|
|
camera_keys = dataset.meta.camera_keys
|
|
|
|
config = RewardClassifierConfig(
|
|
num_cameras=len(camera_keys),
|
|
device=device,
|
|
# backbone model to extract features from the image frames
|
|
model_name="microsoft/resnet-18",
|
|
)
|
|
|
|
# Make reward model, preprocessor, and optimizer
|
|
reward_model = make_reward_model(config, dataset_stats=dataset.meta.stats)
|
|
optimizer = config.get_optimizer_preset().build(reward_model.parameters())
|
|
preprocessor, _ = make_reward_pre_post_processors(config, dataset_stats=dataset.meta.stats)
|
|
|
|
classifier_id = "<user>/reward_classifier_hil_serl_example"
|
|
|
|
# Instantiate a dataloader
|
|
dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
|
|
|
|
# Training loop
|
|
num_epochs = 5
|
|
for epoch in range(num_epochs):
|
|
total_loss = 0
|
|
total_accuracy = 0
|
|
for batch in dataloader:
|
|
# Preprocess the batch and move it to the correct device.
|
|
batch = preprocessor(batch)
|
|
|
|
# Forward pass
|
|
loss, output_dict = reward_model.forward(batch)
|
|
|
|
# Backward pass and optimization
|
|
optimizer.zero_grad()
|
|
loss.backward()
|
|
optimizer.step()
|
|
|
|
total_loss += loss.item()
|
|
total_accuracy += output_dict["accuracy"]
|
|
|
|
avg_loss = total_loss / len(dataloader)
|
|
avg_accuracy = total_accuracy / len(dataloader)
|
|
print(f"Epoch {epoch + 1}/{num_epochs}, Loss: {avg_loss:.4f}, Accuracy: {avg_accuracy:.2f}%")
|
|
|
|
print("Training finished!")
|
|
|
|
# You can now save the trained reward model.
|
|
reward_model.push_to_hub(classifier_id)
|
|
|
|
|
|
if __name__ == "__main__":
|
|
main()
|