diff --git a/benchmarks/video/README.md b/benchmarks/video/README.md index 490a4b495..1feee69c4 100644 --- a/benchmarks/video/README.md +++ b/benchmarks/video/README.md @@ -28,9 +28,9 @@ We don't expect the same optimal settings for a dataset of images from a simulat For these reasons, we run this benchmark on four representative datasets: - `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera. -- `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera. -- `aliberts/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera. -- `aliberts/kitchen`: (1080 x 1920 pixels) real-world indoor, fixed camera. +- `lerobot/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera. +- `lerobot/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera. +- `lerobot/kitchen`: (1080 x 1920 pixels) real-world indoor, fixed camera. Note: The datasets used for this benchmark need to be image datasets, not video datasets. 
@@ -179,7 +179,7 @@ python benchmark/video/run_video_benchmark.py \ --output-dir outputs/video_benchmark \ --repo-ids \ lerobot/pusht_image \ - aliberts/aloha_mobile_shrimp_image \ + lerobot/aloha_mobile_shrimp_image \ --vcodec libx264 libx265 \ --pix-fmt yuv444p yuv420p \ --g 2 20 None \ @@ -203,9 +203,9 @@ python benchmark/video/run_video_benchmark.py \ --output-dir outputs/video_benchmark \ --repo-ids \ lerobot/pusht_image \ - aliberts/aloha_mobile_shrimp_image \ - aliberts/paris_street \ - aliberts/kitchen \ + lerobot/aloha_mobile_shrimp_image \ + lerobot/paris_street \ + lerobot/kitchen \ --vcodec libx264 libx265 \ --pix-fmt yuv444p yuv420p \ --g 1 2 3 4 5 6 10 15 20 40 None \ @@ -221,9 +221,9 @@ python benchmark/video/run_video_benchmark.py \ --output-dir outputs/video_benchmark \ --repo-ids \ lerobot/pusht_image \ - aliberts/aloha_mobile_shrimp_image \ - aliberts/paris_street \ - aliberts/kitchen \ + lerobot/aloha_mobile_shrimp_image \ + lerobot/paris_street \ + lerobot/kitchen \ --vcodec libsvtav1 \ --pix-fmt yuv420p \ --g 1 2 3 4 5 6 10 15 20 40 None \ @@ -252,37 +252,37 @@ Since we're using av1 encoding, we're choosing the `pyav` decoder as `video_read These tables show the results for `g=2` and `crf=30`, using `timestamps-modes=6_frames` and `backend=pyav` -| video_images_size_ratio | vcodec | pix_fmt | | | | -| ---------------------------------- | ---------- | ------- | --------- | --------- | --------- | -| | libx264 | | libx265 | | libsvtav1 | -| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | -| lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% | -| aliberts/aloha_mobile_shrimp_image | 2.14% | 2.11% | 1.38% | **1.37%** | 5.59% | -| aliberts/paris_street | 2.12% | 2.13% | **1.54%** | **1.54%** | 4.43% | -| aliberts/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% | +| video_images_size_ratio | vcodec | pix_fmt | | | | +| --------------------------------- | ---------- | ------- | --------- | --------- | --------- | 
+| | libx264 | | libx265 | | libsvtav1 | +| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | +| lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% | +| lerobot/aloha_mobile_shrimp_image | 2.14% | 2.11% | 1.38% | **1.37%** | 5.59% | +| lerobot/paris_street | 2.12% | 2.13% | **1.54%** | **1.54%** | 4.43% | +| lerobot/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% | -| video_images_load_time_ratio | vcodec | pix_fmt | | | | -| ---------------------------------- | ------- | ------- | -------- | ------- | --------- | -| | libx264 | | libx265 | | libsvtav1 | -| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | -| lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 | -| aliberts/aloha_mobile_shrimp_image | 11.80 | 7.92 | 0.71 | 0.85 | **0.48** | -| aliberts/paris_street | 2.21 | 2.05 | 0.36 | 0.49 | **0.30** | -| aliberts/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** | +| video_images_load_time_ratio | vcodec | pix_fmt | | | | +| --------------------------------- | ------- | ------- | -------- | ------- | --------- | +| | libx264 | | libx265 | | libsvtav1 | +| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | +| lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 | +| lerobot/aloha_mobile_shrimp_image | 11.80 | 7.92 | 0.71 | 0.85 | **0.48** | +| lerobot/paris_street | 2.21 | 2.05 | 0.36 | 0.49 | **0.30** | +| lerobot/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** | -| | | vcodec | pix_fmt | | | | -| ---------------------------------- | -------- | -------- | ------------ | -------- | --------- | ------------ | -| | | libx264 | | libx265 | | libsvtav1 | -| repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | -| lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 | -| | avg_psnr | 35.44 | 37.07 | 35.49 | **37.30** | 37.20 | -| | avg_ssim | 98.28% | **98.85%** | 98.31% | 98.84% | 98.72% | -| aliberts/aloha_mobile_shrimp_image | avg_mse | 2.76E-04 | 
2.59E-04 | 3.17E-04 | 3.06E-04 | **1.30E-04** | -| | avg_psnr | 35.91 | 36.21 | 35.88 | 36.09 | **40.17** | -| | avg_ssim | 95.19% | 95.18% | 95.00% | 95.05% | **97.73%** | -| aliberts/paris_street | avg_mse | 6.89E-04 | 6.70E-04 | 4.03E-03 | 4.02E-03 | **3.09E-04** | -| | avg_psnr | 33.48 | 33.68 | 32.05 | 32.15 | **35.40** | -| | avg_ssim | 93.76% | 93.75% | 89.46% | 89.46% | **95.46%** | -| aliberts/kitchen | avg_mse | 2.50E-04 | 2.24E-04 | 4.28E-04 | 4.18E-04 | **1.53E-04** | -| | avg_psnr | 36.73 | 37.33 | 36.56 | 36.75 | **39.12** | -| | avg_ssim | 95.47% | 95.58% | 95.52% | 95.53% | **96.82%** | +| | | vcodec | pix_fmt | | | | +| --------------------------------- | -------- | -------- | ------------ | -------- | --------- | ------------ | +| | | libx264 | | libx265 | | libsvtav1 | +| repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | +| lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 | +| | avg_psnr | 35.44 | 37.07 | 35.49 | **37.30** | 37.20 | +| | avg_ssim | 98.28% | **98.85%** | 98.31% | 98.84% | 98.72% | +| lerobot/aloha_mobile_shrimp_image | avg_mse | 2.76E-04 | 2.59E-04 | 3.17E-04 | 3.06E-04 | **1.30E-04** | +| | avg_psnr | 35.91 | 36.21 | 35.88 | 36.09 | **40.17** | +| | avg_ssim | 95.19% | 95.18% | 95.00% | 95.05% | **97.73%** | +| lerobot/paris_street | avg_mse | 6.89E-04 | 6.70E-04 | 4.03E-03 | 4.02E-03 | **3.09E-04** | +| | avg_psnr | 33.48 | 33.68 | 32.05 | 32.15 | **35.40** | +| | avg_ssim | 93.76% | 93.75% | 89.46% | 89.46% | **95.46%** | +| lerobot/kitchen | avg_mse | 2.50E-04 | 2.24E-04 | 4.28E-04 | 4.18E-04 | **1.53E-04** | +| | avg_psnr | 36.73 | 37.33 | 36.56 | 36.75 | **39.12** | +| | avg_ssim | 95.47% | 95.58% | 95.52% | 95.53% | **96.82%** | diff --git a/docs/source/earthrover_mini_plus.mdx b/docs/source/earthrover_mini_plus.mdx index d8083336a..dd9c2ad2b 100644 --- a/docs/source/earthrover_mini_plus.mdx +++ b/docs/source/earthrover_mini_plus.mdx @@ -185,7 +185,7 @@ echo 
$HF_USER Use the standard recording command: ```bash -python src/lerobot/scripts/lerobot_record.py \ +lerobot-record \ --robot.type=earthrover_mini_plus \ --teleop.type=keyboard_rover \ --dataset.repo_id=your_username/dataset_name \ diff --git a/docs/source/hope_jr.mdx b/docs/source/hope_jr.mdx index 856febb95..026cd084a 100644 --- a/docs/source/hope_jr.mdx +++ b/docs/source/hope_jr.mdx @@ -224,7 +224,7 @@ lerobot-record \ --teleop.port=/dev/tty.usbmodem1201 \ --teleop.id=right \ --teleop.side=right \ - --dataset.repo_id=nepyope/hand_record_test_with_video_data \ + --dataset.repo_id=${HF_USER}/hand_record_test_with_video_data \ --dataset.single_task="Hand recording test with video data" \ --dataset.num_episodes=1 \ --dataset.episode_time_s=5 \ @@ -241,7 +241,7 @@ lerobot-replay \ --robot.port=/dev/tty.usbmodem58760432281 \ --robot.id=right \ --robot.side=right \ - --dataset.repo_id=nepyope/hand_record_test_with_camera \ + --dataset.repo_id=${HF_USER}/hand_record_test_with_camera \ --dataset.episode=0 ``` @@ -249,13 +249,13 @@ lerobot-replay \ ```bash lerobot-train \ - --dataset.repo_id=nepyope/hand_record_test_with_video_data \ + --dataset.repo_id=${HF_USER}/hand_record_test_with_video_data \ --policy.type=act \ --output_dir=outputs/train/hopejr_hand \ --job_name=hopejr \ --policy.device=mps \ --wandb.enable=true \ - --policy.repo_id=nepyope/hand_test_policy + --policy.repo_id=${HF_USER}/hand_test_policy ``` ### Evaluate @@ -270,7 +270,7 @@ lerobot-record \ --robot.side=right \ --robot.cameras='{"main": {"type": "opencv", "index_or_path": 0, "width": 640, "height": 480, "fps": 30}}' \ --display_data=false \ - --dataset.repo_id=nepyope/eval_hopejr \ + --dataset.repo_id=${HF_USER}/eval_hopejr \ --dataset.single_task="Evaluate hopejr hand policy" \ --dataset.num_episodes=10 \ --policy.path=outputs/train/hopejr_hand/checkpoints/last/pretrained_model diff --git a/docs/source/pi0.mdx b/docs/source/pi0.mdx index 93e0b4c88..879bbd16d 100644 --- a/docs/source/pi0.mdx +++ b/docs/source/pi0.mdx @@ -60,7 +60,7 @@ 
policy.type=pi0 For training π₀, you can use the standard LeRobot training script with the appropriate configuration: ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your_dataset \ --policy.type=pi0 \ --output_dir=./outputs/pi0_training \ diff --git a/docs/source/pi05.mdx b/docs/source/pi05.mdx index dbf118aa3..8abaca989 100644 --- a/docs/source/pi05.mdx +++ b/docs/source/pi05.mdx @@ -56,7 +56,7 @@ policy.type=pi05 Here's a complete training command for finetuning the base π₀.₅ model on your own dataset: ```bash -python src/lerobot/scripts/lerobot_train.py\ +lerobot-train \ --dataset.repo_id=your_dataset \ --policy.type=pi05 \ --output_dir=./outputs/pi05_training \ diff --git a/docs/source/sarm.mdx b/docs/source/sarm.mdx index 65e49792b..cd488fe1f 100644 --- a/docs/source/sarm.mdx +++ b/docs/source/sarm.mdx @@ -269,7 +269,7 @@ This generates visualizations showing video frames with subtask boundaries overl Train with **no annotations** - uses linear progress from 0 to 1: ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your-username/your-dataset \ --policy.type=sarm \ --policy.annotation_mode=single_stage \ @@ -288,7 +288,7 @@ python src/lerobot/scripts/lerobot_train.py \ Train with **dense annotations only** (sparse auto-generated): ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your-username/your-dataset \ --policy.type=sarm \ --policy.annotation_mode=dense_only \ @@ -307,7 +307,7 @@ python src/lerobot/scripts/lerobot_train.py \ Train with **both sparse and dense annotations**: ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your-username/your-dataset \ --policy.type=sarm \ --policy.annotation_mode=dual \ @@ -468,7 +468,7 @@ This script: Once you have the progress file, train your policy with RA-BC weighting. The progress file is auto-detected from the dataset path (`sarm_progress.parquet`). 
Currently PI0, PI0.5 and SmolVLA are supported with RA-BC: ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your-username/your-dataset \ --policy.type=pi0 \ --use_rabc=true \ diff --git a/docs/source/unitree_g1.mdx b/docs/source/unitree_g1.mdx index ea6bf54ad..4c5d28924 100644 --- a/docs/source/unitree_g1.mdx +++ b/docs/source/unitree_g1.mdx @@ -216,7 +216,7 @@ lerobot-teleoperate \ ### Record Dataset in Simulation ```bash -python -m lerobot.scripts.lerobot_record \ +lerobot-record \ --robot.type=unitree_g1 \ --robot.is_simulation=true \ --robot.cameras='{"global_view": {"type": "zmq", "server_address": "localhost", "port": 5555, "camera_name": "head_camera", "width": 640, "height": 480, "fps": 30}}' \ @@ -266,7 +266,7 @@ lerobot-teleoperate \ ### Record Dataset on Real Robot ```bash -python -m lerobot.scripts.lerobot_record \ +lerobot-record \ --robot.type=unitree_g1 \ --robot.is_simulation=false \ --robot.cameras='{"global_view": {"type": "zmq", "server_address": "172.18.129.215", "port": 5555, "camera_name": "head_camera", "width": 640, "height": 480, "fps": 30}}' \ diff --git a/docs/source/walloss.mdx b/docs/source/walloss.mdx index c0756c087..e9785cc93 100644 --- a/docs/source/walloss.mdx +++ b/docs/source/walloss.mdx @@ -45,7 +45,7 @@ policy.type=wall_x For training WallX, you can use the standard LeRobot training script with the appropriate configuration: ```bash -python src/lerobot/scripts/lerobot_train.py \ +lerobot-train \ --dataset.repo_id=your_dataset \ --policy.type=wall_x \ --output_dir=./outputs/wallx_training \ diff --git a/docs/source/xvla.mdx b/docs/source/xvla.mdx index dd7d1ef57..97e04d4ec 100644 --- a/docs/source/xvla.mdx +++ b/docs/source/xvla.mdx @@ -154,7 +154,7 @@ lerobot-train \ ```bash lerobot-train \ - --dataset.repo_id=pepijn223/bimanual-so100-handover-cube \ + --dataset.repo_id=/bimanual-so100-handover-cube \ --output_dir=./outputs/xvla_bimanual \ --job_name=xvla_so101_training \ 
--policy.path="lerobot/xvla-base" \ diff --git a/examples/backward_compatibility/replay.py b/examples/backward_compatibility/replay.py index 8de5ba197..f7c47bec5 100644 --- a/examples/backward_compatibility/replay.py +++ b/examples/backward_compatibility/replay.py @@ -22,7 +22,7 @@ lerobot-replay \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.id=black \ - --dataset.repo_id=aliberts/record-test \ + --dataset.repo_id=/record-test \ --dataset.episode=2 ``` """ diff --git a/examples/rtc/eval_dataset.py b/examples/rtc/eval_dataset.py index 4652df107..613fd67d7 100644 --- a/examples/rtc/eval_dataset.py +++ b/examples/rtc/eval_dataset.py @@ -27,8 +27,8 @@ measuring consistency and ground truth alignment. Usage: # Basic usage with smolvla policy uv run python examples/rtc/eval_dataset.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ - --dataset.repo_id=helper2424/check_rtc \ + --policy.path=/smolvla_check_rtc_last3 \ + --dataset.repo_id=/check_rtc \ --rtc.execution_horizon=8 \ --device=mps \ --rtc.max_guidance_weight=10.0 \ @@ -58,16 +58,16 @@ Usage: --device=cuda uv run python examples/rtc/eval_dataset.py \ - --policy.path=lipsop/reuben_pi0 \ - --dataset.repo_id=ReubenLim/so101_cube_in_cup \ + --policy.path=/reuben_pi0 \ + --dataset.repo_id=/so101_cube_in_cup \ --rtc.execution_horizon=8 \ --device=cuda # With torch.compile for faster inference (PyTorch 2.0+) # Note: CUDA graphs disabled by default due to in-place ops in denoising loop uv run python examples/rtc/eval_dataset.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ - --dataset.repo_id=helper2424/check_rtc \ + --policy.path=/smolvla_check_rtc_last3 \ + --dataset.repo_id=/check_rtc \ --rtc.execution_horizon=8 \ --device=mps \ --use_torch_compile=true \ @@ -75,8 +75,8 @@ Usage: # With torch.compile on CUDA (CUDA graphs disabled by default) uv run python examples/rtc/eval_dataset.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ - 
--dataset.repo_id=helper2424/check_rtc \ + --policy.path=/smolvla_check_rtc_last3 \ + --dataset.repo_id=/check_rtc \ --rtc.execution_horizon=8 \ --device=cuda \ --use_torch_compile=true \ @@ -84,8 +84,8 @@ Usage: # Enable CUDA graphs (advanced - may cause tensor aliasing errors) uv run python examples/rtc/eval_dataset.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ - --dataset.repo_id=helper2424/check_rtc \ + --policy.path=/smolvla_check_rtc_last3 \ + --dataset.repo_id=/check_rtc \ --use_torch_compile=true \ --torch_compile_backend=inductor \ --torch_compile_mode=max-autotune \ diff --git a/examples/rtc/eval_with_real_robot.py b/examples/rtc/eval_with_real_robot.py index 1470899d9..4c803eb7e 100644 --- a/examples/rtc/eval_with_real_robot.py +++ b/examples/rtc/eval_with_real_robot.py @@ -28,7 +28,7 @@ For simulation environments, see eval_with_simulation.py Usage: # Run RTC with Real robot with RTC uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ + --policy.path=/smolvla_check_rtc_last3 \ --policy.device=mps \ --rtc.enabled=true \ --rtc.execution_horizon=20 \ @@ -41,7 +41,7 @@ Usage: # Run RTC with Real robot without RTC uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=helper2424/smolvla_check_rtc_last3 \ + --policy.path=/smolvla_check_rtc_last3 \ --policy.device=mps \ --rtc.enabled=false \ --robot.type=so100_follower \ @@ -53,7 +53,7 @@ Usage: # Run RTC with Real robot with pi0.5 policy uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=helper2424/pi05_check_rtc \ + --policy.path=/pi05_check_rtc \ --policy.device=mps \ --rtc.enabled=true \ --rtc.execution_horizon=20 \ diff --git a/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py b/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py index 74be6bfa4..7be37a1b1 100644 --- a/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py +++ b/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py @@ -529,7 +529,7 @@ if __name__ == 
"__main__": type=str, required=True, help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset " - "(e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).", + "(e.g. `lerobot/pusht`, `lerobot/aloha_sim_insertion_human`).", ) parser.add_argument( "--branch", diff --git a/src/lerobot/policies/sarm/compute_rabc_weights.py b/src/lerobot/policies/sarm/compute_rabc_weights.py index 5b6ea6e9b..485c1096b 100644 --- a/src/lerobot/policies/sarm/compute_rabc_weights.py +++ b/src/lerobot/policies/sarm/compute_rabc_weights.py @@ -27,18 +27,18 @@ Usage: # Full RA-BC computation with visualizations python src/lerobot/policies/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ - --reward-model-path pepijn223/sarm_single_uni4 + --reward-model-path your-username/sarm_single_uni4 # Faster computation with stride (compute every 5 frames, interpolate the rest) python src/lerobot/policies/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ - --reward-model-path pepijn223/sarm_single_uni4 \\ + --reward-model-path your-username/sarm_single_uni4 \\ --stride 5 # Visualize predictions only (no RA-BC computation) python src/lerobot/policies/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ - --reward-model-path pepijn223/sarm_single_uni4 \\ + --reward-model-path your-username/sarm_single_uni4 \\ --visualize-only \\ --num-visualizations 5 @@ -714,12 +714,12 @@ Examples: # Full RA-BC computation with visualizations python src/lerobot/policies/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ - --reward-model-path pepijn223/sarm_single_uni4 + --reward-model-path your-username/sarm_single_uni4 # Visualize predictions only (no RA-BC computation) python src/lerobot/policies/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ - --reward-model-path pepijn223/sarm_single_uni4 \\ + --reward-model-path your-username/sarm_single_uni4 \\ 
--visualize-only \\ --num-visualizations 10 """, diff --git a/src/lerobot/policies/smolvla/modeling_smolvla.py b/src/lerobot/policies/smolvla/modeling_smolvla.py index 60b968a42..10544a949 100644 --- a/src/lerobot/policies/smolvla/modeling_smolvla.py +++ b/src/lerobot/policies/smolvla/modeling_smolvla.py @@ -30,7 +30,7 @@ Example of finetuning the smolvla pretrained model (`smolvla_base`): ```bash lerobot-train \ --policy.path=lerobot/smolvla_base \ ---dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \ +--dataset.repo_id=${HF_USER}/svla_so100_task1_v3 \ --batch_size=64 \ --steps=200000 ``` @@ -40,7 +40,7 @@ and an action expert. ```bash lerobot-train \ --policy.type=smolvla \ ---dataset.repo_id=danaaubakirova/svla_so100_task1_v3 \ +--dataset.repo_id=${HF_USER}/svla_so100_task1_v3 \ --batch_size=64 \ --steps=200000 ``` diff --git a/src/lerobot/scripts/lerobot_edit_dataset.py b/src/lerobot/scripts/lerobot_edit_dataset.py index 06e256fa2..afdc95efd 100644 --- a/src/lerobot/scripts/lerobot_edit_dataset.py +++ b/src/lerobot/scripts/lerobot_edit_dataset.py @@ -24,100 +24,100 @@ When new_repo_id is specified, creates a new dataset. 
Usage Examples: Delete episodes 0, 2, and 5 from a dataset: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type delete_episodes \ --operation.episode_indices "[0, 2, 5]" Delete episodes and save to a new dataset: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --new_repo_id lerobot/pusht_filtered \ --operation.type delete_episodes \ --operation.episode_indices "[0, 2, 5]" Split dataset by fractions: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type split \ --operation.splits '{"train": 0.8, "val": 0.2}' Split dataset by episode indices: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type split \ --operation.splits '{"train": [0, 1, 2, 3], "val": [4, 5]}' Split into more than two splits: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type split \ --operation.splits '{"train": 0.6, "val": 0.2, "test": 0.2}' Merge multiple datasets: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_merged \ --operation.type merge \ --operation.repo_ids "['lerobot/pusht_train', 'lerobot/pusht_val']" Remove camera feature: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type remove_feature \ --operation.feature_names "['observation.images.top']" Modify tasks - set a single task for all episodes (WARNING: modifies in-place): - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type modify_tasks \ --operation.new_task "Pick up the cube and place it" Modify tasks - set different tasks for specific episodes (WARNING: modifies in-place): - python -m lerobot.scripts.lerobot_edit_dataset \ + 
lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type modify_tasks \ --operation.episode_tasks '{"0": "Task A", "1": "Task B", "2": "Task A"}' Modify tasks - set default task with overrides for specific episodes (WARNING: modifies in-place): - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type modify_tasks \ --operation.new_task "Default task" \ --operation.episode_tasks '{"5": "Special task for episode 5"}' Convert image dataset to video format and save locally: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_image \ --operation.type convert_image_to_video \ --operation.output_dir /path/to/output/pusht_video Convert image dataset to video format and save with new repo_id: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_image \ --new_repo_id lerobot/pusht_video \ --operation.type convert_image_to_video Convert image dataset to video format and push to hub: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_image \ --new_repo_id lerobot/pusht_video \ --operation.type convert_image_to_video \ --push_to_hub true Show dataset information: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_image \ --operation.type info \ --operation.show_features true Show dataset information without feature details: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --repo_id lerobot/pusht_image \ --operation.type info \ --operation.show_features false Using JSON config file: - python -m lerobot.scripts.lerobot_edit_dataset \ + lerobot-edit-dataset \ --config_path path/to/edit_config.json """ diff --git a/src/lerobot/scripts/lerobot_replay.py b/src/lerobot/scripts/lerobot_replay.py index c9a559d07..8e2a394b9 100644 --- a/src/lerobot/scripts/lerobot_replay.py +++ 
b/src/lerobot/scripts/lerobot_replay.py @@ -22,7 +22,7 @@ lerobot-replay \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.id=black \ - --dataset.repo_id=aliberts/record-test \ + --dataset.repo_id=${HF_USER}/record-test \ --dataset.episode=0 ```