diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 43e2442d3..e6ace444a 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -2,11 +2,6 @@ Short, imperative summary (e.g., "fix(robots): handle None in sensor parser"). See [CONTRIBUTING.md](../CONTRIBUTING.md) for PR conventions. -## Type / Scope - -- **Type**: (Bug | Feature | Docs | Performance | Test | CI | Chore) -- **Scope**: (optional — name of module or package affected) - ## Summary / Motivation - One-paragraph description of what changes and why. @@ -19,28 +14,14 @@ Short, imperative summary (e.g., "fix(robots): handle None in sensor parser"). S ## What changed -- Short, concrete bullets of the modifications (files/behaviour). +- Short, concrete bullets explaining the functional changes (how the behavior or output differs now). - Short note if this introduces breaking changes and migration steps. ## How was this tested (or how to run locally) -- Tests added: list new tests or test files. +- Tests added: list new tests or test files. `pytest -q tests/ -k ` - Manual checks / dataset runs performed. 
-- Instructions for the reviewer - -Example: - -- Ran the relevant tests: - - ```bash - pytest -q tests/ -k - ``` - -- Reproduce with a quick example or CLI (if applicable): - - ```bash - lerobot-train --some.option=true - ``` +- Instructions for the reviewer for reproducing with a quick example or CLI (if applicable) ## Checklist (required before merge) @@ -48,6 +29,7 @@ Example: - [ ] All tests pass locally (`pytest`) - [ ] Documentation updated - [ ] CI is green +- [ ] Community Review: I have reviewed another contributor's open PR and linked it here: # (insert PR number/link) ## Reviewer notes diff --git a/.github/workflows/benchmark_tests.yml b/.github/workflows/benchmark_tests.yml index 79d5614b2..b07c8f8da 100644 --- a/.github/workflows/benchmark_tests.yml +++ b/.github/workflows/benchmark_tests.yml @@ -83,10 +83,13 @@ jobs: cache-binary: false - name: Login to Docker Hub + if: ${{ env.DOCKERHUB_USERNAME != '' }} uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses] with: username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} # Build the benchmark-specific image. 
The Dockerfile separates dep-install # from source-copy, so code-only changes skip the slow uv-sync layer @@ -115,7 +118,7 @@ jobs: bash -c " hf auth login --token \"\$HF_USER_TOKEN\" --add-to-git-credential 2>/dev/null || true lerobot-eval \ - --policy.path=pepijn223/smolvla_libero \ + --policy.path=lerobot/smolvla_libero \ --env.type=libero \ --env.task=libero_spatial \ --eval.batch_size=1 \ @@ -144,7 +147,7 @@ jobs: --artifacts-dir /tmp/libero-artifacts \ --env libero \ --task libero_spatial \ - --policy pepijn223/smolvla_libero + --policy lerobot/smolvla_libero - name: Upload Libero rollout video if: always() @@ -238,10 +241,13 @@ jobs: cache-binary: false - name: Login to Docker Hub + if: ${{ env.DOCKERHUB_USERNAME != '' }} uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses] with: username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} - name: Build MetaWorld benchmark image uses: docker/build-push-action@v6 # zizmor: ignore[unpinned-uses] @@ -264,7 +270,7 @@ jobs: bash -c " hf auth login --token \"\$HF_USER_TOKEN\" --add-to-git-credential 2>/dev/null || true lerobot-eval \ - --policy.path=pepijn223/smolvla_metaworld \ + --policy.path=lerobot/smolvla_metaworld \ --env.type=metaworld \ --env.task=metaworld-push-v3 \ --eval.batch_size=1 \ @@ -293,7 +299,7 @@ jobs: --artifacts-dir /tmp/metaworld-artifacts \ --env metaworld \ --task metaworld-push-v3 \ - --policy pepijn223/smolvla_metaworld + --policy lerobot/smolvla_metaworld - name: Upload MetaWorld rollout video if: always() @@ -310,3 +316,630 @@ jobs: name: metaworld-metrics path: /tmp/metaworld-artifacts/metrics.json if-no-files-found: warn + + # ── ROBOTWIN 2.0 ────────────────────────────────────────────────────────── + # Isolated image: full RoboTwin 2.0 stack — SAPIEN, mplib, CuRobo, + # pytorch3d, + simulation assets (~4 GB). 
+ # Build takes ~20 min on first run; subsequent runs hit the layer cache. + # Requires an NVIDIA GPU runner with CUDA 12.1 drivers. + robotwin-integration-test: + name: RoboTwin 2.0 — build image + 1-episode eval + runs-on: + group: aws-g6-4xlarge-plus + env: + HF_USER_TOKEN: ${{ secrets.LEROBOT_HF_USER }} + ROBOTWIN_POLICY: lerobot/smolvla_robotwin + ROBOTWIN_TASKS: beat_block_hammer,click_bell,handover_block,stack_blocks_two,click_alarmclock,open_microwave,adjust_bottle,lift_pot,stamp_seal,turn_switch + + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false + lfs: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 # zizmor: ignore[unpinned-uses] + with: + cache-binary: false + + - name: Login to Docker Hub + if: ${{ env.DOCKERHUB_USERNAME != '' }} + uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses] + with: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + + # Build the full-install image: SAPIEN, mplib, CuRobo, pytorch3d + + # simulation assets (~4 GB). Layer cache lives in the runner's local + # Docker daemon — reused across re-runs on the same machine. + - name: Build RoboTwin 2.0 benchmark image + uses: docker/build-push-action@v6 # zizmor: ignore[unpinned-uses] + with: + context: . + file: docker/Dockerfile.benchmark.robotwin + push: false + load: true + tags: lerobot-benchmark-robotwin:ci + cache-from: type=local,src=/tmp/.buildx-cache-robotwin + cache-to: type=local,dest=/tmp/.buildx-cache-robotwin,mode=max + + - name: Run RoboTwin 2.0 smoke eval (10 tasks, 1 episode each) + if: env.HF_USER_TOKEN != '' + run: | + # Named container (no --rm) so we can docker cp artifacts out. 
+ docker run --name robotwin-eval --gpus all \ + --shm-size=4g \ + -e HF_HOME=/tmp/hf \ + -e HF_USER_TOKEN="${HF_USER_TOKEN}" \ + -e ROBOTWIN_POLICY="${ROBOTWIN_POLICY}" \ + -e ROBOTWIN_TASKS="${ROBOTWIN_TASKS}" \ + lerobot-benchmark-robotwin:ci \ + bash -c " + hf auth login --token \"\$HF_USER_TOKEN\" --add-to-git-credential 2>/dev/null || true + cd /opt/robotwin && lerobot-eval \ + --policy.path=\"\$ROBOTWIN_POLICY\" \ + --env.type=robotwin \ + --env.task=\"\$ROBOTWIN_TASKS\" \ + --eval.batch_size=1 \ + --eval.n_episodes=1 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--rename_map={\"observation.images.head_camera\": \"observation.images.camera1\", \"observation.images.left_camera\": \"observation.images.camera2\", \"observation.images.right_camera\": \"observation.images.camera3\"}' \ + --output_dir=/tmp/eval-artifacts + python /lerobot/scripts/ci/extract_task_descriptions.py \ + --env robotwin \ + --task \"\$ROBOTWIN_TASKS\" \ + --output /tmp/eval-artifacts/task_descriptions.json + " + + - name: Copy RoboTwin artifacts from container + if: always() + run: | + mkdir -p /tmp/robotwin-artifacts + docker cp robotwin-eval:/tmp/eval-artifacts/. 
/tmp/robotwin-artifacts/ 2>/dev/null || true + docker rm -f robotwin-eval || true + + - name: Parse RoboTwin eval metrics + if: always() + run: | + python3 scripts/ci/parse_eval_metrics.py \ + --artifacts-dir /tmp/robotwin-artifacts \ + --env robotwin \ + --task "${ROBOTWIN_TASKS}" \ + --policy "${ROBOTWIN_POLICY}" + + - name: Upload RoboTwin rollout video + if: always() + uses: actions/upload-artifact@v4 + with: + name: robotwin-rollout-video + path: /tmp/robotwin-artifacts/videos/ + if-no-files-found: warn + + - name: Upload RoboTwin eval metrics + if: always() + uses: actions/upload-artifact@v4 + with: + name: robotwin-metrics + path: /tmp/robotwin-artifacts/metrics.json + if-no-files-found: warn + + # ── ROBOCASA365 ────────────────────────────────────────────────────────── + # Isolated image: robocasa + robosuite installed manually as editable + # clones (no `lerobot[robocasa]` extra — robocasa's setup.py pins + # `lerobot==0.3.3`, which would shadow this repo's lerobot). + robocasa-integration-test: + name: RoboCasa365 — build image + 1-episode eval + runs-on: + group: aws-g6-4xlarge-plus + env: + HF_USER_TOKEN: ${{ secrets.LEROBOT_HF_USER }} + + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false + lfs: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 # zizmor: ignore[unpinned-uses] + with: + cache-binary: false + + - name: Login to Docker Hub + if: ${{ env.DOCKERHUB_USERNAME != '' }} + uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses] + with: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + + - name: Build RoboCasa365 benchmark image + uses: docker/build-push-action@v6 # zizmor: ignore[unpinned-uses] + with: + context: . 
+ file: docker/Dockerfile.benchmark.robocasa + push: false + load: true + tags: lerobot-benchmark-robocasa:ci + + - name: Run RoboCasa365 smoke eval (10 atomic tasks, 1 episode each) + if: env.HF_USER_TOKEN != '' + run: | + docker run --name robocasa-eval --gpus all \ + --shm-size=4g \ + -e HF_HOME=/tmp/hf \ + -e HF_USER_TOKEN="${HF_USER_TOKEN}" \ + -e HF_HUB_DOWNLOAD_TIMEOUT=300 \ + -e MUJOCO_GL=egl \ + lerobot-benchmark-robocasa:ci \ + bash -c " + hf auth login --token \"\$HF_USER_TOKEN\" --add-to-git-credential 2>/dev/null || true + lerobot-eval \ + --policy.path=lerobot/smolvla_robocasa \ + --env.type=robocasa \ + --env.task=CloseFridge,OpenCabinet,OpenDrawer,TurnOnMicrowave,TurnOffStove,CloseToasterOvenDoor,SlideDishwasherRack,TurnOnSinkFaucet,NavigateKitchen,TurnOnElectricKettle \ + --eval.batch_size=1 \ + --eval.n_episodes=1 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--rename_map={\"observation.images.robot0_agentview_left\": \"observation.images.camera1\", \"observation.images.robot0_eye_in_hand\": \"observation.images.camera2\", \"observation.images.robot0_agentview_right\": \"observation.images.camera3\"}' \ + --output_dir=/tmp/eval-artifacts + python scripts/ci/extract_task_descriptions.py \ + --env robocasa \ + --task CloseFridge,OpenCabinet,OpenDrawer,TurnOnMicrowave,TurnOffStove,CloseToasterOvenDoor,SlideDishwasherRack,TurnOnSinkFaucet,NavigateKitchen,TurnOnElectricKettle \ + --output /tmp/eval-artifacts/task_descriptions.json + " + + - name: Copy RoboCasa365 artifacts from container + if: always() + run: | + mkdir -p /tmp/robocasa-artifacts + docker cp robocasa-eval:/tmp/eval-artifacts/. 
/tmp/robocasa-artifacts/ 2>/dev/null || true + docker rm -f robocasa-eval || true + + - name: Parse RoboCasa365 eval metrics + if: always() + run: | + python3 scripts/ci/parse_eval_metrics.py \ + --artifacts-dir /tmp/robocasa-artifacts \ + --env robocasa \ + --task atomic_smoke_10 \ + --policy lerobot/smolvla_robocasa + + - name: Upload RoboCasa365 rollout video + if: always() + uses: actions/upload-artifact@v4 # zizmor: ignore[unpinned-uses] + with: + name: robocasa-rollout-video + path: /tmp/robocasa-artifacts/videos/ + if-no-files-found: warn + + - name: Upload RoboCasa365 eval metrics + if: always() + uses: actions/upload-artifact@v4 # zizmor: ignore[unpinned-uses] + with: + name: robocasa-metrics + path: /tmp/robocasa-artifacts/metrics.json + if-no-files-found: warn + + # ── ROBOCEREBRA ─────────────────────────────────────────────────────────── + # Reuses the LIBERO simulator (libero_10 suite) with RoboCerebra camera + # defaults (image/wrist_image). The image is layered on + # huggingface/lerobot-gpu, which already ships [libero] as part of [all]. + robocerebra-integration-test: + name: RoboCerebra — build image + 1-episode eval + runs-on: + group: aws-g6-4xlarge-plus + env: + HF_USER_TOKEN: ${{ secrets.LEROBOT_HF_USER }} + + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false + lfs: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 # zizmor: ignore[unpinned-uses] + with: + cache-binary: false + + - name: Login to Docker Hub + if: ${{ env.DOCKERHUB_USERNAME != '' }} + uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses] + with: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + + - name: Build RoboCerebra benchmark image + uses: docker/build-push-action@v6 # zizmor: ignore[unpinned-uses] + with: + context: . 
+ file: docker/Dockerfile.benchmark.robocerebra + push: false + load: true + tags: lerobot-benchmark-robocerebra:ci + cache-from: type=local,src=/tmp/.buildx-cache-robocerebra + cache-to: type=local,dest=/tmp/.buildx-cache-robocerebra,mode=max + + - name: Run RoboCerebra smoke eval (1 episode) + if: env.HF_USER_TOKEN != '' + run: | + docker run --name robocerebra-eval --gpus all \ + --shm-size=4g \ + -e HF_HOME=/tmp/hf \ + -e HF_USER_TOKEN="${HF_USER_TOKEN}" \ + -e HF_HUB_DOWNLOAD_TIMEOUT=300 \ + -e LIBERO_DATA_FOLDER=/tmp/libero_data \ + lerobot-benchmark-robocerebra:ci \ + bash -c " + hf auth login --token \"\$HF_USER_TOKEN\" --add-to-git-credential 2>/dev/null || true + lerobot-eval \ + --policy.path=lerobot/smolvla_robocerebra \ + --env.type=libero \ + --env.task=libero_10 \ + --env.fps=20 \ + --env.obs_type=pixels_agent_pos \ + --env.observation_height=256 \ + --env.observation_width=256 \ + '--env.camera_name_mapping={\"agentview_image\": \"image\", \"robot0_eye_in_hand_image\": \"wrist_image\"}' \ + --eval.batch_size=1 \ + --eval.n_episodes=1 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--rename_map={\"observation.images.image\": \"observation.images.camera1\", \"observation.images.wrist_image\": \"observation.images.camera2\"}' \ + --policy.empty_cameras=1 \ + --output_dir=/tmp/eval-artifacts + python scripts/ci/extract_task_descriptions.py \ + --env libero --task libero_10 \ + --output /tmp/eval-artifacts/task_descriptions.json + " + + - name: Copy RoboCerebra artifacts from container + if: always() + run: | + mkdir -p /tmp/robocerebra-artifacts + docker cp robocerebra-eval:/tmp/eval-artifacts/. 
/tmp/robocerebra-artifacts/ 2>/dev/null || true + docker rm -f robocerebra-eval || true + + - name: Parse RoboCerebra eval metrics + if: always() + run: | + python3 scripts/ci/parse_eval_metrics.py \ + --artifacts-dir /tmp/robocerebra-artifacts \ + --env robocerebra \ + --task libero_10 \ + --policy lerobot/smolvla_robocerebra + + - name: Upload RoboCerebra rollout video + if: always() + uses: actions/upload-artifact@v4 # zizmor: ignore[unpinned-uses] + with: + name: robocerebra-rollout-video + path: /tmp/robocerebra-artifacts/videos/ + if-no-files-found: warn + + - name: Upload RoboCerebra eval metrics + if: always() + uses: actions/upload-artifact@v4 # zizmor: ignore[unpinned-uses] + with: + name: robocerebra-metrics + path: /tmp/robocerebra-artifacts/metrics.json + if-no-files-found: warn + + # ── ROBOMME ─────────────────────────────────────────────────────────────── + # Isolated image: mani-skill/SAPIEN/Vulkan chain with gymnasium and numpy + # overrides (robomme can't be a pyproject extra due to numpy<2 pin). 
+ robomme-integration-test: + name: RoboMME — build image + 1-episode eval + runs-on: + group: aws-g6-4xlarge-plus + env: + HF_USER_TOKEN: ${{ secrets.LEROBOT_HF_USER }} + ROBOMME_POLICY: lerobot/smolvla_robomme + ROBOMME_TASKS: PickXtimes,BinFill,StopCube,MoveCube,InsertPeg,SwingXtimes,VideoUnmask,ButtonUnmask,PickHighlight,PatternLock + + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false + lfs: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 # zizmor: ignore[unpinned-uses] + with: + cache-binary: false + + - name: Login to Docker Hub + if: ${{ env.DOCKERHUB_USERNAME != '' }} + uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses] + with: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + + - name: Build RoboMME benchmark image + uses: docker/build-push-action@v6 # zizmor: ignore[unpinned-uses] + with: + context: . 
+ file: docker/Dockerfile.benchmark.robomme + push: false + load: true + tags: lerobot-benchmark-robomme:ci + + - name: Run RoboMME smoke eval (10 tasks, 1 episode each) + if: env.HF_USER_TOKEN != '' + run: | + docker run --name robomme-eval --gpus all \ + --shm-size=4g \ + -e HF_HOME=/tmp/hf \ + -e HF_USER_TOKEN="${HF_USER_TOKEN}" \ + -e HF_HUB_DOWNLOAD_TIMEOUT=300 \ + -e ROBOMME_POLICY="${ROBOMME_POLICY}" \ + -e ROBOMME_TASKS="${ROBOMME_TASKS}" \ + lerobot-benchmark-robomme:ci \ + bash -c " + hf auth login --token \"\$HF_USER_TOKEN\" --add-to-git-credential 2>/dev/null || true + lerobot-eval \ + --policy.path=\"\$ROBOMME_POLICY\" \ + --env.type=robomme \ + --env.task=\"\$ROBOMME_TASKS\" \ + --env.dataset_split=test \ + --env.task_ids=[0] \ + --eval.batch_size=1 \ + --eval.n_episodes=1 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--rename_map={\"observation.images.image\": \"observation.images.camera1\", \"observation.images.wrist_image\": \"observation.images.camera2\"}' \ + --policy.empty_cameras=3 \ + --output_dir=/tmp/eval-artifacts + python scripts/ci/extract_task_descriptions.py \ + --env robomme --task \"\$ROBOMME_TASKS\" \ + --output /tmp/eval-artifacts/task_descriptions.json + " + + - name: Copy RoboMME artifacts from container + if: always() + run: | + mkdir -p /tmp/robomme-artifacts + docker cp robomme-eval:/tmp/eval-artifacts/. 
/tmp/robomme-artifacts/ 2>/dev/null || true + docker rm -f robomme-eval || true + + - name: Parse RoboMME eval metrics + if: always() + run: | + python3 scripts/ci/parse_eval_metrics.py \ + --artifacts-dir /tmp/robomme-artifacts \ + --env robomme \ + --task "${ROBOMME_TASKS}" \ + --policy "${ROBOMME_POLICY}" + + - name: Upload RoboMME rollout video + if: always() + uses: actions/upload-artifact@v4 # zizmor: ignore[unpinned-uses] + with: + name: robomme-rollout-video + path: /tmp/robomme-artifacts/videos/ + if-no-files-found: warn + + - name: Upload RoboMME eval metrics + if: always() + uses: actions/upload-artifact@v4 # zizmor: ignore[unpinned-uses] + with: + name: robomme-metrics + path: /tmp/robomme-artifacts/metrics.json + if-no-files-found: warn + + # ── LIBERO-plus ─────────────────────────────────────────────────────────── + # Isolated image: LIBERO-plus fork cloned into /home/user_lerobot on top of + # huggingface/lerobot-gpu (see docker/Dockerfile.benchmark.libero_plus). + libero-plus-integration-test: + name: LIBERO-plus — build image + 1-episode eval + runs-on: + group: aws-g6-4xlarge-plus + env: + HF_USER_TOKEN: ${{ secrets.LEROBOT_HF_USER }} + LIBERO_PLUS_SUITE: libero_spatial + LIBERO_PLUS_POLICY: lerobot/smolvla_libero_plus + LIBERO_PLUS_TASK_IDS: "[0,100,260,500,1000,1500,2000,2400]" + + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false + lfs: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 # zizmor: ignore[unpinned-uses] + with: + cache-binary: false + + - name: Login to Docker Hub + if: ${{ env.DOCKERHUB_USERNAME != '' }} + uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses] + with: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + + - name: Build LIBERO-plus benchmark image + uses: docker/build-push-action@v6 # 
zizmor: ignore[unpinned-uses] + with: + context: . + file: docker/Dockerfile.benchmark.libero_plus + push: false + load: true + tags: lerobot-benchmark-libero-plus:ci + cache-from: type=local,src=/tmp/.buildx-cache-libero-plus + cache-to: type=local,dest=/tmp/.buildx-cache-libero-plus,mode=max + + - name: Run LIBERO-plus smoke eval (1 episode) + if: env.HF_USER_TOKEN != '' + run: | + docker run --name libero-plus-eval --gpus all \ + --shm-size=4g \ + -e HF_HOME=/tmp/hf \ + -e HF_USER_TOKEN="${HF_USER_TOKEN}" \ + -e HF_HUB_DOWNLOAD_TIMEOUT=300 \ + -e LIBERO_PLUS_SUITE="${LIBERO_PLUS_SUITE}" \ + -e LIBERO_PLUS_POLICY="${LIBERO_PLUS_POLICY}" \ + -e LIBERO_PLUS_TASK_IDS="${LIBERO_PLUS_TASK_IDS}" \ + lerobot-benchmark-libero-plus:ci \ + bash -c " + hf auth login --token \"\$HF_USER_TOKEN\" --add-to-git-credential 2>/dev/null || true + lerobot-eval \ + --policy.path=\"\$LIBERO_PLUS_POLICY\" \ + --env.type=libero_plus \ + --env.task=\"\$LIBERO_PLUS_SUITE\" \ + --env.task_ids=\"\$LIBERO_PLUS_TASK_IDS\" \ + --eval.batch_size=1 \ + --eval.n_episodes=1 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--env.camera_name_mapping={\"agentview_image\": \"camera1\", \"robot0_eye_in_hand_image\": \"camera2\"}' \ + --policy.empty_cameras=1 \ + --output_dir=/tmp/eval-artifacts + python scripts/ci/extract_task_descriptions.py \ + --env libero_plus --task \"\$LIBERO_PLUS_SUITE\" \ + --output /tmp/eval-artifacts/task_descriptions.json + " + + - name: Copy LIBERO-plus artifacts from container + if: always() + run: | + mkdir -p /tmp/libero-plus-artifacts + docker cp libero-plus-eval:/tmp/eval-artifacts/. 
/tmp/libero-plus-artifacts/ 2>/dev/null || true + docker rm -f libero-plus-eval || true + + - name: Parse LIBERO-plus eval metrics + if: always() + run: | + python3 scripts/ci/parse_eval_metrics.py \ + --artifacts-dir /tmp/libero-plus-artifacts \ + --env libero_plus \ + --task "${LIBERO_PLUS_SUITE}" \ + --policy "${LIBERO_PLUS_POLICY}" + + - name: Upload LIBERO-plus rollout video + if: always() + uses: actions/upload-artifact@v4 # zizmor: ignore[unpinned-uses] + with: + name: libero-plus-rollout-video + path: /tmp/libero-plus-artifacts/videos/ + if-no-files-found: warn + + - name: Upload LIBERO-plus eval metrics + if: always() + uses: actions/upload-artifact@v4 # zizmor: ignore[unpinned-uses] + with: + name: libero-plus-metrics + path: /tmp/libero-plus-artifacts/metrics.json + if-no-files-found: warn + + # ── VLABENCH ───────────────────────────────────────────────────────────── + # Isolated image: lerobot[vlabench] only (VLABench, mujoco==3.2.2, dm-control chain) + vlabench-integration-test: + name: VLABench — build image + 1-episode eval + runs-on: + group: aws-g6-4xlarge-plus + env: + HF_USER_TOKEN: ${{ secrets.LEROBOT_HF_USER }} + + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false + lfs: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 # zizmor: ignore[unpinned-uses] + with: + cache-binary: false + + - name: Login to Docker Hub + if: ${{ env.DOCKERHUB_USERNAME != '' }} + uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses] + with: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + + - name: Build VLABench benchmark image + uses: docker/build-push-action@v6 # zizmor: ignore[unpinned-uses] + with: + context: . 
+ file: docker/Dockerfile.benchmark.vlabench + push: false + load: true + tags: lerobot-benchmark-vlabench:ci + build-args: | + VLABENCH_ASSETS_REPO=lerobot/vlabench-assets + + - name: Run VLABench smoke eval (10 tasks, 1 episode each) + if: env.HF_USER_TOKEN != '' + run: | + docker run --name vlabench-eval --gpus all \ + --shm-size=4g \ + -e HF_HOME=/tmp/hf \ + -e HF_USER_TOKEN="${HF_USER_TOKEN}" \ + -e HF_HUB_DOWNLOAD_TIMEOUT=300 \ + -e MUJOCO_GL=egl \ + lerobot-benchmark-vlabench:ci \ + bash -c " + hf auth login --token \"\$HF_USER_TOKEN\" --add-to-git-credential 2>/dev/null || true + lerobot-eval \ + --policy.path=lerobot/smolvla_vlabench \ + --env.type=vlabench \ + --env.task=select_fruit,select_toy,select_book,select_painting,select_drink,select_ingredient,select_billiards,select_poker,add_condiment,insert_flower \ + --eval.batch_size=1 \ + --eval.n_episodes=1 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--rename_map={\"observation.images.image\": \"observation.images.camera1\", \"observation.images.second_image\": \"observation.images.camera2\", \"observation.images.wrist_image\": \"observation.images.camera3\"}' \ + --output_dir=/tmp/eval-artifacts + python scripts/ci/extract_task_descriptions.py \ + --env vlabench \ + --task select_fruit,select_toy,select_book,select_painting,select_drink,select_ingredient,select_billiards,select_poker,add_condiment,insert_flower \ + --output /tmp/eval-artifacts/task_descriptions.json + " + + - name: Copy VLABench artifacts from container + if: always() + run: | + mkdir -p /tmp/vlabench-artifacts + docker cp vlabench-eval:/tmp/eval-artifacts/. 
/tmp/vlabench-artifacts/ 2>/dev/null || true + docker rm -f vlabench-eval || true + + - name: Parse VLABench eval metrics + if: always() + run: | + python3 scripts/ci/parse_eval_metrics.py \ + --artifacts-dir /tmp/vlabench-artifacts \ + --env vlabench \ + --task select_fruit,select_toy,select_book,select_painting,select_drink,select_ingredient,select_billiards,select_poker,add_condiment,insert_flower \ + --policy lerobot/smolvla_vlabench + + - name: Upload VLABench rollout video + if: always() + uses: actions/upload-artifact@v4 # zizmor: ignore[unpinned-uses] + with: + name: vlabench-rollout-video + path: /tmp/vlabench-artifacts/videos/ + if-no-files-found: warn + + - name: Upload VLABench eval metrics + if: always() + uses: actions/upload-artifact@v4 # zizmor: ignore[unpinned-uses] + with: + name: vlabench-metrics + path: /tmp/vlabench-artifacts/metrics.json + if-no-files-found: warn diff --git a/.github/workflows/documentation-upload-pr.yml b/.github/workflows/documentation-upload-pr.yml index 315abec1f..927f70597 100644 --- a/.github/workflows/documentation-upload-pr.yml +++ b/.github/workflows/documentation-upload-pr.yml @@ -33,7 +33,7 @@ jobs: github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success' && github.repository == 'huggingface/lerobot' - uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@90b4ee2c10b81b5c1a6367c4e6fc9e2fb510a7e3 # main + uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@9ad2de8582b56c017cb530c1165116d40433f1c6 # main with: package_name: lerobot secrets: diff --git a/.github/workflows/latest_deps_tests.yml b/.github/workflows/latest_deps_tests.yml index a291257c5..2a7564d11 100644 --- a/.github/workflows/latest_deps_tests.yml +++ b/.github/workflows/latest_deps_tests.yml @@ -217,6 +217,24 @@ jobs: - name: Run end-to-end tests run: make test-end-to-end + slack-notification: + name: Slack Notification + needs: [cpu-tests, gpu-tests, 
upgrade-lock] + if: always() && needs.upgrade-lock.outputs.changed == 'true' + runs-on: ubuntu-latest + permissions: + contents: read + env: + CI_SLACK_CHANNEL: ${{ secrets.CI_SLACK_CHANNEL }} + steps: + - name: Post to a Slack channel + uses: huggingface/hf-workflows/.github/actions/post-slack@a88e7fa2eaee28de5a4d6142381b1fb792349b67 # main + with: + slack_channel: ${{ env.CI_SLACK_CHANNEL }} + title: "Results of the latest dependency tests (CPU + GPU)" + status: ${{ (needs.cpu-tests.result == 'success' && needs.gpu-tests.result == 'success') && 'success' || 'failure' }} + slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }} + # This job creates or updates a PR with the upgraded lockfile open-pr: name: Open PR diff --git a/AGENTS.md b/AGENTS.md index c1aba7471..bd1bf0af1 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,5 +1,7 @@ This file provides guidance to AI agents when working with code in this repository. +> **User-facing help → [`AGENT_GUIDE.md`](./AGENT_GUIDE.md)** (SO-101 setup, recording, picking a policy, training duration, eval — with copy-pasteable commands). + ## Project Overview LeRobot is a PyTorch-based library for real-world robotics, providing datasets, pretrained policies, and tools for training, evaluation, data collection, and robot control. It integrates with Hugging Face Hub for model/dataset sharing. diff --git a/AGENT_GUIDE.md b/AGENT_GUIDE.md new file mode 100644 index 000000000..725948dc9 --- /dev/null +++ b/AGENT_GUIDE.md @@ -0,0 +1,410 @@ +# AGENT_GUIDE.md — LeRobot Helper for AI Agents & Users + +This file is a practical, copy-paste-friendly companion for any AI agent (Cursor, Claude, ChatGPT, Codex, etc.) helping a user work with LeRobot. It complements [`AGENTS.md`](./AGENTS.md) (dev/contributor context) with **user-facing guidance**: how to start, what to train, how long, how to record, and how to calibrate an SO-101. + +--- + +## 1. 
Start here — ask the user first (MANDATORY) + +Before suggesting any command, an agent MUST ask the user at least these questions and wait for answers: + +1. **What's your goal?** (e.g. "teach my SO-101 to fold a cloth", "train a policy on an existing HF dataset", "contribute a PR", "understand the codebase") +2. **What hardware do you have?** + - Robot: none / SO-100 / SO-101 / Koch / LeKiwi / Reachy / other + - Teleop: leader arm / phone / keyboard / gamepad / none + - Cameras: how many, resolution, fixed or moving? +3. **What machine will you train on?** + - GPU model + VRAM (e.g. "laptop 3060 6 GB", "RTX 4090 24 GB", "A100 80 GB", "CPU only") + - OS: macOS / Linux / Windows +4. **Skill level & time budget?** First time, some ML, experienced? Hours, days, a weekend? +5. **Do you already have a dataset?** Yes (HF repo id?) / no / want to record one +6. **How can I help right now?** (pick one concrete next step) + +Only after you have answers, propose a concrete path. If something is ambiguous, ask again rather than guessing. Bias toward **the simplest thing that works** for the user's hardware and goal. + +--- + +## 2. LeRobot in 60 seconds + +LeRobot = **datasets + policies + envs + robot control**, unified by a small set of strong abstractions. + +- **`LeRobotDataset`** — episode-aware dataset (video or images + actions + state), loadable from the Hub or disk. +- **Policies** (`ACT`, `Diffusion`, `SmolVLA`, `π0`, `π0.5`, `Wall-X`, `X-VLA`, `VQ-BeT`, `TD-MPC`, …) — all inherit `PreTrainedPolicy` and can be pushed/pulled from the Hub. +- **Processors** — small composable transforms between dataset → policy → robot. +- **Envs** (sim) and **Robots** (real) — same action/observation contract so code swaps cleanly. +- **CLI** — `lerobot-record`, `lerobot-train`, `lerobot-eval`, `lerobot-teleoperate`, `lerobot-calibrate`, `lerobot-find-port`, `lerobot-setup-motors`, `lerobot-replay`. + +See [`AGENTS.md`](./AGENTS.md) for repo architecture. + +--- + +## 3. 
Quickstart paths (pick one) + +### Path A — "I have an SO-101 and want my first trained policy" + +Go to §4 (SO-101 end-to-end), then §5 (data tips), then §6 (pick a policy — likely **ACT**), then §7 (how long), then §8 (eval). + +### Path B — "No hardware, I want to train on an existing dataset" + +Skip §4. Pick a policy in §6, pick a duration in §7, then run `lerobot-train` per §4.9 with a Hub `--dataset.repo_id` and an `--env.type` for eval. Finish with §8. + +### Path C — "I just want to understand the codebase" + +Read §2 above, then `AGENTS.md` "Architecture", then open `src/lerobot/policies/act/` and `src/lerobot/datasets/lerobot_dataset.py` as canonical examples. + +--- + +## 4. SO-101 end-to-end cheat-sheet + +Full details in [`docs/source/so101.mdx`](./docs/source/so101.mdx) and [`docs/source/il_robots.mdx`](./docs/source/il_robots.mdx). Minimum commands in order. Confirm arms are assembled + powered before issuing. + +**4.1 Install** + +```bash +pip install 'lerobot[feetech]' # SO-100/SO-101 motor stack +# pip install 'lerobot[all]' # everything +# pip install 'lerobot[aloha,pusht]' # specific features +# pip install 'lerobot[smolvla]' # add SmolVLA deps +git lfs install && git lfs pull +hf auth login # required to push datasets/policies +``` + +Contributors can alternatively use `uv sync --locked --extra feetech` (see `AGENTS.md`). + +**4.2 Find USB ports** — run once per arm, unplug when prompted. + +```bash +lerobot-find-port +``` + +macOS: `/dev/tty.usbmodem...`; Linux: `/dev/ttyACM0` (may need `sudo chmod 666 /dev/ttyACM0`). + +**4.3 Setup motor IDs & baudrate** (one-time, per arm) + +```bash +lerobot-setup-motors --robot.type=so101_follower --robot.port= +lerobot-setup-motors --teleop.type=so101_leader --teleop.port= +``` + +**4.4 Calibrate** — center all joints, press Enter, sweep each joint through its full range. The `id` is the calibration key — reuse it everywhere. 
+ +```bash +lerobot-calibrate --robot.type=so101_follower --robot.port= --robot.id=my_follower +lerobot-calibrate --teleop.type=so101_leader --teleop.port= --teleop.id=my_leader +``` + +**4.5 Teleoperate** (sanity check, no recording) + +```bash +lerobot-teleoperate \ + --robot.type=so101_follower --robot.port= --robot.id=my_follower \ + --teleop.type=so101_leader --teleop.port= --teleop.id=my_leader \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --display_data=true +``` + +> **Feetech timeout / comms error on SO-100 / SO-101?** Before touching software, check the **red motor LEDs** on the daisy chain. +> +> - **All steady red, gripper → base chain** → wiring OK. +> - **One or more motors dark / chain stops mid-way** → wiring issue: reseat the 3-pin cables, check the controller-board power supply, and make sure each motor is fully clicked in. +> - **LEDs blinking** → the motor is in an **error state**: usually overload (forcing a joint past its limit) **or wrong power supply voltage**. SO-100 / SO-101 ship in two variants — a **5 V / 7.4 V** build and a **12 V** build — they are NOT interchangeable. Using a 12 V PSU on a 5 V / 7.4 V arm (or vice-versa) will trip this error; confirm your motor variant before powering up. +> +> Most "timeout" errors are physical, not code. + +**4.6 Record a dataset** — keys: **→** next, **←** redo, **ESC** finish & upload. 
+ +```bash +HF_USER=$(NO_COLOR=1 hf auth whoami | awk -F': *' 'NR==1 {print $2}') + +lerobot-record \ + --robot.type=so101_follower --robot.port= --robot.id=my_follower \ + --teleop.type=so101_leader --teleop.port= --teleop.id=my_leader \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --dataset.repo_id=${HF_USER}/my_task \ + --dataset.single_task="" \ + --dataset.num_episodes=50 \ + --dataset.episode_time_s=30 \ + --dataset.reset_time_s=10 \ + --display_data=true +``` + +**4.7 Visualize** — **always** do this before training. Look for missing frames, camera blur, unreachable targets, inconsistent object positions. +After upload: https://huggingface.co/spaces/lerobot/visualize_dataset → paste `${HF_USER}/my_task`. Works for **any LeRobot-formatted Hub dataset** — use it to scout other datasets, inspect episode quality, or debug your own data before retraining. + +**4.8 Replay an episode** (sanity check) + +```bash +lerobot-replay --robot.type=so101_follower --robot.port= --robot.id=my_follower \ + --dataset.repo_id=${HF_USER}/my_task --dataset.episode=0 +``` + +**4.9 Train** (default: ACT — fastest, lowest memory). Apple silicon: `--policy.device=mps`. See §6/§7 for policy and duration. + +```bash +lerobot-train \ + --dataset.repo_id=${HF_USER}/my_task \ + --policy.type=act \ + --policy.device=cuda \ + --output_dir=outputs/train/act_my_task \ + --job_name=act_my_task \ + --batch_size=8 \ + --wandb.enable=true \ + --policy.repo_id=${HF_USER}/act_my_task +``` + +**4.10 Evaluate on the real robot** — compare success rate to a teleoperated baseline. + +```bash +lerobot-record \ + --robot.type=so101_follower --robot.port= --robot.id=my_follower \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --dataset.repo_id=${HF_USER}/eval_my_task \ + --dataset.single_task="" \ + --dataset.num_episodes=10 \ + --policy.path=${HF_USER}/act_my_task +``` + +--- + +## 5. 
Data collection tips (beginner → reliable policy) + +Good data beats clever models. Adopt these defaults and deviate only with evidence. + +### 5.1 Setup & ergonomics + +- **Fix the rig and cameras** before touching the software. If the rig vibrates or the operator gets frustrated, fix that first — more bad data won't help. +- **Lighting matters more than resolution.** Diffuse, consistent light. Avoid moving shadows. +- **"Can you do the task from the camera view alone?"** If no, your cameras are wrong. Fix before recording. +- Enable **action interpolation** for rollouts when available for smoother trajectories. + +### 5.2 Practice before you record + +- Do 5–10 demos without recording. Build a deliberate, repeatable strategy. +- Hesitant or inconsistent demos teach the model hesitation. + +### 5.3 Quality over speed + +Deliberate, high-quality execution beats fast sloppy runs. Optimize for speed only **after** strategy is dialed in — never trade quality for it. + +### 5.4 Consistency within and across episodes + +Same grasp, approach vector, and timing. Coherent strategies are much easier to learn than wildly varying movements. + +### 5.5 Start small, then extend (the golden rule) + +- **First 50 episodes = constrained version** of the task: one object, fixed position, fixed camera setup, one operator. +- Train a quick ACT model. See what fails. +- **Then add diversity** along one axis at a time: more positions → more lighting → more objects → more operators. +- Don't try to collect the "perfect dataset" on day one. Iterate. + +### 5.6 Policy choice for beginners + +- **Laptop / first time / want results fast → ACT.** Works surprisingly well, trains fast even on a laptop GPU. +- **Bigger GPU / language-conditioned / multi-task → SmolVLA.** Unfreezing the vision encoder (see §7) is a big win here. +- Defer π0 / π0.5 / Wall-X / X-VLA until you have a proven ACT baseline and a 20+ GB GPU. 
+ +### 5.7 Recommended defaults for your first task + +| Setting | Value | +| ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | +| Episodes | **50** to start, scale to 100–300 after first training | +| Episode length | 20–45 s (shorter is fine for grasp/place) | +| Reset time | 10 s | +| FPS | 30 | +| Cameras | **2 cameras recommended**: 1 fixed front + 1 wrist. Multi-view often outperforms single-view. A single fixed camera also works to keep things simple. | +| Task description | Short, specific, action-phrased sentence | + +### 5.8 Troubleshooting signal + +- Policy fails at one specific stage → record 10–20 more episodes **targeting that stage**. +- Policy flaps / oscillates → likely inconsistent demos, or need more training; re-record worst episodes (use **←** to redo). +- Policy ignores the object → camera framing or lighting issue, not a model issue. + +See also: [What makes a good dataset](https://huggingface.co/blog/lerobot-datasets#what-makes-a-good-dataset). + +--- + +## 6. Which policy should I train? + +Match the policy to the user's **GPU memory** and **time budget**. Numbers below come from an internal profiling run (one training update per policy). They are **indicative only** — see caveats. + +### 6.1 Profiling snapshot (indicative) + +All policies typically train for **5–10 epochs** (see §7). + +| Policy | Batch | Update (ms) | Peak GPU mem (GB) | Best for | +| ----------- | ----: | ----------: | ----------------: | ------------------------------------------------------------------------------------------------ | +| `act` | 4 | **83.9** | **0.94** | First-time users, laptops, single-task. Fast and reliable. | +| `diffusion` | 4 | 168.6 | 4.94 | Multi-modal action distributions; needs mid-range GPU. | +| `smolvla` | 1 | 357.8 | 3.93 | Language-conditioned, multi-task, small VLA. **Unfreeze vision encoder for big gains** (see §7). 
| +| `xvla` | 1 | 731.6 | 15.52 | Large VLA, multi-task. | +| `wall_x` | 1 | 716.5 | 15.95 | Large VLA with world-model objective. | +| `pi0` | 1 | 940.3 | 15.50 | Strong large VLA baseline (Physical Intelligence). | +| `pi05` | 1 | 1055.8 | 16.35 | Newer π policy; similar footprint to `pi0`. | + +**Critical caveats:** + +- **Optimizer:** measured with **SGD**. LeRobot's default is **AdamW**, which keeps extra optimizer state → **peak memory will be noticeably higher** with the default, especially for `pi0`, `pi05`, `wall_x`, `xvla`. +- **Batch size:** the large policies were profiled at batch 1. In practice use a **larger batch** for stable training (see §7.4). Memory scales roughly linearly with batch. + +### 6.2 Decision rules + +- **< 8 GB VRAM (laptop, 3060, M-series Mac):** → `act`. Maybe `diffusion` if you have ~6–8 GB free. +- **12–16 GB VRAM (4070/4080, A4000):** → `smolvla` with defaults, or `act`/`diffusion` with larger batch. `pi0`/`pi05`/`wall_x`/`xvla` feasible only with small batch + gradient accumulation. +- **24+ GB VRAM (3090/4090/A5000):** → any policy. Prefer `smolvla` (unfrozen) for multi-task; `act` for single-task grasp-and-place (still often the best ROI). Could experiment with `pi0` or `pi05` or `xvla` +- **80 GB (A100/H100):** → any, with healthy batch. `pi05`, `xvla`, `wall_x` become comfortable. +- **CPU only:** → don't train here. Use Google Colab (see [`docs/source/notebooks.mdx`](./docs/source/notebooks.mdx)) or a rented GPU. + +--- + +## 7. How long should I train? + +Robotics imitation learning usually converges in a **few epochs over the dataset**, not hundreds of thousands of raw steps. Think **epochs first**, then translate to steps. + +### 7.1 Rule of thumb + +- **Typical total: 5–10 epochs.** Start at 5, eval, then decide if more helps. +- Very small datasets (< 30 episodes) may want slightly more epochs — but first, **collect more data**. 
+- VLAs with a pretrained vision backbone typically need **fewer** epochs than training from scratch. + +### 7.2 Steps ↔ epochs conversion + +``` +total_frames = sum of frames over all episodes # e.g. 50 eps × 30 fps × 30 s ≈ 45,000 +steps_per_epoch = ceil(total_frames / batch_size) +total_steps = epochs × steps_per_epoch +``` + +Examples for `--batch_size=8`: + +| Dataset size | Frames | Steps / epoch | 5 epochs | 10 epochs | +| ----------------------- | ------: | ------------: | -------: | --------: | +| 50 eps × 30 s @ 30 fps | 45,000 | ~5,625 | 28k | 56k | +| 100 eps × 30 s @ 30 fps | 90,000 | ~11,250 | 56k | 113k | +| 300 eps × 30 s @ 30 fps | 270,000 | ~33,750 | 169k | 338k | + +Pass the resulting total with `--steps=`; eval at intermediate checkpoints (`outputs/train/.../checkpoints/`). + +### 7.3 Per-policy starting points (single-task, ~50 episodes) + +| Policy | Batch | Steps (first run) | Notes | +| -------------- | ----: | ----------------: | ----------------------------------------------------------------- | +| `act` | 8–16 | 30k–80k | Usually converges under 50k for single-task. | +| `diffusion` | 8–16 | 80k–150k | Benefits from longer training than ACT. | +| `smolvla` | 4–8 | 30k–80k | Pretrained VLM → converges fast. | +| `pi0` / `pi05` | 1–4 | 30k–80k | Memory-bound; use gradient accumulation for effective batch ≥ 16! | + +### 7.4 Batch size guidance + +- **Bigger batch is preferable** for stable gradients on teleop data. +- If GPU memory is the bottleneck, use **gradient accumulation** to raise _effective_ batch without raising peak memory. +- Scale **learning rate** gently with batch; most LeRobot defaults work fine for a 2–4× batch change. + +### 7.5 Scale LR schedule & checkpoints with `--steps` + +LeRobot's default schedulers (e.g. SmolVLA's cosine decay) use `scheduler_decay_steps=30_000`, which is sized for long training runs. When you shorten training (e.g. 
5k–10k steps on a small dataset), **scale the scheduler down to match** — otherwise the LR stays near the peak and never decays. Same for checkpoint frequency. + +```bash +lerobot-train ... \ + --steps=5000 \ + --policy.scheduler_decay_steps=5000 \ + --save_freq=5000 +``` + +Rule of thumb: set `scheduler_decay_steps ≈ steps`, and `save_freq` to whatever granularity you want for eval (e.g. every 1k–5k steps). Match `scheduler_warmup_steps` proportionally if your run is very short. + +### 7.6 SmolVLA: unfreeze the vision encoder for real gains + +SmolVLA ships with `freeze_vision_encoder=True`. Unfreezing usually **improves performance substantially** on specialized tasks, at the cost of more VRAM and slower steps. Enable with: + +```bash +lerobot-train ... --policy.type=smolvla \ + --policy.freeze_vision_encoder=false \ + --policy.train_expert_only=false +``` + +### 7.7 Signals to stop / keep going + +- Train loss plateaus → stop, save a Hub checkpoint. +- Train loss still dropping and you're under 10 epochs → keep going. + +--- + +## 8. Evaluation & benchmarks + +Two flavors of evaluation: + +### 8.1 Real-robot eval (SO-101, etc.) + +Reuse `lerobot-record` with `--policy.path` to run the trained policy on-robot and save the run as an eval dataset. Convention: prefix the dataset with `eval_`. + +```bash +lerobot-record \ + --robot.type=so101_follower --robot.port= --robot.id=my_follower \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --dataset.repo_id=${HF_USER}/eval_my_task \ + --dataset.single_task="" \ + --dataset.num_episodes=10 \ + --policy.path=${HF_USER}/act_my_task +``` + +Report success rate across episodes. Compare to a teleoperated baseline and to an earlier checkpoint to catch regressions. 
+ +### 8.2 Sim-benchmark eval + +For policies trained on sim datasets (PushT, Aloha, LIBERO, MetaWorld, RoboCasa, …) use `lerobot-eval` against the matching `env.type`: + +```bash +lerobot-eval \ + --policy.path=${HF_USER}/diffusion_pusht \ + --env.type=pusht \ + --eval.n_episodes=50 \ + --eval.batch_size=10 \ + --policy.device=cuda +``` + +- Use `--policy.path=outputs/train/.../checkpoints//pretrained_model` for local checkpoints. +- `--eval.n_episodes` should be ≥ 50 for a stable success-rate estimate. +- Available envs live in `src/lerobot/envs/`. See [`docs/source/libero.mdx`](./docs/source/libero.mdx), [`metaworld.mdx`](./docs/source/metaworld.mdx), [`robocasa.mdx`](./docs/source/robocasa.mdx), [`vlabench.mdx`](./docs/source/vlabench.mdx) for specific benchmarks. +- To add a new benchmark, see [`docs/source/adding_benchmarks.mdx`](./docs/source/adding_benchmarks.mdx) and [`envhub.mdx`](./docs/source/envhub.mdx). + +### 8.2b Dockerfiles for benchmark eval + +Benchmark envs have native dependencies that are painful to install locally. 
The repo ships **pre-baked Dockerfiles** for each supported benchmark — use these to run `lerobot-eval` in a reproducible environment: + +| Benchmark | Dockerfile | +| ----------- | -------------------------------------------------------------------------------------- | +| LIBERO | [`docker/Dockerfile.benchmark.libero`](./docker/Dockerfile.benchmark.libero) | +| LIBERO+ | [`docker/Dockerfile.benchmark.libero_plus`](./docker/Dockerfile.benchmark.libero_plus) | +| MetaWorld | [`docker/Dockerfile.benchmark.metaworld`](./docker/Dockerfile.benchmark.metaworld) | +| RoboCasa | [`docker/Dockerfile.benchmark.robocasa`](./docker/Dockerfile.benchmark.robocasa) | +| RoboCerebra | [`docker/Dockerfile.benchmark.robocerebra`](./docker/Dockerfile.benchmark.robocerebra) | +| RoboMME | [`docker/Dockerfile.benchmark.robomme`](./docker/Dockerfile.benchmark.robomme) | +| RoboTwin | [`docker/Dockerfile.benchmark.robotwin`](./docker/Dockerfile.benchmark.robotwin) | +| VLABench | [`docker/Dockerfile.benchmark.vlabench`](./docker/Dockerfile.benchmark.vlabench) | + +Build and run (adapt to your benchmark): + +```bash +docker build -f docker/Dockerfile.benchmark.robomme -t lerobot-bench-robomme . +docker run --gpus all --rm -it \ + -v $HOME/.cache/huggingface:/root/.cache/huggingface \ + lerobot-bench-robomme \ + lerobot-eval --policy.path= --env.type= --eval.n_episodes=50 +``` + +See [`docker/README.md`](./docker/README.md) for base-image details. + +### 8.3 Target success rates + +Single-task grasp-and-place with 50 clean episodes: ACT should reach **> 70% success** on the training configuration. Less → data problem (see §5), not model problem. Expect a drop when generalizing to new positions — scale episodes or diversity to recover. + +--- + +## 9. 
Further reading & resources + +- **Getting started:** [`installation.mdx`](./docs/source/installation.mdx) · [`il_robots.mdx`](./docs/source/il_robots.mdx) · [What makes a good dataset](https://huggingface.co/blog/lerobot-datasets) +- **Per-policy docs:** browse [`docs/source/*.mdx`](./docs/source/) (policies, hardware, benchmarks, advanced training). +- **Community:** [Discord](https://discord.com/invite/s3KuuzsPFb) · [Hub `LeRobot` tag](https://huggingface.co/datasets?other=LeRobot) · [Dataset visualizer](https://huggingface.co/spaces/lerobot/visualize_dataset) + +> Keep this file current. If you learn a rule that would prevent a class of user mistakes, add it here and in [`AGENTS.md`](./AGENTS.md). diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 60df93b27..315be3b5d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -78,6 +78,9 @@ Use the templates for required fields and examples. - **Issues:** Follow the [ticket template](https://github.com/huggingface/lerobot/blob/main/.github/ISSUE_TEMPLATE/bug-report.yml). - **Pull requests:** Rebase on `upstream/main`, use a descriptive branch (don't work on `main`), run `pre-commit` and tests locally, and follow the [PR template](https://github.com/huggingface/lerobot/blob/main/.github/PULL_REQUEST_TEMPLATE.md). -One member of the LeRobot team will then review your contribution. +> [!IMPORTANT] +> Community Review Policy: To help scale our efforts and foster a collaborative environment, we ask contributors to review at least one other person's open PR before their own receives attention. This shared responsibility multiplies our review capacity and helps everyone's code get merged faster! + +Once you have submitted your PR and completed a peer review, a member of the LeRobot team will review your contribution. Thank you for contributing to LeRobot! 
diff --git a/docker/Dockerfile.benchmark.libero_plus b/docker/Dockerfile.benchmark.libero_plus
new file mode 100644
index 000000000..5911329a4
--- /dev/null
+++ b/docker/Dockerfile.benchmark.libero_plus
@@ -0,0 +1,84 @@
+# Copyright 2026 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Benchmark image for LIBERO-plus integration tests.
+# Extends the nightly GPU image (which has lerobot[all]) with the LIBERO-plus
+# fork source + its 6.4 GB perturbation assets.
+#
+# Build: docker build -f docker/Dockerfile.benchmark.libero_plus -t lerobot-benchmark-libero-plus .
+# Run: docker run --gpus all --rm lerobot-benchmark-libero-plus lerobot-eval ...
+
+FROM huggingface/lerobot-gpu:latest
+ENV MUJOCO_GL=egl
+
+# unzip for the 6.4 GB assets.zip; the rest are LIBERO-plus build-time extras
+# (wand / ImageMagick / fontconfig) not in the nightly base.
+USER root
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+    unzip libexpat1 libfontconfig1-dev libmagickwand-dev \
+    && apt-get clean && rm -rf /var/lib/apt/lists/*
+USER user_lerobot
+
+# robosuite==1.4.1 is mandatory (the fork uses `single_arm_env` removed in
+# v1.5+). The rest are LIBERO-plus runtime deps pulled from its setup.py.
+# We install these explicitly instead of via the [libero_plus] extra because
+# the extra's `libero @ git+...` dep installs as a namespace package that
+# would shadow the LIBERO-plus fork we clone and PYTHONPATH-override below.
+RUN uv pip install --no-cache \ + "robosuite==1.4.1" \ + "bddl==1.0.1" \ + "easydict==1.13" \ + "mujoco==3.7.0" \ + "matplotlib==3.10.8" \ + "Wand==0.6.13" \ + "scikit-image==0.25.2" \ + "gym==0.26.2" + +# Clone LIBERO-plus and make it importable as `libero`. The nightly base has +# hf-libero (10 tasks) preinstalled via lerobot[libero]; uninstall it so +# Python resolves `import libero` to the 2402-task LIBERO-plus module instead. +# Pinned to the current upstream main SHA so benchmark builds stay reproducible. +ARG LIBERO_PLUS_SHA=4976dc3 +ENV LIBERO_PLUS_ROOT=/home/user_lerobot/libero-plus/libero/libero +RUN git clone https://github.com/sylvestf/LIBERO-plus.git /home/user_lerobot/libero-plus \ + && git -C /home/user_lerobot/libero-plus checkout ${LIBERO_PLUS_SHA} \ + && cd /home/user_lerobot/libero-plus && uv pip install --no-cache --no-deps -e "." \ + && (uv pip uninstall hf-libero 2>/dev/null || true) +ENV PYTHONPATH="/home/user_lerobot/libero-plus:${PYTHONPATH}" + +# Perturbation textures/scenes: bddl_base_domain.py resolves XMLs via +# DIR_PATH/../assets (package-relative, ignoring ~/.libero/config.yaml). All +# 2402 tasks reference files that ship only in Sylvest/LIBERO-plus's +# assets.zip (6.4 GB) under a deep author-internal prefix — extract and +# flatten it under ${LIBERO_PLUS_ROOT}/assets. +RUN python -c "\ +from huggingface_hub import hf_hub_download; \ +hf_hub_download(repo_id='Sylvest/LIBERO-plus', repo_type='dataset', \ + filename='assets.zip', local_dir='/tmp/libero-plus-dl')" \ + && unzip -q /tmp/libero-plus-dl/assets.zip -d /tmp/libero-plus-dl/extract \ + && ASSETS_DIR=$(find /tmp/libero-plus-dl/extract -type d -name assets | head -1) \ + && mv "${ASSETS_DIR}" ${LIBERO_PLUS_ROOT}/assets \ + && rm -rf /tmp/libero-plus-dl + +# Point ~/.libero/config.yaml at the clone so LIBERO-plus's imports are +# non-interactive (it calls input() when the config is missing). 
+RUN mkdir -p /home/user_lerobot/.libero \ + && printf "assets: ${LIBERO_PLUS_ROOT}/assets\nbddl_files: ${LIBERO_PLUS_ROOT}/bddl_files\ndatasets: ${LIBERO_PLUS_ROOT}/../datasets\ninit_states: ${LIBERO_PLUS_ROOT}/init_files\n" \ + > /home/user_lerobot/.libero/config.yaml + +# Overlay the PR's source code on top of the nightly image. +COPY --chown=user_lerobot:user_lerobot . . + +CMD ["/bin/bash"] diff --git a/docker/Dockerfile.benchmark.robocasa b/docker/Dockerfile.benchmark.robocasa new file mode 100644 index 000000000..9de1612cb --- /dev/null +++ b/docker/Dockerfile.benchmark.robocasa @@ -0,0 +1,71 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Benchmark image for RoboCasa365 integration tests. +# Extends the nightly GPU image (which already has all extras installed) +# with the PR's source code and RoboCasa-specific asset setup. +# +# Build: docker build -f docker/Dockerfile.benchmark.robocasa -t lerobot-benchmark-robocasa . +# Run: docker run --gpus all --rm lerobot-benchmark-robocasa lerobot-eval ... + +FROM huggingface/lerobot-gpu:latest + +# Install robocasa + robosuite as editable clones. pip-installing from git +# omits data files like robocasa/models/assets/box_links/box_links_assets.json +# (not declared in package_data), which download_kitchen_assets needs at import. 
+# +# `--no-deps` on robocasa is deliberate: its setup.py pins `lerobot==0.3.3` +# in install_requires, which would shadow the editable lerobot baked into +# this image. We install robocasa's actual runtime deps explicitly instead. +# Pinned SHAs for reproducible benchmark runs. Bump when you need an +# upstream fix; don't rely on `main`/`master` drift. +ARG ROBOCASA_SHA=56e355ccc64389dfc1b8a61a33b9127b975ba681 +ARG ROBOSUITE_SHA=aaa8b9b214ce8e77e82926d677b4d61d55e577ab +RUN git clone https://github.com/robocasa/robocasa.git ~/robocasa && \ + git -C ~/robocasa checkout ${ROBOCASA_SHA} && \ + git clone https://github.com/ARISE-Initiative/robosuite.git ~/robosuite && \ + git -C ~/robosuite checkout ${ROBOSUITE_SHA} && \ + uv pip install --no-cache -e ~/robocasa --no-deps && \ + uv pip install --no-cache -e ~/robosuite && \ + uv pip install --no-cache \ + "numpy==2.2.5" "numba==0.61.2" "scipy==1.15.3" "mujoco==3.3.1" \ + "pygame==2.6.1" "Pillow==12.2.0" "opencv-python==4.13.0.92" \ + "pyyaml==6.0.3" "pynput==1.8.1" "tqdm==4.67.3" "termcolor==3.3.0" \ + "imageio==2.37.3" "h5py==3.16.0" "lxml==6.0.4" "hidapi==0.14.0.post4" \ + "tianshou==0.4.10" "gymnasium==1.2.3" + +# Set up robocasa macros and download kitchen assets. We need: +# - tex : base environment textures +# - tex_generative : AI-generated textures; kitchen fixture XMLs embed +# refs to generative_textures/wall/tex*.png +# unconditionally, so MjModel.from_xml_string fails +# at reset time without them (even if the env is +# constructed with generative_textures=None). +# - fixtures_lw : lightwheel kitchen fixtures (fridge, counters...) +# - objs_lw : lightwheel object meshes (stools, misc props) +# We skip the objaverse/aigen object packs (~30GB combined) by pairing +# this with --env.obj_registries=["lightwheel"] on the lerobot side. +# The download script prompts interactively, so pipe 'y' to auto-accept. 
+RUN python -m robocasa.scripts.setup_macros && \ + yes y | python -m robocasa.scripts.download_kitchen_assets \ + --type tex tex_generative fixtures_lw objs_lw + +# Overlay the PR's source code on top of the nightly image. +COPY --chown=user_lerobot:user_lerobot . . + +# Re-install lerobot editably so the new source (with RoboCasaEnv registration) +# replaces the stale package baked into the nightly image. +RUN uv pip install --no-cache --no-deps -e . + +CMD ["/bin/bash"] diff --git a/docker/Dockerfile.benchmark.robocerebra b/docker/Dockerfile.benchmark.robocerebra new file mode 100644 index 000000000..9378bd66a --- /dev/null +++ b/docker/Dockerfile.benchmark.robocerebra @@ -0,0 +1,43 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Benchmark image for RoboCerebra integration tests. +# RoboCerebra reuses LIBERO's simulator (libero_10 suite) with a different +# rename_map, so this image is identical to the LIBERO benchmark image — +# extends the nightly GPU base with LIBERO assets + the PR's source code. +# +# Build: docker build -f docker/Dockerfile.benchmark.robocerebra -t lerobot-benchmark-robocerebra . +# Run: docker run --gpus all --rm lerobot-benchmark-robocerebra lerobot-eval ... + +FROM huggingface/lerobot-gpu:latest + +# Pre-download lerobot/libero-assets from HF Hub so nothing is fetched at +# runtime (which times out on CI). Point the libero config at the cached path. 
+# libero/libero/__init__.py calls input() when ~/.libero/config.yaml is missing, +# so we write the config before any libero import can happen. +RUN LIBERO_DIR=$(python -c \ + "import importlib.util, os; s=importlib.util.find_spec('libero'); \ + print(os.path.join(os.path.dirname(s.origin), 'libero'))") && \ + mkdir -p /home/user_lerobot/.libero && \ + python -c "\ +from huggingface_hub import snapshot_download; \ +snapshot_download(repo_id='lerobot/libero-assets', repo_type='dataset', \ + local_dir='/home/user_lerobot/.libero/assets')" && \ + printf "assets: /home/user_lerobot/.libero/assets\nbddl_files: ${LIBERO_DIR}/bddl_files\ndatasets: ${LIBERO_DIR}/../datasets\ninit_states: ${LIBERO_DIR}/init_files\n" \ + > /home/user_lerobot/.libero/config.yaml + +# Overlay the PR's source code on top of the nightly image. +COPY --chown=user_lerobot:user_lerobot . . + +CMD ["/bin/bash"] diff --git a/docker/Dockerfile.benchmark.robomme b/docker/Dockerfile.benchmark.robomme new file mode 100644 index 000000000..2bfc83b4f --- /dev/null +++ b/docker/Dockerfile.benchmark.robomme @@ -0,0 +1,56 @@ +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Benchmark image for RoboMME integration tests. +# Extends the nightly GPU image (which has lerobot[all]) with Vulkan system +# libs for ManiSkill/SAPIEN and the robomme extra. 
robomme isn't in [all] +# because mani-skill hard-pins gymnasium==0.29.1 and numpy<2.0.0 which +# conflict with lerobot's defaults; both are safe at runtime: +# - gymnasium 0.29.x has the same 5-tuple step() API as 1.x (since 0.26) +# - numpy 1.26.4 is API-compatible with lerobot's actual usage. +# +# Build: docker build -f docker/Dockerfile.benchmark.robomme -t lerobot-benchmark-robomme . +# Run: docker run --gpus all --rm lerobot-benchmark-robomme lerobot-eval ... + +FROM huggingface/lerobot-gpu:latest + +# NVIDIA Container Toolkit: expose Vulkan driver capability for headless rendering. +ENV NVIDIA_DRIVER_CAPABILITIES=all \ + VK_ICD_FILENAMES=/usr/share/vulkan/icd.d/nvidia_icd.json + +# ManiSkill/SAPIEN's renderer needs Vulkan, which isn't in the base image. +USER root +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + libvulkan1 libvulkan-dev mesa-vulkan-drivers \ + && mkdir -p /usr/share/vulkan/icd.d \ + && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libGLX_nvidia.so.0","api_version":"1.3.0"}}' \ + > /usr/share/vulkan/icd.d/nvidia_icd.json \ + && apt-get clean && rm -rf /var/lib/apt/lists/* +USER user_lerobot + +# Install smolvla + av-dep via the PR's pyproject, then layer robomme on top +# with gymnasium/numpy overrides. robomme isn't a pyproject extra because its +# mani-skill pin conflicts with lerobot's base numpy>=2 (see pyproject.toml). +COPY --chown=user_lerobot:user_lerobot setup.py pyproject.toml uv.lock README.md MANIFEST.in ./ +RUN printf 'gymnasium==0.29.1\nnumpy==1.26.4\n' > /tmp/robomme_override.txt \ + && uv pip install --no-cache --override /tmp/robomme_override.txt \ + -e ".[smolvla,av-dep]" \ + "robomme @ git+https://github.com/RoboMME/robomme_benchmark.git@main" \ + && python -c "import robomme; print('robomme import OK')" + +# Overlay the PR's source code on top of the nightly image. +COPY --chown=user_lerobot:user_lerobot . . 
+ +CMD ["/bin/bash"] diff --git a/docker/Dockerfile.benchmark.robotwin b/docker/Dockerfile.benchmark.robotwin new file mode 100644 index 000000000..57ee21f4b --- /dev/null +++ b/docker/Dockerfile.benchmark.robotwin @@ -0,0 +1,138 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Benchmark image for RoboTwin 2.0 integration tests. +# Extends the nightly GPU image with the RoboTwin simulator stack: +# sapien/mplib/pytorch3d + NVlabs CuRobo + embodiments.zip + objects.zip +# (~3.96 GB of assets; background_texture.zip ~11 GB skipped for smoke eval). +# +# Build: docker build -f docker/Dockerfile.benchmark.robotwin -t lerobot-benchmark-robotwin . +# Run: docker run --gpus all --rm lerobot-benchmark-robotwin \ +# lerobot-eval --env.type=robotwin --env.task=beat_block_hammer ... + +FROM huggingface/lerobot-gpu:latest + +ENV NVIDIA_DRIVER_CAPABILITIES=all \ + VK_ICD_FILENAMES=/usr/share/vulkan/icd.d/nvidia_icd.json \ + ROBOTWIN_ROOT=/opt/robotwin + +# The nightly base is CUDA -base (no compiler, no Vulkan loader). CuRobo's +# `pip install -e .` runs nvcc, and SAPIEN renders via Vulkan — add both. +USER root +# Pinned upstream SHA for reproducible benchmark runs. Bump when we need +# an upstream fix; don't rely on `main` drift. 
+ARG ROBOTWIN_SHA=0aeea2d669c0f8516f4d5785f0aa33ba812c14b4 +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + cuda-nvcc-12-4 cuda-cudart-dev-12-4 \ + libvulkan1 vulkan-tools \ + && mkdir -p /usr/share/vulkan/icd.d \ + && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libGLX_nvidia.so.0","api_version":"1.3.0"}}' \ + > /usr/share/vulkan/icd.d/nvidia_icd.json \ + && git clone https://github.com/RoboTwin-Platform/RoboTwin.git ${ROBOTWIN_ROOT} \ + && git -C ${ROBOTWIN_ROOT} checkout ${ROBOTWIN_SHA} \ + && chown -R user_lerobot:user_lerobot ${ROBOTWIN_ROOT} \ + && apt-get clean && rm -rf /var/lib/apt/lists/* +USER user_lerobot + +# RoboTwin runtime deps (av is already in the base via [av-dep]). +RUN uv pip install --no-cache \ + "sapien==3.0.0b1" "mplib==0.2.1" "transforms3d==0.4.2" "trimesh==4.4.3" \ + "open3d==0.19.0" "imageio==2.34.2" termcolor zarr pydantic h5py + +# pytorch3d has no universal wheel; must be built from source (~10 min, cached). +RUN uv pip install --no-cache --no-build-isolation \ + "git+https://github.com/facebookresearch/pytorch3d.git@stable" + +# CuRobo — NVlabs motion generator; TORCH_CUDA_ARCH_LIST must be set or the +# build aborts on an empty arch list. RoboTwin's own installer pins v0.7.8, +# which still exposes the v1 API (`curobo.types.math`) that RoboTwin imports. +ARG CUROBO_REF=v0.7.8 +RUN cd ${ROBOTWIN_ROOT}/envs \ + && git clone --branch ${CUROBO_REF} --depth 1 https://github.com/NVlabs/curobo.git \ + && cd curobo \ + && TORCH_CUDA_ARCH_LIST="7.0;7.5;8.0;8.6;8.9;9.0" \ + uv pip install -e . --no-build-isolation --no-cache + +# Upstream patches (mirror RoboTwin's script/_install.sh). +# These patches target the exact versions pinned above; re-check when upgrading. +# mplib==0.2.1: drop a broken `or collide` clause in planner.py. +# Safe to remove once mplib > 0.2.1 ships with the fix upstream. +# sapien==3.0.0b1: fix URDF loader encoding + .srdf extension check. 
+# Safe to remove once sapien > 3.0.0b1 ships with the fix upstream. +RUN python - <<'EOF' +import pathlib, re, site +for d in site.getsitepackages(): + p = pathlib.Path(d) / "mplib" / "planner.py" + if p.exists(): + p.write_text(re.sub(r"\bor collide\b", "", p.read_text(), count=1)) + print(f"mplib patch applied: {p}") + p = pathlib.Path(d) / "sapien" / "wrapper" / "urdf_loader.py" + if p.exists(): + src = p.read_text().replace( + "with open(srdf_path) as f:", 'with open(srdf_path, encoding="utf-8") as f:' + ).replace('"srdf"', '".srdf"') + p.write_text(src) + print(f"sapien patch applied: {p}") +EOF + +# Simulation assets from TianxingChen/RoboTwin2.0: embodiments (~220 MB) + +# objects (~3.74 GB). background_texture (~11 GB) is intentionally skipped. +# The dataset is public — no auth token needed. +RUN python - <<'EOF' +import os, pathlib, zipfile +from huggingface_hub import hf_hub_download + +assets_dir = pathlib.Path(os.environ["ROBOTWIN_ROOT"]) / "assets" +assets_dir.mkdir(parents=True, exist_ok=True) +for fname in ("embodiments.zip", "objects.zip"): + local = hf_hub_download( + repo_id="TianxingChen/RoboTwin2.0", + repo_type="dataset", + filename=fname, + local_dir=str(assets_dir), + ) + with zipfile.ZipFile(local, "r") as z: + z.extractall(str(assets_dir)) + pathlib.Path(local).unlink() +EOF + +WORKDIR ${ROBOTWIN_ROOT} +RUN python script/update_embodiment_config_path.py + +ENV PYTHONPATH="${ROBOTWIN_ROOT}" + +# Fail the image build early if the CuRobo package layout regresses. Importing +# RoboTwin's planner here is too eager because CuRobo constructs CUDA-backed +# defaults at import time, while Docker builds don't have access to an NVIDIA +# driver. 
+RUN python - <<'EOF' +from pathlib import Path + +from curobo.types.math import Pose + +planner_src = (Path("/opt/robotwin/envs/robot/planner.py")).read_text() +assert "from curobo.types.math import Pose as CuroboPose" in planner_src + +print("CuRobo import OK:", Pose.__name__) +print("RoboTwin planner import references curobo.types.math") +EOF + +# Return to the lerobot source directory (set by base image) before overlaying. +WORKDIR /lerobot + +# Overlay the PR's source code on top of the nightly image. +COPY --chown=user_lerobot:user_lerobot . . + +CMD ["/bin/bash"] diff --git a/docker/Dockerfile.benchmark.vlabench b/docker/Dockerfile.benchmark.vlabench new file mode 100644 index 000000000..13502a3e3 --- /dev/null +++ b/docker/Dockerfile.benchmark.vlabench @@ -0,0 +1,99 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Benchmark image for VLABench integration tests. +# Extends the nightly GPU image with the PR's source code and VLABench setup. +# +# Build: docker build -f docker/Dockerfile.benchmark.vlabench -t lerobot-benchmark-vlabench . +# Run: docker run --gpus all --rm lerobot-benchmark-vlabench lerobot-eval ... + +FROM huggingface/lerobot-gpu:latest + +# Install VLABench from GitHub (not on PyPI) and pin MuJoCo/dm-control. +# Full (non-shallow) clone without submodule recursion — checking out a pinned SHA needs history, and nested SSH-only submodules fail in CI.
+# Editable install (-e) because VLABench/utils/ has no __init__.py, so +# find_packages() omits it from wheels; editable mode uses the source tree directly. +# rrt-algorithms has the same packaging issue (rrt/ dir missing __init__.py). +# Patch: constant.py calls os.listdir on ~100 asset/obj/meshes/* dirs at import +# time. Guard the call so missing dirs return [] instead of crashing (in case +# the asset download is partial). +# +# Pinned upstream SHAs for reproducible benchmark runs. Bump when you need +# an upstream fix; don't rely on `main`/`develop` drift. +ARG VLABENCH_SHA=cf588fe60c0c7282174fe979f5913170cfe69017 +ARG RRT_ALGORITHMS_SHA=e51d95ee489a225220d6ae2a764c4111f6ba7d85 +RUN git clone https://github.com/OpenMOSS/VLABench.git ~/VLABench && \ + git -C ~/VLABench checkout ${VLABENCH_SHA} && \ + git clone https://github.com/motion-planning/rrt-algorithms.git ~/rrt-algorithms && \ + git -C ~/rrt-algorithms checkout ${RRT_ALGORITHMS_SHA} && \ + python3 -c "\ +import pathlib; \ +p = pathlib.Path.home() / 'VLABench/VLABench/configs/constant.py'; \ +t = p.read_text(); \ +p.write_text(t.replace( \ + 'subdirs = os.listdir(xml_dir)', \ + 'if not os.path.isdir(xml_dir): return []\n subdirs = os.listdir(xml_dir)'))" && \ + uv pip install --no-cache -e ~/VLABench -e ~/rrt-algorithms \ + mujoco==3.2.2 dm-control==1.0.22 \ + open3d colorlog scikit-learn openai gdown + +# Download VLABench mesh assets. Task configs reference object meshes +# (obj/meshes/fruit/, containers/basket/, tablewares/plates/, etc.); without +# them the task builder picks from an empty mesh list and crashes with +# IndexError at task-build time (random.choice([]) in config_manager.py). +# +# Preferred source: an HF Hub mirror. Set VLABENCH_ASSETS_REPO at build time +# (e.g. --build-arg VLABENCH_ASSETS_REPO=lerobot/vlabench-assets) and we'll +# snapshot_download the repo into VLABench's assets dir. 
This is the reliable +# path for CI — Google Drive frequently returns HTTP 429 ("Too many users have +# viewed or downloaded this file recently") on shared academic files. +# +# After download we *validate* that at least one XML exists under each +# task-critical subtree and fail the build loudly if not. Silent-empty asset +# dirs are the #1 cause of VLABench runtime crashes in CI, so we surface them +# here rather than after a 10-minute eval build. +# +# Fallback: VLABench's own gdown-based script. Best-effort only. +ARG VLABENCH_ASSETS_REPO="" +RUN ASSETS_DIR="$HOME/VLABench/VLABench/assets" && \ + if [ -n "${VLABENCH_ASSETS_REPO}" ]; then \ + echo "Downloading VLABench assets from HF Hub: ${VLABENCH_ASSETS_REPO}" && \ + uv pip install --no-cache "huggingface_hub[hf_xet]>=0.26" && \ + python -c "from huggingface_hub import snapshot_download; \ +p = snapshot_download(repo_id='${VLABENCH_ASSETS_REPO}', repo_type='dataset', \ + local_dir='${ASSETS_DIR}', allow_patterns=['obj/**', 'scenes/**']); \ +print('snapshot_download returned:', p)"; \ + else \ + echo "No VLABENCH_ASSETS_REPO set — falling back to gdown" && \ + python ~/VLABench/scripts/download_assets.py --choice all; \ + fi && \ + python -c "\ +from pathlib import Path; \ +import sys; \ +root = Path('${ASSETS_DIR}'); \ +checks = ['obj/meshes/tablewares/plates', 'obj/meshes/containers/basket', 'obj/meshes/fruit', 'obj/meshes/containers/tray']; \ +failed = []; \ +print(f'Validating VLABench assets under {root}'); \ +[print(f' {c}: {len(list((root/c).rglob(\"*.xml\")))} XMLs') for c in checks]; \ +[failed.append(c) for c in checks if not any((root/c).rglob('*.xml'))]; \ +sys.exit(f'Empty asset dirs (no *.xml): {failed}') if failed else print('All asset dirs populated.')" + +# Overlay the PR's source code on top of the nightly image. +COPY --chown=user_lerobot:user_lerobot . . 
+ +# Re-install lerobot editably so the new source (with VLABenchEnv registration +# and updated obs handling) replaces the stale package baked into the nightly image. +RUN uv pip install --no-cache --no-deps -e . + +CMD ["/bin/bash"] diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml index 3dcba5993..01e8bfb76 100644 --- a/docs/source/_toctree.yml +++ b/docs/source/_toctree.yml @@ -61,6 +61,8 @@ title: SARM title: "Reward Models" - sections: + - local: inference + title: Policy Deployment (lerobot-rollout) - local: async title: Use Async Inference - local: rtc @@ -77,10 +79,22 @@ title: Adding a New Benchmark - local: libero title: LIBERO + - local: libero_plus + title: LIBERO-plus - local: metaworld title: Meta-World + - local: robotwin + title: RoboTwin 2.0 + - local: robocasa + title: RoboCasa365 + - local: robocerebra + title: RoboCerebra + - local: robomme + title: RoboMME - local: envhub_isaaclab_arena title: NVIDIA IsaacLab Arena Environments + - local: vlabench + title: VLABench title: "Benchmarks" - sections: - local: introduction_processors diff --git a/docs/source/hil_data_collection.mdx b/docs/source/hil_data_collection.mdx index c4839577f..ba68959d1 100644 --- a/docs/source/hil_data_collection.mdx +++ b/docs/source/hil_data_collection.mdx @@ -50,30 +50,30 @@ This process can be repeated iteratively: deploy, collect, fine-tune, repeat. Ea ### Teleoperator Requirements -The `examples/hil` HIL scripts require **teleoperators with active motors** that can: +The `lerobot-rollout --strategy.type=dagger` mode requires **teleoperators with active motors** that can: - Enable/disable torque programmatically - Move to target positions (to mirror the robot state when pausing) -**Compatible teleoperators in the current `examples/hil` scripts:** +**Compatible teleoperators:** - `openarm_mini` - OpenArm Mini - `so_leader` - SO100 / SO101 leader arm > [!IMPORTANT] -> The provided `examples/hil` commands default to `bi_openarm_follower` + `openarm_mini`. 
+> The provided commands default to `bi_openarm_follower` + `openarm_mini`. > `so_follower` + `so_leader` configs are also registered and can be used via CLI flags. --- ## Script -A single script handles both synchronous and RTC-based inference. Toggle RTC with `--rtc.enabled=true`: +Use `lerobot-rollout` with `--strategy.type=dagger` for HIL data collection. Select the inference backend with `--inference.type=sync|rtc`: -| Mode | Flag | Models | -| ------------------------ | -------------------- | --------------------- | -| Standard (default) | _(no flag needed)_ | ACT, Diffusion Policy | -| Real-Time Chunking (RTC) | `--rtc.enabled=true` | Pi0, Pi0.5, SmolVLA | +| Mode | Flag | Models | +| ------------------------ | ---------------------- | --------------------- | +| Standard (default) | _(no flag needed)_ | ACT, Diffusion Policy | +| Real-Time Chunking (RTC) | `--inference.type=rtc` | Pi0, Pi0.5, SmolVLA | --- @@ -97,7 +97,7 @@ python src/lerobot/scripts/lerobot_train.py \ **Standard inference (ACT, Diffusion Policy):** ```bash -python examples/hil/hil_data_collection.py \ +lerobot-rollout --strategy.type=dagger \ --robot.type=bi_openarm_follower \ --robot.left_arm_config.port=can1 \ --robot.left_arm_config.side=left \ @@ -108,11 +108,10 @@ python examples/hil/hil_data_collection.py \ --teleop.port_left=/dev/ttyACM0 \ --teleop.port_right=/dev/ttyACM1 \ --policy.path=outputs/pretrain/checkpoints/last/pretrained_model \ - --dataset.repo_id=your-username/hil-dataset \ + --dataset.repo_id=your-username/rollout_hil_dataset \ --dataset.single_task="Fold the T-shirt properly" \ --dataset.fps=30 \ - --dataset.episode_time_s=1000 \ - --dataset.num_episodes=50 \ + --strategy.num_episodes=50 \ --interpolation_multiplier=2 ``` @@ -121,11 +120,11 @@ python examples/hil/hil_data_collection.py \ For models with high inference latency, enable RTC for smooth execution: ```bash -python examples/hil/hil_data_collection.py \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - 
--rtc.max_guidance_weight=5.0 \ - --rtc.prefix_attention_schedule=LINEAR \ +lerobot-rollout --strategy.type=dagger \ + --inference.type=rtc \ + --inference.rtc.execution_horizon=20 \ + --inference.rtc.max_guidance_weight=5.0 \ + --inference.rtc.prefix_attention_schedule=LINEAR \ --robot.type=bi_openarm_follower \ --robot.left_arm_config.port=can1 \ --robot.left_arm_config.side=left \ @@ -136,11 +135,10 @@ python examples/hil/hil_data_collection.py \ --teleop.port_left=/dev/ttyACM0 \ --teleop.port_right=/dev/ttyACM1 \ --policy.path=outputs/pretrain/checkpoints/last/pretrained_model \ - --dataset.repo_id=your-username/hil-rtc-dataset \ + --dataset.repo_id=your-username/rollout_hil_rtc_dataset \ --dataset.single_task="Fold the T-shirt properly" \ --dataset.fps=30 \ - --dataset.episode_time_s=1000 \ - --dataset.num_episodes=50 \ + --strategy.num_episodes=50 \ --interpolation_multiplier=3 ``` @@ -235,7 +233,7 @@ This HIL data collection approach builds on ideas from interactive imitation lea - **HG-DAgger** (Kelly et al., 2019) made this practical for robotics: a human expert monitors the robot and only intervenes when needed, rather than labeling every state. The gating between autonomous and human control is exactly the pause → takeover → return-to-policy loop used in the scripts here. -- **RaC** (Hu et al., 2025) scales this loop to long-horizon tasks by explicitly decomposing interventions into **recovery** (teleoperating back to a good state) and **correction** (demonstrating the right behavior from there). This decomposition is the protocol followed by the HIL scripts in `examples/hil`. +- **RaC** (Hu et al., 2025) scales this loop to long-horizon tasks by explicitly decomposing interventions into **recovery** (teleoperating back to a good state) and **correction** (demonstrating the right behavior from there). This decomposition is the protocol followed by the DAgger strategy in `lerobot-rollout`. 
- **π0.6/RECAP** (Physical Intelligence, 2025) applies the same iterative collect-and-finetune loop at scale with VLA models, showing that even large pretrained policies benefit substantially from targeted human corrections on their own failure modes. π0.6 is trained using RECAP. diff --git a/docs/source/il_robots.mdx b/docs/source/il_robots.mdx index d03e35d8d..ff0a6229e 100644 --- a/docs/source/il_robots.mdx +++ b/docs/source/il_robots.mdx @@ -32,6 +32,12 @@ Once you’ve gathered enough trajectories, you’ll train a neural network to i If you run into any issues at any point, jump into our [Discord community](https://discord.com/invite/s3KuuzsPFb) for support. + + +Want to quickly get the right commands for your setup? The [quickstart notebook](https://github.com/huggingface/lerobot/blob/main/examples/notebooks/quickstart.ipynb) [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/lerobot/blob/main/examples/notebooks/quickstart.ipynb) lets you configure your robot once and generates all the commands below ready to paste. + + + ## Set up and Calibrate If you haven't yet set up and calibrated your robot and teleop device, please do so by following the robot-specific tutorial. @@ -503,121 +509,42 @@ hf upload ${HF_USER}/act_so101_test${CKPT} \ ## Run inference and evaluate your policy -You can use the `record` script from [`lerobot-record`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/lerobot_record.py) with a policy checkpoint as input, to run inference and evaluate your policy. For instance, run this command or API example to run inference and record 10 evaluation episodes: +Use `lerobot-rollout` to deploy a trained policy on your robot. 
You can choose different strategies depending on your needs: - + ```bash -lerobot-record \ +lerobot-rollout \ + --strategy.type=base \ + --policy.path=${HF_USER}/my_policy \ --robot.type=so100_follower \ --robot.port=/dev/ttyACM1 \ --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \ - --robot.id=my_awesome_follower_arm \ - --display_data=false \ - --dataset.repo_id=${HF_USER}/eval_so100 \ - --dataset.single_task="Put lego brick into the transparent box" \ - --dataset.streaming_encoding=true \ - --dataset.encoder_threads=2 \ - # --dataset.vcodec=auto \ - # <- Teleop optional if you want to teleoperate in between episodes \ - # --teleop.type=so100_leader \ - # --teleop.port=/dev/ttyACM0 \ - # --teleop.id=my_awesome_leader_arm \ - --policy.path=${HF_USER}/my_policy + --task="Put lego brick into the transparent box" \ + --duration=60 ``` - - - -```python -from lerobot.cameras.opencv import OpenCVCameraConfig -from lerobot.datasets import LeRobotDataset -from lerobot.utils.feature_utils import hw_to_dataset_features -from lerobot.policies.act import ACTPolicy -from lerobot.policies import make_pre_post_processors -from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig -from lerobot.scripts.lerobot_record import record_loop -from lerobot.common.control_utils import init_keyboard_listener -from lerobot.utils.utils import log_say -from lerobot.utils.visualization_utils import init_rerun - - -NUM_EPISODES = 5 -FPS = 30 -EPISODE_TIME_SEC = 60 -TASK_DESCRIPTION = "My task description" -HF_MODEL_ID = "/" -HF_DATASET_ID = "/" - -# Create the robot configuration -camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} -robot_config = SO100FollowerConfig( - port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm", cameras=camera_config -) - -# Initialize the 
robot -robot = SO100Follower(robot_config) - -# Initialize the policy -policy = ACTPolicy.from_pretrained(HF_MODEL_ID) - -# Configure the dataset features -action_features = hw_to_dataset_features(robot.action_features, "action") -obs_features = hw_to_dataset_features(robot.observation_features, "observation") -dataset_features = {**action_features, **obs_features} - -# Create the dataset -dataset = LeRobotDataset.create( - repo_id=HF_DATASET_ID, - fps=FPS, - features=dataset_features, - robot_type=robot.name, - use_videos=True, - image_writer_threads=4, -) - -# Initialize the keyboard listener and rerun visualization -_, events = init_keyboard_listener() -init_rerun(session_name="recording") - -# Connect the robot -robot.connect() - -preprocessor, postprocessor = make_pre_post_processors( - policy_cfg=policy, - pretrained_path=HF_MODEL_ID, - dataset_stats=dataset.meta.stats, -) - -for episode_idx in range(NUM_EPISODES): - log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}") - - # Run the policy inference loop - record_loop( - robot=robot, - events=events, - fps=FPS, - policy=policy, - preprocessor=preprocessor, - postprocessor=postprocessor, - dataset=dataset, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - ) - - dataset.save_episode() - -# Clean up -robot.disconnect() -dataset.push_to_hub() + +```bash +lerobot-rollout \ + --strategy.type=sentry \ + --strategy.upload_every_n_episodes=5 \ + --policy.path=${HF_USER}/my_policy \ + --robot.type=so100_follower \ + --robot.port=/dev/ttyACM1 \ + --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \ + --dataset.repo_id=${HF_USER}/eval_so100 \ + --dataset.single_task="Put lego brick into the transparent box" \ + --duration=600 ``` - - -As you can see, it's almost the same command as previously 
used to record your training dataset. Two things changed: +The `--strategy.type` flag selects the execution mode: -1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_so101_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so101_test`). -2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so101_test`). +- `base`: Autonomous rollout with no data recording (useful for quick evaluation) +- `sentry`: Continuous recording with auto-upload (useful for large-scale evaluation) +- `highlight`: Ring buffer recording with keystroke save (useful for capturing interesting events) +- `dagger`: Human-in-the-loop data collection (see [HIL Data Collection](./hil_data_collection)) + +All strategies support `--inference.type=rtc` for smooth execution with slow VLA models (Pi0, Pi0.5, SmolVLA). diff --git a/docs/source/inference.mdx b/docs/source/inference.mdx new file mode 100644 index 000000000..b2874d823 --- /dev/null +++ b/docs/source/inference.mdx @@ -0,0 +1,261 @@ +# Policy Deployment (lerobot-rollout) + +`lerobot-rollout` is the single CLI for deploying trained policies on real robots. It supports multiple execution strategies and inference backends, from quick evaluation to continuous recording and human-in-the-loop data collection. + +## Quick Start + +No extra dependencies are needed beyond your robot and policy extras. + +```bash +lerobot-rollout \ + --strategy.type=base \ + --policy.path=lerobot/act_koch_real \ + --robot.type=koch_follower \ + --robot.port=/dev/ttyACM0 \ + --task="pick up cube" \ + --duration=30 +``` + +This runs the policy for 30 seconds with no recording. + +--- + +## Strategies + +Select a strategy with `--strategy.type=`. 
Each strategy defines a different control loop with its own recording and interaction semantics. + +### Base (`--strategy.type=base`) + +Autonomous policy execution with no data recording. Use this for quick evaluation, demos, or when you only need to observe the robot. + +```bash +lerobot-rollout \ + --strategy.type=base \ + --policy.path=${HF_USER}/my_policy \ + --robot.type=so100_follower \ + --robot.port=/dev/ttyACM0 \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --task="Put lego brick into the box" \ + --duration=60 +``` + +| Flag | Description | +| ---------------- | ------------------------------------------------------ | +| `--duration` | Run time in seconds (0 = infinite) | +| `--task` | Task description passed to the policy | +| `--display_data` | Stream observations/actions to Rerun for visualization | + +### Sentry (`--strategy.type=sentry`) + +Continuous autonomous recording with periodic upload to the Hugging Face Hub. Episode boundaries are auto-computed from camera resolution and FPS so each saved episode produces a complete video file, keeping uploads efficient. + +Policy state (hidden state, RTC queue) persists across episode boundaries: the robot does not reset between episodes. 
+ +```bash +lerobot-rollout \ + --strategy.type=sentry \ + --strategy.upload_every_n_episodes=5 \ + --policy.path=${HF_USER}/my_policy \ + --robot.type=so100_follower \ + --robot.port=/dev/ttyACM0 \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --dataset.repo_id=${HF_USER}/rollout_eval_data \ + --dataset.single_task="Put lego brick into the box" \ + --duration=3600 +``` + +| Flag | Description | +| -------------------------------------- | ----------------------------------------------------------- | +| `--strategy.upload_every_n_episodes` | Push to Hub every N episodes (default: 5) | +| `--strategy.target_video_file_size_mb` | Target video file size for episode rotation (default: auto) | +| `--dataset.repo_id` | **Required.** Hub repository for the recorded dataset | +| `--dataset.push_to_hub` | Whether to push to Hub on teardown (default: true) | + +### Highlight (`--strategy.type=highlight`) + +Autonomous rollout with on-demand recording via a memory-bounded ring buffer. The robot runs continuously while the buffer captures the last N seconds of telemetry. Press the save key to flush the buffer and start live recording; press it again to save the episode. 
+ +```bash +lerobot-rollout \ + --strategy.type=highlight \ + --strategy.ring_buffer_seconds=30 \ + --strategy.save_key=s \ + --strategy.push_key=h \ + --policy.path=${HF_USER}/my_policy \ + --robot.type=koch_follower \ + --robot.port=/dev/ttyACM0 \ + --dataset.repo_id=${HF_USER}/rollout_highlight_data \ + --dataset.single_task="Pick up the red cube" +``` + +**Keyboard controls:** + +| Key | Action | +| ------------------ | -------------------------------------------------------- | +| `s` (configurable) | Start recording (flushes buffer) / stop and save episode | +| `h` (configurable) | Push dataset to Hub | +| `ESC` | Stop the session | + +| Flag | Description | +| -------------------------------------- | ---------------------------------------------- | +| `--strategy.ring_buffer_seconds` | Duration of buffered telemetry (default: 30) | +| `--strategy.ring_buffer_max_memory_mb` | Memory cap for the ring buffer (default: 2048) | +| `--strategy.save_key` | Key to toggle recording (default: `s`) | +| `--strategy.push_key` | Key to push to Hub (default: `h`) | + +### DAgger (`--strategy.type=dagger`) + +Human-in-the-loop data collection. Alternates between autonomous policy execution and human intervention via a teleoperator. Intervention frames are tagged with `intervention=True`. Requires a teleoperator (`--teleop.type`). + +See the [Human-In-the-Loop Data Collection](./hil_data_collection) guide for a detailed walkthrough. + +**Corrections-only mode** (default): Only human correction windows are recorded. Each correction becomes one episode. 
+ +```bash +lerobot-rollout \ + --strategy.type=dagger \ + --strategy.num_episodes=20 \ + --policy.path=outputs/pretrain/checkpoints/last/pretrained_model \ + --robot.type=bi_openarm_follower \ + --teleop.type=openarm_mini \ + --dataset.repo_id=${HF_USER}/rollout_hil_data \ + --dataset.single_task="Fold the T-shirt" +``` + +**Continuous recording mode** (`--strategy.record_autonomous=true`): Both autonomous and correction frames are recorded with time-based episode rotation (same as Sentry). + +```bash +lerobot-rollout \ + --strategy.type=dagger \ + --strategy.record_autonomous=true \ + --strategy.num_episodes=50 \ + --policy.path=${HF_USER}/my_policy \ + --robot.type=so100_follower \ + --robot.port=/dev/ttyACM0 \ + --teleop.type=so101_leader \ + --teleop.port=/dev/ttyACM1 \ + --dataset.repo_id=${HF_USER}/rollout_dagger_data \ + --dataset.single_task="Grasp the block" +``` + +**Keyboard controls** (default input device): + +| Key | Action | +| ------- | ------------------------------------------- | +| `Space` | Pause / resume policy execution | +| `Tab` | Start / stop human correction | +| `Enter` | Push dataset to Hub (corrections-only mode) | +| `ESC` | Stop the session | + +Foot pedal input is also supported via `--strategy.input_device=pedal`. Configure pedal codes with `--strategy.pedal.*` flags. + +| Flag | Description | +| ------------------------------------ | ------------------------------------------------------- | +| `--strategy.num_episodes` | Number of correction episodes to record (default: 10) | +| `--strategy.record_autonomous` | Record autonomous frames too (default: false) | +| `--strategy.upload_every_n_episodes` | Push to Hub every N episodes (default: 5) | +| `--strategy.input_device` | Input device: `keyboard` or `pedal` (default: keyboard) | +| `--teleop.type` | **Required.** Teleoperator type | + +--- + +## Inference Backends + +Select a backend with `--inference.type=`. All strategies work with both backends. 
+ +### Sync (default) + +One policy call per control tick. The main loop blocks until the action is computed. + +Works with all policies. No extra flags needed. + +### Real-Time Chunking (`--inference.type=rtc`) + +A background thread produces action chunks asynchronously. The main control loop polls for the next ready action while the policy computes the next chunk in parallel. + +Use RTC with large, slow VLA models (Pi0, Pi0.5, SmolVLA) for smooth, continuous motion despite high inference latency. + +```bash +lerobot-rollout \ + --strategy.type=base \ + --inference.type=rtc \ + --inference.rtc.execution_horizon=10 \ + --inference.rtc.max_guidance_weight=10.0 \ + --policy.path=${HF_USER}/pi0_policy \ + --robot.type=so100_follower \ + --robot.port=/dev/ttyACM0 \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --task="Pick up the cube" \ + --duration=60 \ + --device=cuda +``` + +| Flag | Description | +| ------------------------------------------- | -------------------------------------------------------------- | +| `--inference.rtc.execution_horizon` | Steps to blend with previous chunk (default: varies by policy) | +| `--inference.rtc.max_guidance_weight` | Consistency enforcement strength (default: varies by policy) | +| `--inference.rtc.prefix_attention_schedule` | Blend schedule: `LINEAR`, `EXP`, `ONES`, `ZEROS` | +| `--inference.queue_threshold` | Max queue size before backpressure (default: 30) | + +See the [Real-Time Chunking](./rtc) guide for details on tuning RTC parameters. + +--- + +## Common Flags + +| Flag | Description | Default | +| --------------------------------- | ----------------------------------------------------------------- | ------- | +| `--policy.path` | **Required.** HF Hub model ID or local checkpoint path | -- | +| `--robot.type` | **Required.** Robot type (e.g. 
`so100_follower`, `koch_follower`) | -- | +| `--robot.port` | Serial port for the robot | -- | +| `--robot.cameras` | Camera configuration (JSON dict) | -- | +| `--fps` | Control loop frequency | 30 | +| `--duration` | Run time in seconds (0 = infinite) | 0 | +| `--device` | Torch device (`cpu`, `cuda`, `mps`) | auto | +| `--task` | Task description (used when no dataset is provided) | -- | +| `--display_data` | Stream telemetry to Rerun visualization | false | +| `--display_ip` / `--display_port` | Remote Rerun server address | -- | +| `--interpolation_multiplier` | Action interpolation factor | 1 | +| `--use_torch_compile` | Enable `torch.compile` for inference | false | +| `--resume` | Resume a previous recording session | false | +| `--play_sounds` | Vocal synthesis for events | true | + +--- + +## Programmatic Usage + +For custom deployments (e.g. with kinematics processors), use the rollout module API directly: + +```python +from lerobot.rollout import BaseStrategyConfig, RolloutConfig, build_rollout_context +from lerobot.rollout.inference import SyncInferenceConfig +from lerobot.rollout.strategies import BaseStrategy +from lerobot.utils.process import ProcessSignalHandler + +cfg = RolloutConfig( + robot=my_robot_config, + policy=my_policy_config, + strategy=BaseStrategyConfig(), + inference=SyncInferenceConfig(), + fps=30, + duration=60, + task="my task", +) + +signal_handler = ProcessSignalHandler(use_threads=True) +ctx = build_rollout_context( + cfg, + signal_handler.shutdown_event, + robot_action_processor=my_custom_action_processor, # optional + robot_observation_processor=my_custom_obs_processor, # optional +) + +strategy = BaseStrategy(cfg.strategy) +try: + strategy.setup(ctx) + strategy.run(ctx) +finally: + strategy.teardown(ctx) +``` + +See `examples/so100_to_so100_EE/rollout.py` and `examples/phone_to_so100/rollout.py` for full examples with kinematics processors. 
diff --git a/docs/source/libero_plus.mdx b/docs/source/libero_plus.mdx new file mode 100644 index 000000000..4249bf49e --- /dev/null +++ b/docs/source/libero_plus.mdx @@ -0,0 +1,188 @@ +# LIBERO-plus + +LIBERO-plus is a **robustness benchmark** for Vision-Language-Action (VLA) models built on top of [LIBERO](./libero). It systematically stress-tests policies by applying **seven independent perturbation dimensions** to the original LIBERO task set, exposing failure modes that standard benchmarks miss. + +- Paper: [In-depth Robustness Analysis of Vision-Language-Action Models](https://arxiv.org/abs/2510.13626) +- GitHub: [sylvestf/LIBERO-plus](https://github.com/sylvestf/LIBERO-plus) +- Dataset: [lerobot/libero_plus](https://huggingface.co/datasets/lerobot/libero_plus) + +![An overview of the LIBERO-plus benchmark perturbation dimensions](https://github.com/sylvestf/LIBERO-plus/raw/main/static/images/libero-plus.jpg) + +## Perturbation dimensions + +LIBERO-plus creates ~10 000 task variants by perturbing each original LIBERO task along these axes: + +| Dimension | What changes | +| --------------------- | ----------------------------------------------------- | +| Objects layout | Target position, presence of confounding objects | +| Camera viewpoints | Camera position, orientation, field-of-view | +| Robot initial states | Manipulator start pose | +| Language instructions | LLM-rewritten task description (paraphrase / synonym) | +| Light conditions | Intensity, direction, color, shadow | +| Background textures | Scene surface and object appearance | +| Sensor noise | Photometric distortions and image degradation | + +## Available task suites + +LIBERO-plus covers the same five suites as LIBERO: + +| Suite | CLI name | Tasks | Max steps | Description | +| -------------- | ---------------- | ----- | --------- | -------------------------------------------------- | +| LIBERO-Spatial | `libero_spatial` | 10 | 280 | Tasks requiring reasoning about spatial relations | +| 
LIBERO-Object | `libero_object` | 10 | 280 | Tasks centered on manipulating different objects | +| LIBERO-Goal | `libero_goal` | 10 | 300 | Goal-conditioned tasks with changing targets | +| LIBERO-90 | `libero_90` | 90 | 400 | Short-horizon tasks from the LIBERO-100 collection | +| LIBERO-Long | `libero_10` | 10 | 520 | Long-horizon tasks from the LIBERO-100 collection | + + + Installing LIBERO-plus **replaces** vanilla LIBERO — it uninstalls `hf-libero` + so that `import libero` resolves to the LIBERO-plus fork. You cannot have both + installed at the same time. To switch back to vanilla LIBERO, uninstall the + fork and reinstall with `pip install -e ".[libero]"`. + + +## Installation + +### System dependencies (Linux only) + +```bash +sudo apt install libexpat1 libfontconfig1-dev libmagickwand-dev +``` + +### Python package + +```bash +pip install -e ".[libero]" "robosuite==1.4.1" bddl easydict mujoco wand scikit-image gym +git clone https://github.com/sylvestf/LIBERO-plus.git +cd LIBERO-plus && pip install --no-deps -e . +pip uninstall -y hf-libero # so `import libero` resolves to the fork +``` + +LIBERO-plus is installed from its GitHub fork rather than a pyproject extra — the fork ships as a namespace package that pip can't handle, so it must be cloned and added to `PYTHONPATH`. See `docker/Dockerfile.benchmark.libero_plus` for the canonical install. MuJoCo is required, so only Linux is supported. + + +Set the MuJoCo rendering backend before running evaluation: + +```bash +export MUJOCO_GL=egl # headless / HPC / cloud +``` + + + +### Download LIBERO-plus assets + +LIBERO-plus ships its extended asset pack separately. 
Download `assets.zip` from the [Hugging Face dataset](https://huggingface.co/datasets/Sylvest/LIBERO-plus/tree/main) and extract it into the LIBERO-plus package directory:
+
+```bash
+# After installing the package, find where it was installed:
+python -c "import libero; print(libero.__file__)"
+# Then extract assets.zip into <libero_package_dir>/libero/assets/
+```
+
+## Evaluation
+
+### Default evaluation (recommended)
+
+Evaluate across the four standard suites (10 episodes per task):
+
+```bash
+lerobot-eval \
+ --policy.path="your-policy-id" \
+ --env.type=libero_plus \
+ --env.task=libero_spatial,libero_object,libero_goal,libero_10 \
+ --eval.batch_size=1 \
+ --eval.n_episodes=10 \
+ --env.max_parallel_tasks=1
+```
+
+### Single-suite evaluation
+
+Evaluate on one LIBERO-plus suite:
+
+```bash
+lerobot-eval \
+ --policy.path="your-policy-id" \
+ --env.type=libero_plus \
+ --env.task=libero_spatial \
+ --eval.batch_size=1 \
+ --eval.n_episodes=10
+```
+
+- `--env.task` picks the suite (`libero_spatial`, `libero_object`, etc.).
+- `--env.task_ids` restricts to specific task indices (`[0]`, `[1,2,3]`, etc.). Omit to run all tasks in the suite.
+- `--eval.batch_size` controls how many environments run in parallel.
+- `--eval.n_episodes` sets how many episodes to run per task.
+
+### Multi-suite evaluation
+
+Benchmark a policy across multiple suites at once by passing a comma-separated list:
+
+```bash
+lerobot-eval \
+ --policy.path="your-policy-id" \
+ --env.type=libero_plus \
+ --env.task=libero_spatial,libero_object \
+ --eval.batch_size=1 \
+ --eval.n_episodes=10
+```
+
+### Control mode
+
+LIBERO-plus supports two control modes — `relative` (default) and `absolute`. 
Different VLA checkpoints are trained with different action parameterizations, so make sure the mode matches your policy: + +```bash +--env.control_mode=relative # or "absolute" +``` + +### Policy inputs and outputs + +**Observations:** + +- `observation.state` — 8-dim proprioceptive features (eef position, axis-angle orientation, gripper qpos) +- `observation.images.image` — main camera view (`agentview_image`), HWC uint8 +- `observation.images.image2` — wrist camera view (`robot0_eye_in_hand_image`), HWC uint8 + +**Actions:** + +- Continuous control in `Box(-1, 1, shape=(7,))` — 6D end-effector delta + 1D gripper + +### Recommended evaluation episodes + +For reproducible benchmarking, use **10 episodes per task** across all four standard suites (Spatial, Object, Goal, Long). This gives 400 total episodes and matches the protocol used for published results. + +## Training + +### Dataset + +A LeRobot-format training dataset for LIBERO-plus is available at: + +- [lerobot/libero_plus](https://huggingface.co/datasets/lerobot/libero_plus) + +### Example training command + +```bash +lerobot-train \ + --policy.type=smolvla \ + --policy.repo_id=${HF_USER}/smolvla_libero_plus \ + --policy.load_vlm_weights=true \ + --dataset.repo_id=lerobot/libero_plus \ + --env.type=libero_plus \ + --env.task=libero_spatial \ + --output_dir=./outputs/ \ + --steps=100000 \ + --batch_size=4 \ + --eval.batch_size=1 \ + --eval.n_episodes=1 \ + --eval_freq=1000 +``` + +## Relationship to LIBERO + +LIBERO-plus is a drop-in extension of LIBERO: + +- Same Python gym interface (`LiberoEnv`, `LiberoProcessorStep`) +- Same camera names and observation/action format +- Same task suite names +- Installs under the same `libero` Python package name (different GitHub repo) + +To use the original LIBERO benchmark, see [LIBERO](./libero) and use `--env.type=libero`. 
diff --git a/docs/source/rename_map.mdx b/docs/source/rename_map.mdx index 6249faaca..16ee6344a 100644 --- a/docs/source/rename_map.mdx +++ b/docs/source/rename_map.mdx @@ -61,17 +61,6 @@ lerobot-eval \ --rename_map='{"observation.images.image": "observation.images.base_0_rgb", "observation.images.image2": "observation.images.left_wrist_0_rgb"}' ``` -### Recording - -`lerobot-record` also supports rename maps, nested under the dataset config: - -```bash -lerobot-record \ # When running inference - --policy.path="/smolVLA_finetuned" \ - ... \ - --dataset.rename_map='{"observation.images.glove2": "observation.images.image"}' -``` - ## Alternative: edit the policy config directly If you always use the same dataset or environment, you can **edit the policy's `config.json`** so its observation keys match your data source. Then no rename map is needed. @@ -105,10 +94,10 @@ XVLA-base has three visual inputs and `empty_cameras=0` by default. Your dataset ## Quick reference -| Goal | What to do | -| ----------------------------------------- | --------------------------------------------------------------------------- | -| Dataset keys ≠ policy keys | `--rename_map='{"dataset_key": "policy_key", ...}'` | -| Env keys ≠ policy keys (eval) | `--rename_map='{"env_key": "policy_key", ...}'` | -| Recording with different keys (inference) | `--dataset.rename_map='{"source_key": "policy_key", ...}'`. 
| -| Fewer cameras than policy expects | `--policy.empty_cameras=N` (supported by PI0, PI05, PI0Fast, SmolVLA, XVLA) | -| Avoid passing a rename map | Edit the policy's `config.json` so its keys match your data source | +| Goal | What to do | +| --------------------------------------- | --------------------------------------------------------------------------- | +| Dataset keys ≠ policy keys | `--rename_map='{"dataset_key": "policy_key", ...}'` | +| Env keys ≠ policy keys (eval) | `--rename_map='{"env_key": "policy_key", ...}'` | +| Rollout with different keys (inference) | `--rename_map='{"source_key": "policy_key", ...}'`. | +| Fewer cameras than policy expects | `--policy.empty_cameras=N` (supported by PI0, PI05, PI0Fast, SmolVLA, XVLA) | +| Avoid passing a rename map | Edit the policy's `config.json` so its keys match your data source | diff --git a/docs/source/robocasa.mdx b/docs/source/robocasa.mdx new file mode 100644 index 000000000..f6a784e72 --- /dev/null +++ b/docs/source/robocasa.mdx @@ -0,0 +1,188 @@ +# RoboCasa365 + +[RoboCasa365](https://robocasa.ai) is a large-scale simulation framework for training and benchmarking **generalist robots** in everyday kitchen tasks. It ships 365 diverse manipulation tasks across 2,500 kitchen environments, 3,200+ object assets and 600+ hours of human demonstration data, on a PandaOmron 12-DOF mobile manipulator (Franka arm on a holonomic base). 
+ +- Paper: [RoboCasa: Large-Scale Simulation of Everyday Tasks for Generalist Robots](https://arxiv.org/abs/2406.02523) +- GitHub: [robocasa/robocasa](https://github.com/robocasa/robocasa) +- Project website: [robocasa.ai](https://robocasa.ai) +- Pretrained policy: [`lerobot/smolvla_robocasa`](https://huggingface.co/lerobot/smolvla_robocasa) +- Single-task dataset (CloseFridge): [`pepijn223/robocasa_CloseFridge`](https://huggingface.co/datasets/pepijn223/robocasa_CloseFridge) + +RoboCasa365 benchmark overview + +## Available tasks + +RoboCasa365 organizes its 365 tasks into two families and three upstream benchmark groups that LeRobot exposes as first-class `--env.task` shortcuts: + +| Family | Tasks | Description | +| --------- | ----- | ------------------------------------------------------------------------------- | +| Atomic | ~65 | Single-skill tasks: pick-and-place, door/drawer manipulation, appliance control | +| Composite | ~300 | Multi-step tasks across 60+ categories: cooking, cleaning, organizing, etc. | + +**Atomic task examples:** `CloseFridge`, `OpenDrawer`, `OpenCabinet`, `TurnOnMicrowave`, `TurnOffStove`, `NavigateKitchen`, `PickPlaceCounterToStove`. + +**Composite task categories:** baking, boiling, brewing, chopping, clearing table, defrosting food, loading dishwasher, making tea, microwaving food, washing dishes, and more. + +`--env.task` accepts three forms: + +- a single task name (`CloseFridge`) +- a comma-separated list (`CloseFridge,OpenBlenderLid,PickPlaceCoffee`) +- a benchmark-group shortcut — `atomic_seen`, `composite_seen`, `composite_unseen`, `pretrain50`, `pretrain100`, `pretrain200`, `pretrain300` — which auto-expands to the upstream task list and auto-sets the dataset `split` (`target` or `pretrain`). + +## Installation + +RoboCasa and its dependency `robosuite` are not published on PyPI, and RoboCasa's own `setup.py` hardcodes `lerobot==0.3.3`, which conflicts with this repo's `lerobot`. 
LeRobot therefore does **not** expose a `robocasa` extra — install the two packages manually as editable clones (using `--no-deps` on `robocasa` to skip its shadowed `lerobot` pin): + +```bash +# After following the standard LeRobot installation instructions. + +git clone https://github.com/robocasa/robocasa.git ~/robocasa +git clone https://github.com/ARISE-Initiative/robosuite.git ~/robosuite +pip install -e ~/robocasa --no-deps +pip install -e ~/robosuite + +# Robocasa's runtime deps (the ones its setup.py would have pulled, minus +# the bad lerobot pin). +pip install numpy numba scipy mujoco pygame Pillow opencv-python \ + pyyaml pynput tqdm termcolor imageio h5py lxml hidapi \ + tianshou gymnasium + +python -m robocasa.scripts.setup_macros +# Lightweight assets (lightwheel object meshes + textures). Enough for +# the default env out of the box. +python -m robocasa.scripts.download_kitchen_assets \ + --type tex tex_generative fixtures_lw objs_lw +# Optional: full objaverse/aigen registries (~30GB) for richer object +# variety. Enable at eval time via --env.obj_registries (see below). +# python -m robocasa.scripts.download_kitchen_assets --type objs_objaverse +``` + + +RoboCasa requires MuJoCo. Set the rendering backend before training or evaluation: + +```bash +export MUJOCO_GL=egl # for headless servers (HPC, cloud) +``` + + + +### Object registries + +By default the env samples objects only from the `lightwheel` registry (what `--type objs_lw` ships), which avoids a `Probabilities contain NaN` crash when the objaverse / aigen packs aren't on disk. If you've downloaded the full asset set, enable the full registry at runtime: + +```bash +--env.obj_registries='[objaverse,lightwheel]' +``` + +## Evaluation + +All eval snippets below mirror the CI command (see `.github/workflows/benchmark_tests.yml`). 
The `--rename_map` argument maps RoboCasa's native camera keys (`robot0_agentview_left` / `robot0_eye_in_hand` / `robot0_agentview_right`) onto the three-camera (`camera1` / `camera2` / `camera3`) input layout the released `smolvla_robocasa` policy was trained on. + +### Single-task evaluation (recommended for quick iteration) + +```bash +lerobot-eval \ + --policy.path=lerobot/smolvla_robocasa \ + --env.type=robocasa \ + --env.task=CloseFridge \ + --eval.batch_size=1 \ + --eval.n_episodes=20 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--rename_map={"observation.images.robot0_agentview_left": "observation.images.camera1", "observation.images.robot0_eye_in_hand": "observation.images.camera2", "observation.images.robot0_agentview_right": "observation.images.camera3"}' +``` + +### Multi-task evaluation + +Pass a comma-separated list of tasks: + +```bash +lerobot-eval \ + --policy.path=lerobot/smolvla_robocasa \ + --env.type=robocasa \ + --env.task=CloseFridge,OpenCabinet,OpenDrawer,TurnOnMicrowave,TurnOffStove \ + --eval.batch_size=1 \ + --eval.n_episodes=20 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--rename_map={"observation.images.robot0_agentview_left": "observation.images.camera1", "observation.images.robot0_eye_in_hand": "observation.images.camera2", "observation.images.robot0_agentview_right": "observation.images.camera3"}' +``` + +### Benchmark-group evaluation + +Run an entire upstream group (e.g. 
all 18 `atomic_seen` tasks with `split=target`): + +```bash +lerobot-eval \ + --policy.path=lerobot/smolvla_robocasa \ + --env.type=robocasa \ + --env.task=atomic_seen \ + --eval.batch_size=1 \ + --eval.n_episodes=20 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--rename_map={"observation.images.robot0_agentview_left": "observation.images.camera1", "observation.images.robot0_eye_in_hand": "observation.images.camera2", "observation.images.robot0_agentview_right": "observation.images.camera3"}' +``` + +### Recommended evaluation episodes + +**20 episodes per task** for reproducible benchmarking. Matches the protocol used in published results. + +## Policy inputs and outputs + +**Observations** (raw RoboCasa camera names are preserved verbatim): + +- `observation.state` — 16-dim proprioceptive state (base position, base quaternion, relative end-effector position, relative end-effector quaternion, gripper qpos) +- `observation.images.robot0_agentview_left` — left agent view, 256×256 HWC uint8 +- `observation.images.robot0_eye_in_hand` — wrist camera view, 256×256 HWC uint8 +- `observation.images.robot0_agentview_right` — right agent view, 256×256 HWC uint8 + +**Actions:** + +- Continuous control in `Box(-1, 1, shape=(12,))` — base motion (4D) + control mode (1D) + end-effector position (3D) + end-effector rotation (3D) + gripper (1D). + +## Training + +### Single-task example + +A ready-to-use single-task dataset is on the Hub: +[`pepijn223/robocasa_CloseFridge`](https://huggingface.co/datasets/pepijn223/robocasa_CloseFridge). 
+ +Fine-tune a SmolVLA base on `CloseFridge`: + +```bash +lerobot-train \ + --policy.type=smolvla \ + --policy.repo_id=${HF_USER}/smolvla_robocasa_CloseFridge \ + --policy.load_vlm_weights=true \ + --policy.push_to_hub=true \ + --dataset.repo_id=pepijn223/robocasa_CloseFridge \ + --env.type=robocasa \ + --env.task=CloseFridge \ + --output_dir=./outputs/smolvla_robocasa_CloseFridge \ + --steps=100000 \ + --batch_size=4 \ + --eval_freq=5000 \ + --eval.batch_size=1 \ + --eval.n_episodes=5 \ + --save_freq=10000 +``` + +Evaluate the resulting checkpoint: + +```bash +lerobot-eval \ + --policy.path=${HF_USER}/smolvla_robocasa_CloseFridge \ + --env.type=robocasa \ + --env.task=CloseFridge \ + --eval.batch_size=1 \ + --eval.n_episodes=20 +``` + +## Reproducing published results + +The released checkpoint [`lerobot/smolvla_robocasa`](https://huggingface.co/lerobot/smolvla_robocasa) is evaluated with the commands in the [Evaluation](#evaluation) section. CI runs a 10-atomic-task smoke eval (one episode each) on every PR touching the benchmark, picking fixture-centric tasks that don't require the objaverse asset pack. diff --git a/docs/source/robocerebra.mdx b/docs/source/robocerebra.mdx new file mode 100644 index 000000000..9776bd40f --- /dev/null +++ b/docs/source/robocerebra.mdx @@ -0,0 +1,99 @@ +# RoboCerebra + +[RoboCerebra](https://robocerebra-project.github.io/) is a long-horizon manipulation benchmark that evaluates **high-level reasoning, planning, and memory** in VLAs. Episodes chain multiple sub-goals with language-grounded intermediate instructions, built on top of LIBERO's simulator stack (MuJoCo + robosuite, Franka Panda 7-DOF). 
+ +- Paper: [RoboCerebra: A Large-scale Benchmark for Long-horizon Robotic Manipulation Evaluation](https://arxiv.org/abs/2506.06677) +- Project website: [robocerebra-project.github.io](https://robocerebra-project.github.io/) +- Dataset: [`lerobot/robocerebra_unified`](https://huggingface.co/datasets/lerobot/robocerebra_unified) — LeRobot v3.0, 6,660 episodes / 571,116 frames at 20 fps, 1,728 language-grounded sub-tasks. +- Pretrained policy: [`lerobot/smolvla_robocerebra`](https://huggingface.co/lerobot/smolvla_robocerebra) + +## Available tasks + +RoboCerebra reuses LIBERO's simulator, so evaluation runs against the LIBERO `libero_10` long-horizon suite: + +| Suite | CLI name | Tasks | Description | +| --------- | ----------- | ----- | ------------------------------------------------------------- | +| LIBERO-10 | `libero_10` | 10 | Long-horizon kitchen/living room tasks chaining 3–6 sub-goals | + +Each RoboCerebra episode in the dataset is segmented into multiple sub-tasks with natural-language instructions, which the unified dataset exposes as independent supervision signals. + +## Installation + +RoboCerebra piggybacks on LIBERO, so the `libero` extra is all you need: + +```bash +pip install -e ".[libero]" +``` + + +RoboCerebra requires Linux (MuJoCo / robosuite). 
Set the rendering backend before training or evaluation: + +```bash +export MUJOCO_GL=egl # for headless servers (HPC, cloud) +``` + + + +## Evaluation + +RoboCerebra eval runs against LIBERO's `libero_10` suite with RoboCerebra's camera naming (`image` + `wrist_image`) and an extra empty-camera slot so a three-view-trained policy receives the expected input layout: + +```bash +lerobot-eval \ + --policy.path=lerobot/smolvla_robocerebra \ + --env.type=libero \ + --env.task=libero_10 \ + --env.fps=20 \ + --env.obs_type=pixels_agent_pos \ + --env.observation_height=256 \ + --env.observation_width=256 \ + '--env.camera_name_mapping={"agentview_image": "image", "robot0_eye_in_hand_image": "wrist_image"}' \ + --eval.batch_size=1 \ + --eval.n_episodes=10 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--rename_map={"observation.images.image": "observation.images.camera1", "observation.images.wrist_image": "observation.images.camera2"}' \ + --policy.empty_cameras=1 +``` + +### Recommended evaluation episodes + +**10 episodes per task** across the `libero_10` suite (100 total) for reproducible benchmarking. Matches the protocol used in the RoboCerebra paper. 
+ +## Policy inputs and outputs + +**Observations:** + +- `observation.state` — 8-dim proprioceptive state (7 joint positions + gripper) +- `observation.images.image` — third-person view, 256×256 HWC uint8 +- `observation.images.wrist_image` — wrist-mounted camera view, 256×256 HWC uint8 + +**Actions:** + +- Continuous control in `Box(-1, 1, shape=(7,))` — end-effector delta (6D) + gripper (1D) + +## Training + +The unified dataset at [`lerobot/robocerebra_unified`](https://huggingface.co/datasets/lerobot/robocerebra_unified) exposes two RGB streams and language-grounded sub-task annotations: + +| Feature | Shape | Description | +| -------------------------------- | ------------- | -------------------- | +| `observation.images.image` | (256, 256, 3) | Third-person view | +| `observation.images.wrist_image` | (256, 256, 3) | Wrist-mounted camera | +| `observation.state` | (8,) | Joint pos + gripper | +| `action` | (7,) | EEF delta + gripper | + +Fine-tune a SmolVLA base on it: + +```bash +lerobot-train \ + --policy.path=lerobot/smolvla_base \ + --dataset.repo_id=lerobot/robocerebra_unified \ + --env.type=libero \ + --env.task=libero_10 \ + --output_dir=outputs/smolvla_robocerebra +``` + +## Reproducing published results + +The released checkpoint [`lerobot/smolvla_robocerebra`](https://huggingface.co/lerobot/smolvla_robocerebra) was trained on `lerobot/robocerebra_unified` and evaluated with the command in the [Evaluation](#evaluation) section. CI runs the same command with `--eval.n_episodes=1` as a smoke test on every PR touching the benchmark. diff --git a/docs/source/robomme.mdx b/docs/source/robomme.mdx new file mode 100644 index 000000000..6613a3923 --- /dev/null +++ b/docs/source/robomme.mdx @@ -0,0 +1,130 @@ +# RoboMME + +[RoboMME](https://robomme.github.io) is a memory-augmented manipulation benchmark built on ManiSkill (SAPIEN). 
It evaluates a robot's ability to retain and use information across an episode — counting, object permanence, reference, and imitation. + +- **16 tasks** across 4 memory-skill suites +- **1,600 training demos** (100 per task, 50 val, 50 test) +- **Dataset**: [`lerobot/robomme`](https://huggingface.co/datasets/lerobot/robomme) — LeRobot v3.0, 768K frames at 10 fps +- **Simulator**: ManiSkill / SAPIEN, Panda arm, Linux only + +![RoboMME benchmark tasks overview](https://cdn-thumbnails.huggingface.co/social-thumbnails/papers/2603.04639/gradient.png) + +## Tasks + +| Suite | Tasks | +| --------------------------------- | ------------------------------------------------------------- | +| **Counting** (temporal memory) | BinFill, PickXtimes, SwingXtimes, StopCube | +| **Permanence** (spatial memory) | VideoUnmask, VideoUnmaskSwap, ButtonUnmask, ButtonUnmaskSwap | +| **Reference** (object memory) | PickHighlight, VideoRepick, VideoPlaceButton, VideoPlaceOrder | +| **Imitation** (procedural memory) | MoveCube, InsertPeg, PatternLock, RouteStick | + +## Installation + +> RoboMME requires **Linux** (ManiSkill/SAPIEN uses Vulkan rendering). Docker is recommended to isolate dependency conflicts. + +### Native (Linux) + +```bash +pip install --override <(printf 'gymnasium==0.29.1\nnumpy==1.26.4\n') \ + -e '.[smolvla,av-dep]' \ + 'robomme @ git+https://github.com/RoboMME/robomme_benchmark.git@main' +``` + +> **Dependency note**: `mani-skill` (pulled by `robomme`) pins `gymnasium==0.29.1` and `numpy<2.0.0`, which conflict with lerobot's base `numpy>=2.0.0`. That's why `robomme` is not a pyproject extra — use the override install above, or the Docker approach below to avoid conflicts entirely. + +### Docker (recommended) + +```bash +# Build base image first (from repo root) +docker build -f docker/Dockerfile.eval-base -t lerobot-eval-base . 
+
+# Build RoboMME eval image (applies gymnasium + numpy pin overrides)
+docker build -f docker/Dockerfile.benchmark.robomme -t lerobot-robomme .
+```
+
+The `docker/Dockerfile.benchmark.robomme` image overrides `gymnasium==0.29.1` and `numpy==1.26.4` after lerobot's install. Both versions are runtime-safe for lerobot's actual API usage.
+
+## Running Evaluation
+
+### Default (single task, single episode)
+
+```bash
+lerobot-eval \
+ --policy.path=<path_or_hub_id_of_policy> \
+ --env.type=robomme \
+ --env.task=PickXtimes \
+ --env.dataset_split=test \
+ --env.task_ids=[0] \
+ --eval.batch_size=1 \
+ --eval.n_episodes=1
+```
+
+### Multi-task evaluation
+
+Evaluate multiple tasks in one run by comma-separating task names. Use `task_ids` to control which episodes are evaluated per task. Recommended: 50 episodes per task for the test split.
+
+```bash
+lerobot-eval \
+ --policy.path=<path_or_hub_id_of_policy> \
+ --env.type=robomme \
+ --env.task=PickXtimes,BinFill,StopCube,MoveCube,InsertPeg \
+ --env.dataset_split=test \
+ --env.task_ids=[0,1,2,3,4,5,6,7,8,9] \
+ --eval.batch_size=1 \
+ --eval.n_episodes=50
+```
+
+### Key CLI options for `env.type=robomme`
+
+| Option | Default | Description |
+| -------------------- | ------------- | -------------------------------------------------- |
+| `env.task` | `PickXtimes` | Any of the 16 task names above (comma-separated) |
+| `env.dataset_split` | `test` | `train`, `val`, or `test` |
+| `env.action_space` | `joint_angle` | `joint_angle` (8-D) or `ee_pose` (7-D) |
+| `env.episode_length` | `300` | Max steps per episode |
+| `env.task_ids` | `null` | List of episode indices to evaluate (null = `[0]`) |
+
+## Dataset
+
+The dataset [`lerobot/robomme`](https://huggingface.co/datasets/lerobot/robomme) is in **LeRobot v3.0 format** and can be loaded directly:
+
+```python
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+dataset = LeRobotDataset("lerobot/robomme")
+```
+
+### Dataset features
+
+| Feature | Shape | Description |
+| ------------------ | 
------------- | ------------------------------- | +| `image` | (256, 256, 3) | Front camera RGB | +| `wrist_image` | (256, 256, 3) | Wrist camera RGB | +| `actions` | (8,) | Joint angles + gripper | +| `state` | (8,) | Joint positions + gripper state | +| `simple_subgoal` | str | High-level language annotation | +| `grounded_subgoal` | str | Grounded language annotation | +| `episode_index` | int | Episode ID | +| `frame_index` | int | Frame within episode | + +### Feature key alignment (training) + +The env wrapper exposes `pixels/image` and `pixels/wrist_image` as observation keys. The `features_map` in `RoboMMEEnv` maps these to `observation.images.image` and `observation.images.wrist_image` for the policy. State is exposed as `agent_pos` and maps to `observation.state`. + +The dataset's `image` and `wrist_image` columns already align with the policy input keys, so no renaming is needed when fine-tuning. + +## Action Spaces + +| Type | Dim | Description | +| ------------- | --- | --------------------------------------------------------- | +| `joint_angle` | 8 | 7 joint angles + 1 gripper (−1 closed, +1 open, absolute) | +| `ee_pose` | 7 | xyz + roll/pitch/yaw + gripper | + +Set via `--env.action_space=joint_angle` (default) or `--env.action_space=ee_pose`. + +## Platform Notes + +- **Linux only**: ManiSkill requires SAPIEN/Vulkan. macOS and Windows are not supported. +- **GPU recommended**: Rendering is CPU-capable but slow; CUDA + Vulkan gives full speed. +- **gymnasium / numpy conflict**: See installation note above. Docker image handles this automatically. +- **ManiSkill fork**: `robomme` depends on a specific ManiSkill fork (`YinpeiDai/ManiSkill`), pulled in automatically via the `robomme` package. 
diff --git a/docs/source/robotwin.mdx b/docs/source/robotwin.mdx new file mode 100644 index 000000000..ad1db766f --- /dev/null +++ b/docs/source/robotwin.mdx @@ -0,0 +1,223 @@ +# RoboTwin 2.0 + +RoboTwin 2.0 is a **large-scale dual-arm manipulation benchmark** built on the SAPIEN physics engine. It provides a standardized evaluation protocol for bimanual robotic policies across 50 tasks (as of upstream `main`) with strong domain randomization (clutter, lighting, background, tabletop height, and language instructions). + +- Paper: [RoboTwin 2.0: A Scalable Data Generator and Benchmark with Strong Domain Randomization for Robust Bimanual Robotic Manipulation](https://arxiv.org/abs/2506.18088) +- GitHub: [RoboTwin-Platform/RoboTwin](https://github.com/RoboTwin-Platform/RoboTwin) +- Leaderboard: [robotwin-platform.github.io/leaderboard](https://robotwin-platform.github.io/leaderboard) +- Dataset: [lerobot/robotwin_unified](https://huggingface.co/datasets/lerobot/robotwin_unified) + +![RoboTwin 2.0 benchmark overview](https://www.aitntnews.com/pictures/2025/7/8/9a7f79cb-5ba9-11f0-8581-fa163e47d677.png) + +## Overview + +| Property | Value | +| ------------- | -------------------------------------------------------- | +| Tasks | 50 dual-arm manipulation tasks | +| Robot | Aloha-AgileX bimanual (14 DOF, 7 per arm) | +| Action space | 14-dim joint-space, continuous in `[-1, 1]` | +| Cameras | `head_camera`, `left_camera`, `right_camera` | +| Simulator | SAPIEN (not MuJoCo) | +| Eval protocol | 100 episodes/task, 50 demo_clean demonstrations | +| Eval settings | **Easy** (`demo_clean`) and **Hard** (`demo_randomized`) | + +## Available tasks + +RoboTwin 2.0 ships 50 dual-arm manipulation tasks in its upstream `envs/` directory. The canonical list is the `ROBOTWIN_TASKS` tuple in `src/lerobot/envs/robotwin.py`, mirrored verbatim from the upstream repo. 
Example tasks: + +| Task | CLI name | Category | +| ------------------------ | ------------------------ | ----------------- | +| Beat block with hammer | `beat_block_hammer` | Tool use | +| Click bell / alarm clock | `click_bell` | Precision press | +| Stack blocks (2 / 3) | `stack_blocks_two/three` | Stacking | +| Stack bowls (2 / 3) | `stack_bowls_two/three` | Stacking | +| Handover block / mic | `handover_block` | Bimanual coord. | +| Lift pot | `lift_pot` | Bimanual lift | +| Shake bottle | `shake_bottle` | Continuous motion | +| Turn switch | `turn_switch` | Articulated obj | +| Stamp seal | `stamp_seal` | Precision place | +| Scan object | `scan_object` | Mobile manip. | + +Pass a comma-separated list to `--env.task` to run multiple tasks in a single eval sweep. + + + `open_laptop` is currently broken upstream (its `check_success()` uses + `self.arm_tag`, which is only set inside the scripted-expert `play_once()` + path and therefore unavailable during normal policy eval). Avoid it until the + upstream bug is fixed, or patch the task to default `self.arm_tag = "left"` in + `load_actors()`. + + +## Dataset + +The RoboTwin 2.0 dataset is available in **LeRobot v3.0 format** on the Hugging Face Hub: + +``` +lerobot/robotwin_unified +``` + +It contains over 100,000 pre-collected trajectories across all 50 tasks (79.6 GB, Apache 2.0 license). No format conversion is needed — it is already in the correct LeRobot v3.0 schema with video observations and action labels. + +You can load it directly with the HF Datasets library: + +```python +from datasets import load_dataset + +ds = load_dataset("lerobot/robotwin_unified", split="train") +``` + +## Installation + +RoboTwin 2.0 requires **Linux** with an NVIDIA GPU (CUDA 12.1 recommended). Installation takes approximately 20 minutes. + +### 1. Create a conda environment + +```bash +conda create -n robotwin python=3.10 -y +conda activate robotwin +``` + +### 2. 
Install LeRobot + +```bash +git clone https://github.com/huggingface/lerobot.git +cd lerobot +pip install -e "." +``` + +### 3. Install RoboTwin 2.0 + +```bash +git clone https://github.com/RoboTwin-Platform/RoboTwin.git +cd RoboTwin +bash script/_install.sh +bash script/_download_assets.sh +``` + +The install script handles all Python dependencies including SAPIEN, CuRobo, mplib, and pytorch3d. + + +If the automated install fails, install manually: + +```bash +pip install -r requirements.txt +pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable" +cd envs && git clone https://github.com/NVlabs/curobo.git && cd curobo +pip install -e . --no-build-isolation +``` + +Then apply the required mplib fix: in `mplib/planner.py` line 807, remove `or collide` from the conditional. + + + +### 4. Add RoboTwin to PYTHONPATH + +The RoboTwin task modules must be importable by LeRobot. From within the `RoboTwin/` directory: + +```bash +export PYTHONPATH="${PYTHONPATH}:$(pwd)" +``` + +Add this to your shell profile to make it permanent. 
+ +## Evaluation + +### Standard evaluation (recommended) + +Evaluate a policy on a single task with the official protocol (100 episodes): + +```bash +lerobot-eval \ + --policy.path="your-hf-policy-id" \ + --env.type=robotwin \ + --env.task=beat_block_hammer \ + --eval.batch_size=1 \ + --eval.n_episodes=100 +``` + +### Single-task quick check + +```bash +lerobot-eval \ + --policy.path="your-hf-policy-id" \ + --env.type=robotwin \ + --env.task=beat_block_hammer \ + --eval.batch_size=1 \ + --eval.n_episodes=5 +``` + +### Multi-task sweep + +Evaluate on several tasks in one run: + +```bash +lerobot-eval \ + --policy.path="your-hf-policy-id" \ + --env.type=robotwin \ + --env.task=beat_block_hammer,click_bell,handover_block,stack_blocks_two \ + --eval.batch_size=1 \ + --eval.n_episodes=100 +``` + +### Full benchmark (all 50 tasks) + +```bash +lerobot-eval \ + --policy.path="your-hf-policy-id" \ + --env.type=robotwin \ + --env.task=adjust_bottle,beat_block_hammer,blocks_ranking_rgb,blocks_ranking_size,click_alarmclock,click_bell,dump_bin_bigbin,grab_roller,handover_block,handover_mic,hanging_mug,lift_pot,move_can_pot,move_pillbottle_pad,move_playingcard_away,move_stapler_pad,open_microwave,pick_diverse_bottles,pick_dual_bottles,place_a2b_left,place_a2b_right,place_bread_basket,place_bread_skillet,place_burger_fries,place_can_basket,place_cans_plasticbox,place_container_plate,place_dual_shoes,place_empty_cup,place_fan,place_mouse_pad,place_object_basket,place_object_scale,place_object_stand,place_phone_stand,place_shoe,press_stapler,put_bottles_dustbin,put_object_cabinet,rotate_qrcode,scan_object,shake_bottle,shake_bottle_horizontally,stack_blocks_three,stack_blocks_two,stack_bowls_three,stack_bowls_two,stamp_seal,turn_switch \ + --eval.batch_size=1 \ + --eval.n_episodes=100 +``` + + + `open_laptop` is intentionally omitted above because of the upstream + `self.arm_tag` bug (see the **Available tasks** section). Re-add it once the + upstream fix lands. 
+ + +## Camera configuration + +By default, all three cameras are included: + +| Camera key | Description | +| -------------- | ------------------------------ | +| `head_camera` | Torso-mounted overhead view | +| `left_camera` | Left arm wrist-mounted camera | +| `right_camera` | Right arm wrist-mounted camera | + +To use a subset of cameras, override `--env.camera_names`: + +```bash +lerobot-eval \ + --policy.path="your-hf-policy-id" \ + --env.type=robotwin \ + --env.task=beat_block_hammer \ + --env.camera_names="head_camera,left_camera" \ + --eval.batch_size=1 \ + --eval.n_episodes=10 +``` + +## Environment config reference + +Key parameters for `RoboTwinEnvConfig`: + +| Parameter | Default | Description | +| -------------------- | ---------------------------------------- | ---------------------------------- | +| `task` | `"beat_block_hammer"` | Comma-separated task name(s) | +| `fps` | `25` | Simulation FPS | +| `episode_length` | `300` | Max steps per episode | +| `obs_type` | `"pixels_agent_pos"` | `"pixels"` or `"pixels_agent_pos"` | +| `camera_names` | `"head_camera,left_camera,right_camera"` | Comma-separated active cameras | +| `observation_height` | `240` | Camera pixel height | +| `observation_width` | `320` | Camera pixel width | + +## Leaderboard submission + +Results can be submitted to the [RoboTwin 2.0 leaderboard](https://robotwin-platform.github.io/leaderboard). The official protocol requires: + +- Training on 50 `demo_clean` demonstrations per task +- Evaluating 100 episodes per task +- Reporting success rate separately for **Easy** (`demo_clean`) and **Hard** (`demo_randomized`) settings + +For submission instructions, refer to the [RoboTwin 2.0 documentation](https://robotwin-platform.github.io/doc/). 
diff --git a/docs/source/rtc.mdx b/docs/source/rtc.mdx index 9485d8b66..eadc34344 100644 --- a/docs/source/rtc.mdx +++ b/docs/source/rtc.mdx @@ -34,7 +34,7 @@ pip install -e ".[smolvla]" ### Using RTC with Pi0 -You can find a complete reference implementation in [eval_with_real_robot.py](examples/rtc/eval_with_real_robot.py). +You can use `lerobot-rollout --strategy.type=base --inference.type=rtc` for RTC deployment on real robots. The snippet below provides a simplified pseudo-example of how RTC operates with Pi0 in your pipeline: ```python @@ -137,8 +137,12 @@ The script generates a visualization of the denoising process, comparing standar ## Testing RTC with a Real Robot ```bash -python examples/rtc/eval_with_real_robot.py \ +lerobot-rollout \ + --strategy.type=base \ --policy.path=${HF_USERNAME}/policy_repo_id \ + --inference.type=rtc \ + --inference.rtc.execution_horizon=10 \ + --inference.rtc.max_guidance_weight=10.0 \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem58FA0834591 \ --robot.cameras="{ gripper: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ @@ -178,7 +182,7 @@ visualizer = RTCDebugVisualizer() # ... create plots ``` -See `examples/rtc/eval_dataset.py` for a complete example of visualization. +See `examples/rtc/eval_dataset.py` for a complete example of offline RTC visualization. 
## References diff --git a/docs/source/unitree_g1.mdx b/docs/source/unitree_g1.mdx index 2e615085e..69965a561 100644 --- a/docs/source/unitree_g1.mdx +++ b/docs/source/unitree_g1.mdx @@ -274,7 +274,8 @@ python src/lerobot/scripts/lerobot_train.py \ Once trained, we recommend deploying policies using inference-time RTC: ```bash -python examples/rtc/eval_with_real_robot.py \ +lerobot-rollout \ + --strategy.type=base \ --policy.path=your-username/your-repo-id \ --policy.device=cuda \ --robot.type=unitree_g1 \ @@ -284,7 +285,7 @@ python examples/rtc/eval_with_real_robot.py \ --task="task_description" \ --duration=1000 \ --fps=30 \ - --rtc.enabled=true + --inference.type=rtc ``` --- diff --git a/docs/source/vlabench.mdx b/docs/source/vlabench.mdx new file mode 100644 index 000000000..da579d674 --- /dev/null +++ b/docs/source/vlabench.mdx @@ -0,0 +1,176 @@ +# VLABench + +[VLABench](https://github.com/OpenMOSS/VLABench) is a large-scale benchmark for **language-conditioned robotic manipulation with long-horizon reasoning**. The upstream suite covers 100 task categories across 2,000+ objects and evaluates six dimensions of robot intelligence: mesh & texture understanding, spatial reasoning, world-knowledge transfer, semantic instruction comprehension, physical-law understanding, and long-horizon planning. Built on MuJoCo / dm_control with a Franka Panda 7-DOF arm. LeRobot exposes **43 of these tasks** through `--env.task` (21 primitives + 22 composites, see [Available tasks](#available-tasks) below). 
+ +- Paper: [VLABench: A Large-Scale Benchmark for Language-Conditioned Robotics Manipulation with Long-Horizon Reasoning](https://arxiv.org/abs/2412.18194) +- GitHub: [OpenMOSS/VLABench](https://github.com/OpenMOSS/VLABench) +- Project website: [vlabench.github.io](https://vlabench.github.io) +- Pretrained policy: [`lerobot/smolvla_vlabench`](https://huggingface.co/lerobot/smolvla_vlabench) + +VLABench benchmark overview + +## Available tasks + +VLABench ships two task suites covering **43 task categories** in LeRobot's `--env.task` surface: + +| Suite | CLI name | Tasks | Description | +| --------- | ----------- | ----- | ---------------------------------------------------------------- | +| Primitive | `primitive` | 21 | Single / few-skill combinations (select, insert, physics QA) | +| Composite | `composite` | 22 | Multi-step reasoning and long-horizon planning (cook, rearrange) | + +**Primitive tasks:** `select_fruit`, `select_toy`, `select_chemistry_tube`, `add_condiment`, `select_book`, `select_painting`, `select_drink`, `insert_flower`, `select_billiards`, `select_ingredient`, `select_mahjong`, `select_poker`, and physical-reasoning tasks (`density_qa`, `friction_qa`, `magnetism_qa`, `reflection_qa`, `simple_cuestick_usage`, `simple_seesaw_usage`, `sound_speed_qa`, `thermal_expansion_qa`, `weight_qa`). + +**Composite tasks:** `cluster_billiards`, `cluster_book`, `cluster_drink`, `cluster_toy`, `cook_dishes`, `cool_drink`, `find_unseen_object`, `get_coffee`, `hammer_nail`, `heat_food`, `make_juice`, `play_mahjong`, `play_math_game`, `play_poker`, `play_snooker`, `rearrange_book`, `rearrange_chemistry_tube`, `set_dining_table`, `set_study_table`, `store_food`, `take_chemistry_experiment`, `use_seesaw_complex`. 
+ +`--env.task` accepts three forms: + +- a single task name (`select_fruit`) +- a comma-separated list (`select_fruit,heat_food`) +- a suite shortcut (`primitive`, `composite`, or `primitive,composite`) + +## Installation + +VLABench is **not on PyPI** — its only distribution is the [OpenMOSS/VLABench](https://github.com/OpenMOSS/VLABench) GitHub repo — so LeRobot does not expose a `vlabench` extra. Install it manually as an editable clone, alongside the MuJoCo / dm_control pins VLABench needs, then fetch the mesh assets: + +```bash +# After following the standard LeRobot installation instructions. + +git clone https://github.com/OpenMOSS/VLABench.git ~/VLABench +git clone https://github.com/motion-planning/rrt-algorithms.git ~/rrt-algorithms +pip install -e ~/VLABench -e ~/rrt-algorithms +pip install "mujoco==3.2.2" "dm-control==1.0.22" \ + open3d colorlog scikit-learn openai gdown + +python ~/VLABench/scripts/download_assets.py +``` + + +VLABench requires Linux (`sys_platform == 'linux'`) and Python 3.10+. Set the MuJoCo rendering backend before running: + +```bash +export MUJOCO_GL=egl # for headless servers (HPC, cloud) +``` + + + +## Evaluation + +All eval snippets below mirror the command CI runs (see `.github/workflows/benchmark_tests.yml`). The `--rename_map` argument maps VLABench's `image` / `second_image` / `wrist_image` camera keys onto the three-camera (`camera1` / `camera2` / `camera3`) input layout the released `smolvla_vlabench` policy was trained on. 
+ +### Single-task evaluation (recommended for quick iteration) + +```bash +lerobot-eval \ + --policy.path=lerobot/smolvla_vlabench \ + --env.type=vlabench \ + --env.task=select_fruit \ + --eval.batch_size=1 \ + --eval.n_episodes=10 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--rename_map={"observation.images.image": "observation.images.camera1", "observation.images.second_image": "observation.images.camera2", "observation.images.wrist_image": "observation.images.camera3"}' +``` + +### Multi-task evaluation + +Pass a comma-separated list of tasks: + +```bash +lerobot-eval \ + --policy.path=lerobot/smolvla_vlabench \ + --env.type=vlabench \ + --env.task=select_fruit,select_toy,add_condiment,heat_food \ + --eval.batch_size=1 \ + --eval.n_episodes=10 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + '--rename_map={"observation.images.image": "observation.images.camera1", "observation.images.second_image": "observation.images.camera2", "observation.images.wrist_image": "observation.images.camera3"}' +``` + +### Suite-wide evaluation + +Run an entire suite (all 21 primitives or all 22 composites): + +```bash +lerobot-eval \ + --policy.path=lerobot/smolvla_vlabench \ + --env.type=vlabench \ + --env.task=primitive \ + --eval.batch_size=1 \ + --eval.n_episodes=10 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + --env.max_parallel_tasks=1 \ + '--rename_map={"observation.images.image": "observation.images.camera1", "observation.images.second_image": "observation.images.camera2", "observation.images.wrist_image": "observation.images.camera3"}' +``` + +Or both suites: + +```bash +lerobot-eval \ + --policy.path=lerobot/smolvla_vlabench \ + --env.type=vlabench \ + --env.task=primitive,composite \ + --eval.batch_size=1 \ + --eval.n_episodes=10 \ + --eval.use_async_envs=false \ + --policy.device=cuda \ + --env.max_parallel_tasks=1 \ + '--rename_map={"observation.images.image": "observation.images.camera1", "observation.images.second_image": 
"observation.images.camera2", "observation.images.wrist_image": "observation.images.camera3"}' +``` + +### Recommended evaluation episodes + +**10 episodes per task** for reproducible benchmarking (210 total for the full primitive suite, 220 for composite). Matches the protocol in the VLABench paper. + +## Policy inputs and outputs + +**Observations:** + +- `observation.state` — 7-dim end-effector state (position xyz + Euler xyz + gripper) +- `observation.images.image` — front camera, 480×480 HWC uint8 +- `observation.images.second_image` — second camera, 480×480 HWC uint8 +- `observation.images.wrist_image` — wrist camera, 480×480 HWC uint8 + +**Actions:** + +- Continuous control in `Box(-1, 1, shape=(7,))` — 3D position + 3D Euler orientation + 1D gripper. + +## Training + +### Datasets + +Pre-collected VLABench datasets in LeRobot format on the Hub: + +- [`VLABench/vlabench_primitive_ft_lerobot_video`](https://huggingface.co/datasets/VLABench/vlabench_primitive_ft_lerobot_video) — 5,000 episodes, 128 tasks, 480×480 images. +- [`VLABench/vlabench_composite_ft_lerobot_video`](https://huggingface.co/datasets/VLABench/vlabench_composite_ft_lerobot_video) — 5,977 episodes, 167 tasks, 224×224 images. 
+ +### Example training command + +Fine-tune a SmolVLA base on the primitive suite: + +```bash +lerobot-train \ + --policy.type=smolvla \ + --policy.repo_id=${HF_USER}/smolvla_vlabench_primitive \ + --policy.load_vlm_weights=true \ + --policy.push_to_hub=true \ + --dataset.repo_id=VLABench/vlabench_primitive_ft_lerobot_video \ + --env.type=vlabench \ + --env.task=select_fruit \ + --output_dir=./outputs/smolvla_vlabench_primitive \ + --steps=100000 \ + --batch_size=4 \ + --eval_freq=5000 \ + --eval.batch_size=1 \ + --eval.n_episodes=1 \ + --save_freq=10000 +``` + +## Reproducing published results + +The released checkpoint [`lerobot/smolvla_vlabench`](https://huggingface.co/lerobot/smolvla_vlabench) was trained on the primitive-suite dataset above and is evaluated with the [Single-task](#single-task-evaluation-recommended-for-quick-iteration) / [Suite-wide](#suite-wide-evaluation) commands. CI runs a 10-primitive-task smoke eval (one episode each) on every PR touching the benchmark. diff --git a/docs/source/xvla.mdx b/docs/source/xvla.mdx index a5ae8f1df..be686512b 100644 --- a/docs/source/xvla.mdx +++ b/docs/source/xvla.mdx @@ -220,7 +220,7 @@ REAL_DIM = 12 # Postprocessing: Trim 20D predictions to 12D for deployment ``` -See the [action_hub.py](/home/jade_choghari/robot/lerobot/src/lerobot/policies/xvla/action_hub.py) implementation for details. +See the [action_hub.py](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/xvla/action_hub.py) implementation for details. 
#### Auto Action Mode (Recommended) @@ -519,9 +519,9 @@ If you use X-VLA in your research, please cite: - [X-VLA Paper](https://arxiv.org/pdf/2510.10274) - [LeRobot Documentation](https://github.com/huggingface/lerobot) -- [Action Registry Implementation](https://github.com/huggingface/lerobot/src/lerobot/policies/xvla/action_hub.py) -- [Processor Implementation](https://github.com/huggingface/lerobot/src/lerobot/policies/xvla/processor_xvla.py) -- [Model Configuration](https://github.com/huggingface/lerobot/src/lerobot/policies/xvla/configuration_xvla.py) +- [Action Registry Implementation](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/xvla/action_hub.py) +- [Processor Implementation](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/xvla/processor_xvla.py) +- [Model Configuration](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/xvla/configuration_xvla.py) ## Contributing diff --git a/examples/hil/hil_data_collection.py b/examples/hil/hil_data_collection.py deleted file mode 100644 index 09a36dbe1..000000000 --- a/examples/hil/hil_data_collection.py +++ /dev/null @@ -1,1184 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2025 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Human-in-the-Loop (HIL) Data Collection with optional Real-Time Chunking (RTC). - -Implements the RaC paradigm (https://arxiv.org/abs/2509.07953) for LeRobot. 
By default uses synchronous -inference (best for fast models like ACT / Diffusion Policy). Set --rtc.enabled=true for -asynchronous background inference (recommended for large models like Pi0 / Pi0.5 / SmolVLA). - -The workflow: -1. Policy runs autonomously -2. Press SPACE to pause - robot holds position -3. Press 'c' to take control - human provides RECOVERY + CORRECTION -4. Press 'p' to hand control back to policy and continue recording -5. Press → to end episode (save and continue to next) -6. Reset, then do next rollout - -Keyboard Controls: - SPACE - Pause policy (robot holds position, no recording) - c - Take control (start correction, recording resumes) - p - Resume policy after pause/correction (recording continues) - → - End episode (save and continue to next) - ← - Re-record episode - ESC - Stop recording and push dataset to hub - -Usage: - # Standard synchronous inference (ACT, Diffusion Policy) - python examples/hil/hil_data_collection.py \ - --robot.type=bi_openarm_follower \ - --teleop.type=openarm_mini \ - --policy.path=path/to/pretrained_model \ - --dataset.repo_id=user/hil-dataset \ - --dataset.single_task="Fold the T-shirt properly" \ - --dataset.fps=30 \ - --interpolation_multiplier=2 - - # With RTC for large models (Pi0, Pi0.5, SmolVLA) - python examples/hil/hil_data_collection.py \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - --rtc.max_guidance_weight=5.0 \ - --rtc.prefix_attention_schedule=LINEAR \ - --robot.type=bi_openarm_follower \ - --teleop.type=openarm_mini \ - --policy.path=path/to/pretrained_model \ - --dataset.repo_id=user/hil-dataset \ - --dataset.single_task="Fold the T-shirt properly" \ - --dataset.fps=30 \ - --interpolation_multiplier=3 - - # RTC with bi_openarm_follower + OpenArm Mini teleop and pi0.5 policy - python examples/hil/hil_data_collection.py \ - --policy.path=lerobot-data-collection/folding_final \ - --robot.type=bi_openarm_follower \ - --robot.cameras='{left_wrist: {type: opencv, index_or_path: 
"/dev/video4", width: 1280, height: 720, fps: 30}, base: {type: opencv, index_or_path: "/dev/video2", width: 640, height: 480, fps: 30}, right_wrist: {type: opencv, index_or_path: "/dev/video0", width: 1280, height: 720, fps: 30}}' \ - --robot.left_arm_config.port=can0 \ - --robot.left_arm_config.side=left \ - --robot.left_arm_config.can_interface=socketcan \ - --robot.left_arm_config.disable_torque_on_disconnect=true \ - --robot.left_arm_config.max_relative_target=8.0 \ - --robot.right_arm_config.port=can1 \ - --robot.right_arm_config.side=right \ - --robot.right_arm_config.can_interface=socketcan \ - --robot.right_arm_config.disable_torque_on_disconnect=true \ - --robot.right_arm_config.max_relative_target=8.0 \ - --teleop.type=openarm_mini \ - --teleop.port_left=/dev/ttyACM1 \ - --teleop.port_right=/dev/ttyACM0 \ - --dataset.repo_id=lerobot-data-collection/hil_folding \ - --dataset.single_task="Fold the T-shirt properly" \ - --dataset.fps=30 \ - --dataset.num_episodes=50 \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - --rtc.max_guidance_weight=5.0 \ - --rtc.prefix_attention_schedule=LINEAR \ - --interpolation_multiplier=3 \ - --calibrate=true \ - --device=cuda -""" - -import logging -import math -import time -from dataclasses import dataclass, field -from pprint import pformat -from threading import Event, Lock, Thread -from typing import Any - -import torch -from hil_utils import ( - HILDatasetConfig, - init_keyboard_listener, - make_identity_processors, - print_controls, - reset_loop, - teleop_disable_torque, - teleop_smooth_move_to, -) - -from lerobot.cameras.opencv import OpenCVCameraConfig # noqa: F401 -from lerobot.cameras.realsense import RealSenseCameraConfig # noqa: F401 -from lerobot.common.control_utils import is_headless, predict_action -from lerobot.configs import PreTrainedConfig, parser -from lerobot.datasets import ( - LeRobotDataset, - VideoEncodingManager, - aggregate_pipeline_dataset_features, - create_initial_features, - 
safe_stop_image_writer, -) -from lerobot.policies import PreTrainedPolicy, get_policy_class, make_policy, make_pre_post_processors -from lerobot.policies.rtc import ActionInterpolator, ActionQueue, LatencyTracker, RTCConfig -from lerobot.policies.utils import make_robot_action -from lerobot.processor import ( - NormalizerProcessorStep, - PolicyProcessorPipeline, - RelativeActionsProcessorStep, - TransitionKey, - create_transition, - rename_stats, - to_relative_actions, -) -from lerobot.robots import Robot, RobotConfig, make_robot_from_config -from lerobot.robots.bi_openarm_follower import BiOpenArmFollowerConfig -from lerobot.robots.so_follower import SOFollowerRobotConfig # noqa: F401 -from lerobot.teleoperators import Teleoperator, TeleoperatorConfig, make_teleoperator_from_config -from lerobot.teleoperators.openarm_mini import OpenArmMiniConfig # noqa: F401 -from lerobot.teleoperators.so_leader import SOLeaderTeleopConfig # noqa: F401 -from lerobot.utils import get_safe_torch_device -from lerobot.utils.constants import ACTION, OBS_STATE, OBS_STR -from lerobot.utils.feature_utils import build_dataset_frame, combine_feature_dicts, hw_to_dataset_features -from lerobot.utils.robot_utils import precise_sleep -from lerobot.utils.utils import init_logging, log_say -from lerobot.utils.visualization_utils import init_rerun, log_rerun_data - -logger = logging.getLogger(__name__) - - -# RTC helpers - - -class ThreadSafeRobot: - """Thread-safe wrapper for robot operations (used with RTC background thread).""" - - def __init__(self, robot: Robot): - self._robot = robot - self._lock = Lock() - - def get_observation(self) -> dict[str, Any]: - with self._lock: - return self._robot.get_observation() - - def send_action(self, action: dict) -> None: - with self._lock: - self._robot.send_action(action) - - @property - def observation_features(self) -> dict: - return self._robot.observation_features - - @property - def action_features(self) -> dict: - return 
self._robot.action_features - - @property - def name(self) -> str: - return self._robot.name - - @property - def robot_type(self) -> str: - return self._robot.robot_type - - @property - def cameras(self): - return getattr(self._robot, "cameras", {}) - - -def _set_openarm_max_relative_target_if_missing( - robot_cfg: RobotConfig, max_relative_target: float = 8.0 -) -> None: - """Set a safe default max_relative_target for OpenArm followers when not provided.""" - if isinstance(robot_cfg, BiOpenArmFollowerConfig): - if robot_cfg.left_arm_config.max_relative_target is None: - robot_cfg.left_arm_config.max_relative_target = max_relative_target - if robot_cfg.right_arm_config.max_relative_target is None: - robot_cfg.right_arm_config.max_relative_target = max_relative_target - - -def _reanchor_relative_rtc_prefix( - prev_actions_absolute: torch.Tensor, - current_state: torch.Tensor, - relative_step: RelativeActionsProcessorStep | None, - normalizer_step: NormalizerProcessorStep | None, - policy_device: torch.device | str, -) -> torch.Tensor: - """Convert absolute leftovers into model space for relative-action RTC policies.""" - if relative_step is None: - return prev_actions_absolute.to(policy_device) - - state = current_state.detach().cpu() - if state.dim() == 1: - state = state.unsqueeze(0) - - action_cpu = prev_actions_absolute.detach().cpu() - mask = relative_step._build_mask(action_cpu.shape[-1]) - relative_actions = to_relative_actions(action_cpu, state, mask) - - transition = create_transition(action=relative_actions) - if normalizer_step is not None: - transition = normalizer_step(transition) - - return transition[TransitionKey.ACTION].to(policy_device) - - -def _normalize_prev_actions_length(prev_actions: torch.Tensor, target_steps: int) -> torch.Tensor: - """Pad/truncate RTC prefix actions to a fixed length for stable compiled inference.""" - if prev_actions.ndim != 2: - raise ValueError(f"Expected prev_actions to be 2D [T, A], got 
shape={tuple(prev_actions.shape)}") - - steps, action_dim = prev_actions.shape - if steps == target_steps: - return prev_actions - if steps > target_steps: - return prev_actions[:target_steps] - - padded = torch.zeros((target_steps, action_dim), dtype=prev_actions.dtype, device=prev_actions.device) - padded[:steps] = prev_actions - return padded - - -def _resolve_action_key_order(cfg, dataset_action_names: list[str]) -> list[str]: - """Choose action name ordering used to map policy tensor outputs to robot action dict.""" - policy_action_names = getattr(cfg.policy, "action_feature_names", None) - if not policy_action_names: - return dataset_action_names - - policy_action_names = list(policy_action_names) - if len(policy_action_names) != len(dataset_action_names): - logger.warning( - "[RTC] policy.action_feature_names length (%d) != dataset action dim (%d); " - "falling back to dataset order", - len(policy_action_names), - len(dataset_action_names), - ) - return dataset_action_names - - if set(dataset_action_names) != set(policy_action_names): - logger.warning( - "[RTC] policy.action_feature_names keys do not match dataset action keys; " - "falling back to dataset order" - ) - return dataset_action_names - - return policy_action_names - - -def _resolve_state_joint_order( - policy_action_names: list[str] | None, - available_joint_names: list[str], -) -> list[str]: - """Resolve joint-state ordering used to build observation.state.""" - if not policy_action_names: - return available_joint_names - - policy_action_names = list(policy_action_names) - available_set = set(available_joint_names) - policy_set = set(policy_action_names) - - if len(policy_action_names) != len(available_joint_names) or policy_set != available_set: - logger.warning( - "policy.action_feature_names does not match available state joints; " - "falling back to robot observation order" - ) - return available_joint_names - - logger.info("Using policy.action_feature_names order for observation.state 
mapping") - return policy_action_names - - -def _start_pedal_listener(events: dict): - """Start foot pedal listener thread if evdev is available. - - Pedal input is restricted to HIL control handoff only: - policy -> pause -> takeover -> resume policy. - Episode save/advance remains keyboard-only (right arrow). - """ - import threading - - try: - from evdev import InputDevice, categorize, ecodes - except ImportError: - logging.warning("[Pedal] evdev not installed - pedal support disabled") - return - - pedal_device = "/dev/input/by-id/usb-PCsensor_FootSwitch-event-kbd" - key_left = "KEY_A" - key_right = "KEY_C" - - def pedal_reader(): - try: - dev = InputDevice(pedal_device) - logger.info(f"[Pedal] Connected: {dev.name}") - - for ev in dev.read_loop(): - if ev.type != ecodes.EV_KEY: - continue - - key = categorize(ev) - code = key.keycode - if isinstance(code, (list, tuple)): - code = code[0] - - if key.keystate != 1: - continue - - if events["in_reset"]: - if code in [key_left, key_right]: - events["start_next_episode"] = True - else: - if code not in [key_left, key_right]: - continue - - if events["correction_active"]: - events["resume_policy"] = True - elif events["policy_paused"]: - events["start_next_episode"] = True - else: - events["policy_paused"] = True - - except FileNotFoundError: - logging.info(f"[Pedal] Device not found: {pedal_device}") - except PermissionError: - logging.warning(f"[Pedal] Permission denied for {pedal_device}") - except Exception as e: - logging.warning(f"[Pedal] Error: {e}") - - thread = threading.Thread(target=pedal_reader, daemon=True) - thread.start() - - -def _rtc_inference_thread( - policy: PreTrainedPolicy, - obs_holder: dict, - obs_lock: Lock, - hw_features: dict, - preprocessor: PolicyProcessorPipeline, - postprocessor: PolicyProcessorPipeline, - queue_holder: dict, - shutdown_event: Event, - policy_active: Event, - compile_warmup_done: Event, - cfg, -): - """Background thread for RTC action chunk generation.""" - 
latency_tracker = LatencyTracker() - time_per_chunk = 1.0 / cfg.dataset.fps - threshold = 30 - policy_device = policy.config.device - stats_window_start = time.perf_counter() - policy_inference_count = 0 - latency_sum_s = 0.0 - inference_count = 0 - warmup_required = max(1, int(cfg.compile_warmup_inferences)) if cfg.use_torch_compile else 0 - - relative_step = next( - ( - step - for step in preprocessor.steps - if isinstance(step, RelativeActionsProcessorStep) and step.enabled - ), - None, - ) - normalizer_step = next( - (step for step in preprocessor.steps if isinstance(step, NormalizerProcessorStep)), - None, - ) - if relative_step is not None: - if relative_step.action_names is None: - cfg_action_names = getattr(cfg.policy, "action_feature_names", None) - if cfg_action_names: - relative_step.action_names = list(cfg_action_names) - else: - fallback_action_names = obs_holder.get("action_feature_names") - if fallback_action_names: - relative_step.action_names = list(fallback_action_names) - logger.info("[RTC] Relative actions enabled: re-anchoring RTC prefix to current state") - - while not shutdown_event.is_set(): - if not policy_active.is_set(): - time.sleep(0.01) - continue - - queue = queue_holder.get("queue") - with obs_lock: - obs = obs_holder.get("obs") - if queue is None or obs is None: - time.sleep(0.01) - continue - - if queue.qsize() <= threshold: - try: - current_time = time.perf_counter() - idx_before = queue.get_action_index() - prev_actions = queue.get_left_over() - - latency = latency_tracker.max() - delay = math.ceil(latency / time_per_chunk) if latency else 0 - - obs_batch = build_dataset_frame(hw_features, obs, prefix="observation") - for name in obs_batch: - obs_batch[name] = torch.from_numpy(obs_batch[name]) - if "image" in name: - obs_batch[name] = obs_batch[name].float() / 255 - obs_batch[name] = obs_batch[name].permute(2, 0, 1).contiguous() - obs_batch[name] = obs_batch[name].unsqueeze(0).to(policy_device) - - obs_batch["task"] = 
[cfg.dataset.single_task] - obs_batch["robot_type"] = obs_holder.get("robot_type", "unknown") - - preprocessed = preprocessor(obs_batch) - - if prev_actions is not None and relative_step is not None and OBS_STATE in obs_batch: - prev_actions_absolute = queue.get_processed_left_over() - if prev_actions_absolute is not None and prev_actions_absolute.numel() > 0: - prev_actions = _reanchor_relative_rtc_prefix( - prev_actions_absolute=prev_actions_absolute, - current_state=obs_batch[OBS_STATE], - relative_step=relative_step, - normalizer_step=normalizer_step, - policy_device=policy_device, - ) - - if prev_actions is not None: - prev_actions = _normalize_prev_actions_length( - prev_actions, target_steps=cfg.rtc.execution_horizon - ) - - actions = policy.predict_action_chunk( - preprocessed, inference_delay=delay, prev_chunk_left_over=prev_actions - ) - - original = actions.squeeze(0).clone() - processed = postprocessor(actions).squeeze(0) - new_latency = time.perf_counter() - current_time - new_delay = math.ceil(new_latency / time_per_chunk) - inference_count += 1 - is_warmup_inference = cfg.use_torch_compile and inference_count <= warmup_required - if is_warmup_inference: - latency_tracker.reset() - else: - latency_tracker.add(new_latency) - queue.merge(original, processed, new_delay, idx_before) - policy_inference_count += 1 - latency_sum_s += new_latency - if ( - is_warmup_inference - and inference_count >= warmup_required - and not compile_warmup_done.is_set() - ): - compile_warmup_done.set() - logger.info( - "[RTC] Compile warmup complete (%d/%d inferences)", - inference_count, - warmup_required, - ) - logger.debug("[RTC] Inference latency=%.2fs, queue=%d", new_latency, queue.qsize()) - except Exception as e: - logger.error("[RTC] Error: %s", e) - time.sleep(0.5) - else: - time.sleep(0.01) - - now = time.perf_counter() - if cfg.log_hz and (window_elapsed := now - stats_window_start) >= cfg.hz_log_interval_s: - policy_hz = policy_inference_count / window_elapsed - 
avg_latency_ms = ( - (latency_sum_s / policy_inference_count * 1000.0) if policy_inference_count else 0.0 - ) - logger.info( - "[HIL RTC rates] policy=%.1f Hz | avg_inference=%.1f ms | queue=%d", - policy_hz, - avg_latency_ms, - queue.qsize(), - ) - stats_window_start = now - policy_inference_count = 0 - latency_sum_s = 0.0 - - -# Config - - -@dataclass -class HILConfig: - robot: RobotConfig - teleop: TeleoperatorConfig - dataset: HILDatasetConfig - policy: PreTrainedConfig | None = None - rtc: RTCConfig = field(default_factory=RTCConfig) - interpolation_multiplier: int = 2 - record_interpolated_actions: bool = False - display_data: bool = True - play_sounds: bool = True - resume: bool = False - device: str = "cuda" - use_torch_compile: bool = False - compile_warmup_inferences: int = 2 - calibrate: bool = False - log_hz: bool = True - hz_log_interval_s: float = 2.0 - - def __post_init__(self): - policy_path = parser.get_path_arg("policy") - if policy_path: - cli_overrides = parser.get_cli_overrides("policy") - self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) - self.policy.pretrained_path = policy_path - if self.policy is None: - raise ValueError("policy.path is required") - - @classmethod - def __get_path_fields__(cls) -> list[str]: - return ["policy"] - - -# Rollout loops - - -@safe_stop_image_writer -def _rollout_sync( - robot: Robot, - teleop: Teleoperator, - policy: PreTrainedPolicy, - preprocessor: PolicyProcessorPipeline, - postprocessor: PolicyProcessorPipeline, - dataset: LeRobotDataset, - events: dict, - cfg: HILConfig, -): - """Rollout loop with standard synchronous inference.""" - fps = cfg.dataset.fps - device = get_safe_torch_device(cfg.device) - stream_online = bool(cfg.dataset.streaming_encoding) - record_stride = 1 if cfg.record_interpolated_actions else max(1, cfg.interpolation_multiplier) - - policy.reset() - preprocessor.reset() - postprocessor.reset() - - frame_buffer: list[dict] = [] - 
teleop_disable_torque(teleop) - - was_paused = False - waiting_for_takeover = False - last_action: dict[str, Any] | None = None - robot_action: dict[str, Any] = {} - action_keys = list(dataset.features[ACTION]["names"]) - obs_state_names = list(dataset.features[f"{OBS_STR}.state"]["names"]) - obs_image_names = [ - key.removeprefix(f"{OBS_STR}.images.") - for key in dataset.features - if key.startswith(f"{OBS_STR}.images.") - ] - - interpolator = ActionInterpolator(multiplier=cfg.interpolation_multiplier) - control_interval = interpolator.get_control_interval(fps) - - timestamp = 0.0 - record_tick = 0 - start_t = time.perf_counter() - stats_window_start = start_t - policy_inference_count = 0 - robot_command_count = 0 - - while timestamp < cfg.dataset.episode_time_s: - loop_start = time.perf_counter() - - if events["exit_early"]: - events["exit_early"] = False - events["policy_paused"] = False - events["correction_active"] = False - events["resume_policy"] = False - break - - if events["resume_policy"] and ( - events["policy_paused"] or events["correction_active"] or waiting_for_takeover - ): - events["resume_policy"] = False - events["start_next_episode"] = False - events["policy_paused"] = False - events["correction_active"] = False - waiting_for_takeover = False - was_paused = False - last_action = None - interpolator.reset() - policy.reset() - preprocessor.reset() - postprocessor.reset() - - if events["policy_paused"] and not was_paused: - obs = robot.get_observation() - robot_pos = { - k: v for k, v in obs.items() if k.endswith(".pos") and k in robot.observation_features - } - teleop_smooth_move_to(teleop, robot_pos, duration_s=2.0, fps=50) - events["start_next_episode"] = False - waiting_for_takeover = True - was_paused = True - interpolator.reset() - - if waiting_for_takeover and events["start_next_episode"]: - teleop_disable_torque(teleop) - events["start_next_episode"] = False - events["correction_active"] = True - waiting_for_takeover = False - - obs = 
robot.get_observation() - obs_filtered = {k: obs[k] for k in obs_state_names if k in obs} - obs_filtered.update({k: obs[k] for k in obs_image_names if k in obs}) - obs_frame = build_dataset_frame(dataset.features, obs_filtered, prefix=OBS_STR) - - if events["correction_active"]: - robot_action = teleop.get_action() - robot.send_action(robot_action) - robot_command_count += 1 - action_frame = build_dataset_frame(dataset.features, robot_action, prefix=ACTION) - if record_tick % record_stride == 0: - frame = {**obs_frame, **action_frame, "task": cfg.dataset.single_task} - if stream_online: - dataset.add_frame(frame) - else: - frame_buffer.append(frame) - record_tick += 1 - - elif waiting_for_takeover or events["policy_paused"]: - if last_action: - robot.send_action(last_action) - robot_command_count += 1 - - else: - if interpolator.needs_new_action(): - action_values = predict_action( - observation=obs_frame, - policy=policy, - device=device, - preprocessor=preprocessor, - postprocessor=postprocessor, - use_amp=policy.config.use_amp, - task=cfg.dataset.single_task, - robot_type=robot.robot_type, - ) - policy_inference_count += 1 - robot_action = make_robot_action(action_values, dataset.features) - action_tensor = torch.tensor([robot_action[k] for k in action_keys]) - interpolator.add(action_tensor) - - interp_action = interpolator.get() - if interp_action is not None: - robot_action = {k: interp_action[i].item() for i, k in enumerate(action_keys)} - robot.send_action(robot_action) - robot_command_count += 1 - last_action = robot_action - action_frame = build_dataset_frame(dataset.features, robot_action, prefix=ACTION) - if record_tick % record_stride == 0: - frame = {**obs_frame, **action_frame, "task": cfg.dataset.single_task} - if stream_online: - dataset.add_frame(frame) - else: - frame_buffer.append(frame) - record_tick += 1 - - if cfg.display_data and robot_action: - log_rerun_data(observation=obs_filtered, action=robot_action) - - dt = time.perf_counter() - 
loop_start - if (sleep_time := control_interval - dt) > 0: - precise_sleep(sleep_time) - now = time.perf_counter() - timestamp = now - start_t - - if cfg.log_hz and (window_elapsed := now - stats_window_start) >= cfg.hz_log_interval_s: - policy_hz = policy_inference_count / window_elapsed - robot_hz = robot_command_count / window_elapsed - logger.info( - "[HIL rates] policy=%.1f Hz (target=%.1f) | robot=%.1f Hz (target=%.1f)", - policy_hz, - fps, - robot_hz, - fps * cfg.interpolation_multiplier, - ) - stats_window_start = now - policy_inference_count = 0 - robot_command_count = 0 - - teleop_disable_torque(teleop) - - if not stream_online: - for frame in frame_buffer: - dataset.add_frame(frame) - - -@safe_stop_image_writer -def _rollout_rtc( - robot, - teleop: Teleoperator, - policy: PreTrainedPolicy, - preprocessor: PolicyProcessorPipeline, - postprocessor: PolicyProcessorPipeline, - dataset: LeRobotDataset, - events: dict, - cfg: HILConfig, - queue_holder: dict, - obs_holder: dict, - obs_lock: Lock, - policy_active: Event, - compile_warmup_done: Event, - hw_features: dict, -): - """Rollout loop with RTC for asynchronous inference.""" - fps = cfg.dataset.fps - stream_online = bool(cfg.dataset.streaming_encoding) - record_stride = 1 if cfg.record_interpolated_actions else max(1, cfg.interpolation_multiplier) - - policy.reset() - preprocessor.reset() - postprocessor.reset() - - frame_buffer: list[dict] = [] - teleop_disable_torque(teleop) - - was_paused = False - waiting_for_takeover = False - last_action: dict[str, Any] | None = None - dataset_action_keys = list(dataset.features[ACTION]["names"]) - action_keys = _resolve_action_key_order(cfg, dataset_action_keys) - if action_keys != dataset_action_keys: - logger.info("[RTC] Using policy.action_feature_names order for action tensor mapping") - else: - logger.info("[RTC] Using dataset action feature order for action tensor mapping") - obs_state_names = list(dataset.features[f"{OBS_STR}.state"]["names"]) - 
obs_image_names = [ - key.removeprefix(f"{OBS_STR}.images.") - for key in dataset.features - if key.startswith(f"{OBS_STR}.images.") - ] - - interpolator = ActionInterpolator(multiplier=cfg.interpolation_multiplier) - control_interval = interpolator.get_control_interval(fps) - - robot_action: dict[str, Any] = {} - timestamp = 0.0 - start_t = time.perf_counter() - stats_window_start = start_t - robot_command_count = 0 - record_tick = 0 - obs_poll_interval = 1.0 / fps - last_obs_poll_t = 0.0 - obs_filtered: dict[str, Any] = {} - obs_frame: dict[str, Any] = {} - warmup_wait_logged = False - warmup_queue_flushed = False - - while timestamp < cfg.dataset.episode_time_s: - loop_start = time.perf_counter() - - if events["exit_early"]: - events["exit_early"] = False - events["policy_paused"] = False - events["correction_active"] = False - events["resume_policy"] = False - break - - if events["resume_policy"] and ( - events["policy_paused"] or events["correction_active"] or waiting_for_takeover - ): - events["resume_policy"] = False - events["start_next_episode"] = False - events["policy_paused"] = False - events["correction_active"] = False - waiting_for_takeover = False - was_paused = False - last_action = None - interpolator.reset() - queue_holder["queue"] = ActionQueue(cfg.rtc) - policy_active.clear() - policy.reset() - preprocessor.reset() - postprocessor.reset() - - if events["policy_paused"] and not was_paused: - policy_active.clear() - obs = robot.get_observation() - robot_pos = { - k: v for k, v in obs.items() if k.endswith(".pos") and k in robot.observation_features - } - teleop_smooth_move_to(teleop, robot_pos, duration_s=2.0, fps=50) - events["start_next_episode"] = False - waiting_for_takeover = True - was_paused = True - interpolator.reset() - - if waiting_for_takeover and events["start_next_episode"]: - teleop_disable_torque(teleop) - events["start_next_episode"] = False - events["correction_active"] = True - waiting_for_takeover = False - 
queue_holder["queue"] = ActionQueue(cfg.rtc) - - now_for_obs = time.perf_counter() - should_poll_obs = ( - not obs_filtered - or (now_for_obs - last_obs_poll_t) >= obs_poll_interval - or events["correction_active"] - or waiting_for_takeover - or events["policy_paused"] - ) - if should_poll_obs: - obs = robot.get_observation() - obs_filtered = {k: obs[k] for k in obs_state_names if k in obs} - obs_filtered.update({k: obs[k] for k in obs_image_names if k in obs}) - obs_frame = build_dataset_frame(dataset.features, obs_filtered, prefix=OBS_STR) - with obs_lock: - obs_holder["obs"] = obs_filtered - last_obs_poll_t = now_for_obs - - if events["correction_active"]: - robot_action = teleop.get_action() - robot.send_action(robot_action) - robot_command_count += 1 - action_frame = build_dataset_frame(dataset.features, robot_action, prefix=ACTION) - if record_tick % record_stride == 0: - frame = {**obs_frame, **action_frame, "task": cfg.dataset.single_task} - if stream_online: - dataset.add_frame(frame) - else: - frame_buffer.append(frame) - record_tick += 1 - - elif waiting_for_takeover or events["policy_paused"]: - if last_action: - robot.send_action(last_action) - robot_command_count += 1 - - else: - if not policy_active.is_set(): - policy_active.set() - - if cfg.use_torch_compile and not compile_warmup_done.is_set(): - if not warmup_wait_logged: - logger.info( - "[RTC] Waiting for compile warmup (%d inferences) before policy rollout", - max(1, int(cfg.compile_warmup_inferences)), - ) - warmup_wait_logged = True - else: - if cfg.use_torch_compile and not warmup_queue_flushed: - queue_holder["queue"] = ActionQueue(cfg.rtc) - interpolator.reset() - warmup_queue_flushed = True - logger.info("[RTC] Warmup queue cleared; starting live policy rollout") - - queue = queue_holder["queue"] - - if interpolator.needs_new_action(): - new_action = queue.get() if queue else None - if new_action is not None: - interpolator.add(new_action.cpu()) - - action_tensor = interpolator.get() - if 
action_tensor is not None: - robot_action = { - k: action_tensor[i].item() - for i, k in enumerate(action_keys) - if i < len(action_tensor) - } - robot.send_action(robot_action) - robot_command_count += 1 - last_action = robot_action - action_frame = build_dataset_frame(dataset.features, robot_action, prefix=ACTION) - if record_tick % record_stride == 0: - frame = {**obs_frame, **action_frame, "task": cfg.dataset.single_task} - if stream_online: - dataset.add_frame(frame) - else: - frame_buffer.append(frame) - record_tick += 1 - - dt = time.perf_counter() - loop_start - if (sleep_time := control_interval - dt) > 0: - precise_sleep(sleep_time) - now = time.perf_counter() - timestamp = now - start_t - - if cfg.log_hz and (window_elapsed := now - stats_window_start) >= cfg.hz_log_interval_s: - robot_hz = robot_command_count / window_elapsed - logger.info( - "[HIL RTC rates] robot=%.1f Hz (target=%.1f)", - robot_hz, - fps * cfg.interpolation_multiplier, - ) - stats_window_start = now - robot_command_count = 0 - - policy_active.clear() - teleop_disable_torque(teleop) - - if not stream_online: - for frame in frame_buffer: - dataset.add_frame(frame) - - -# Main collection function - - -@parser.wrap() -def hil_collect(cfg: HILConfig) -> LeRobotDataset: - """Main HIL data collection function (supports both sync and RTC modes).""" - init_logging() - logger.info(pformat(cfg.__dict__)) - - use_rtc = cfg.rtc.enabled - - if use_rtc: - _set_openarm_max_relative_target_if_missing(cfg.robot, max_relative_target=8.0) - - if cfg.display_data: - init_rerun(session_name="hil_collection") - - robot_raw = make_robot_from_config(cfg.robot) - teleop = make_teleoperator_from_config(cfg.teleop) - - teleop_proc, obs_proc = make_identity_processors() - - action_features_hw = {k: v for k, v in robot_raw.action_features.items() if k.endswith(".pos")} - all_observation_features = robot_raw.observation_features - available_joint_names = [ - key for key, value in all_observation_features.items() if 
key.endswith(".pos") and value is float - ] - ordered_joint_names = _resolve_state_joint_order( - getattr(cfg.policy, "action_feature_names", None), - available_joint_names, - ) - observation_features_hw = { - joint_name: all_observation_features[joint_name] for joint_name in ordered_joint_names - } - for key, value in all_observation_features.items(): - if isinstance(value, tuple): - observation_features_hw[key] = value - - dataset_features = combine_feature_dicts( - aggregate_pipeline_dataset_features( - pipeline=teleop_proc, - initial_features=create_initial_features(action=action_features_hw), - use_videos=cfg.dataset.video, - ), - aggregate_pipeline_dataset_features( - pipeline=obs_proc, - initial_features=create_initial_features(observation=observation_features_hw), - use_videos=cfg.dataset.video, - ), - ) - - dataset = None - listener = None - shutdown_event = Event() - policy_active = Event() - compile_warmup_done = Event() - if not cfg.use_torch_compile: - compile_warmup_done.set() - rtc_thread = None - - try: - if cfg.resume: - dataset = LeRobotDataset( - cfg.dataset.repo_id, - root=cfg.dataset.root, - batch_encoding_size=cfg.dataset.video_encoding_batch_size, - vcodec=cfg.dataset.vcodec, - streaming_encoding=cfg.dataset.streaming_encoding, - encoder_queue_maxsize=cfg.dataset.encoder_queue_maxsize, - encoder_threads=cfg.dataset.encoder_threads, - ) - if hasattr(robot_raw, "cameras") and robot_raw.cameras: - dataset.start_image_writer( - num_processes=cfg.dataset.num_image_writer_processes, - num_threads=cfg.dataset.num_image_writer_threads_per_camera * len(robot_raw.cameras), - ) - else: - dataset = LeRobotDataset.create( - cfg.dataset.repo_id, - cfg.dataset.fps, - root=cfg.dataset.root, - robot_type=robot_raw.name, - features=dataset_features, - use_videos=cfg.dataset.video, - image_writer_processes=cfg.dataset.num_image_writer_processes, - image_writer_threads=cfg.dataset.num_image_writer_threads_per_camera - * len(robot_raw.cameras if 
hasattr(robot_raw, "cameras") else []), - batch_encoding_size=cfg.dataset.video_encoding_batch_size, - vcodec=cfg.dataset.vcodec, - streaming_encoding=cfg.dataset.streaming_encoding, - encoder_queue_maxsize=cfg.dataset.encoder_queue_maxsize, - encoder_threads=cfg.dataset.encoder_threads, - ) - - # Load policy — RTC needs manual loading for predict_action_chunk support - if use_rtc: - policy_class = get_policy_class(cfg.policy.type) - policy_config = PreTrainedConfig.from_pretrained(cfg.policy.pretrained_path) - if hasattr(policy_config, "compile_model"): - policy_config.compile_model = cfg.use_torch_compile - policy = policy_class.from_pretrained(cfg.policy.pretrained_path, config=policy_config) - policy.config.rtc_config = cfg.rtc - if hasattr(policy, "init_rtc_processor"): - policy.init_rtc_processor() - policy = policy.to(cfg.device) - policy.eval() - else: - policy = make_policy(cfg.policy, ds_meta=dataset.meta) - - preprocessor, postprocessor = make_pre_post_processors( - policy_cfg=cfg.policy, - pretrained_path=cfg.policy.pretrained_path, - dataset_stats=rename_stats(dataset.meta.stats, cfg.dataset.rename_map), - preprocessor_overrides={ - "device_processor": {"device": cfg.device}, - "rename_observations_processor": {"rename_map": cfg.dataset.rename_map}, - }, - ) - - # Connect hardware - if use_rtc: - logger.info("Connecting robot (calibrate=%s)", cfg.calibrate) - robot_raw.connect(calibrate=False) - if cfg.calibrate and hasattr(robot_raw, "calibrate"): - robot_raw.calibrate() - robot_raw.disconnect() - robot_raw.connect(calibrate=False) - else: - robot_raw.connect() - - robot = ThreadSafeRobot(robot_raw) if use_rtc else robot_raw - teleop.connect() - listener, events = init_keyboard_listener() - - # RTC-specific setup - queue_holder = None - obs_holder = None - obs_lock = Lock() - hw_features = None - if use_rtc: - _start_pedal_listener(events) - queue_holder = {"queue": ActionQueue(cfg.rtc)} - obs_holder = { - "obs": None, - "robot_type": 
robot.robot_type, - "action_feature_names": [key for key in robot.action_features if key.endswith(".pos")], - } - hw_features = hw_to_dataset_features(observation_features_hw, "observation") - - rtc_thread = Thread( - target=_rtc_inference_thread, - args=( - policy, - obs_holder, - obs_lock, - hw_features, - preprocessor, - postprocessor, - queue_holder, - shutdown_event, - policy_active, - compile_warmup_done, - cfg, - ), - daemon=True, - ) - rtc_thread.start() - - print_controls(rtc=use_rtc) - logger.info(f" Policy: {cfg.policy.pretrained_path}") - logger.info(f" Task: {cfg.dataset.single_task}") - logger.info(f" Interpolation: {cfg.interpolation_multiplier}x") - if use_rtc: - logger.info(f" RTC: enabled (execution_horizon={cfg.rtc.execution_horizon})") - - with VideoEncodingManager(dataset): - recorded = 0 - while recorded < cfg.dataset.num_episodes and not events["stop_recording"]: - log_say(f"Episode {dataset.num_episodes}", cfg.play_sounds) - - if use_rtc: - queue_holder["queue"] = ActionQueue(cfg.rtc) - _rollout_rtc( - robot=robot, - teleop=teleop, - policy=policy, - preprocessor=preprocessor, - postprocessor=postprocessor, - dataset=dataset, - events=events, - cfg=cfg, - queue_holder=queue_holder, - obs_holder=obs_holder, - obs_lock=obs_lock, - policy_active=policy_active, - compile_warmup_done=compile_warmup_done, - hw_features=hw_features, - ) - else: - _rollout_sync( - robot=robot, - teleop=teleop, - policy=policy, - preprocessor=preprocessor, - postprocessor=postprocessor, - dataset=dataset, - events=events, - cfg=cfg, - ) - - if events["rerecord_episode"]: - log_say("Re-recording", cfg.play_sounds) - events["rerecord_episode"] = False - events["exit_early"] = False - dataset.clear_episode_buffer() - continue - - dataset.save_episode() - recorded += 1 - - if recorded < cfg.dataset.num_episodes and not events["stop_recording"]: - reset_loop(robot, teleop, events, cfg.dataset.fps) - - finally: - log_say("Stop recording", cfg.play_sounds, blocking=True) - 
- shutdown_event.set() - policy_active.clear() - - if rtc_thread and rtc_thread.is_alive(): - rtc_thread.join(timeout=2.0) - - if dataset: - dataset.finalize() - - if robot_raw.is_connected: - robot_raw.disconnect() - if teleop.is_connected: - teleop.disconnect() - - if not is_headless() and listener: - listener.stop() - - if cfg.dataset.push_to_hub and dataset is not None: - dataset.push_to_hub(tags=cfg.dataset.tags, private=cfg.dataset.private) - - return dataset - - -def main(): - from lerobot.utils.import_utils import register_third_party_plugins - - register_third_party_plugins() - hil_collect() - - -if __name__ == "__main__": - main() diff --git a/examples/hil/hil_utils.py b/examples/hil/hil_utils.py deleted file mode 100644 index 0f433d83a..000000000 --- a/examples/hil/hil_utils.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2025 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Shared utilities for Human-in-the-Loop data collection scripts.""" - -import logging -import time -from dataclasses import dataclass, field -from pathlib import Path - -from lerobot.common.control_utils import is_headless -from lerobot.processor import ( - IdentityProcessorStep, - RobotAction, - RobotObservation, - RobotProcessorPipeline, - observation_to_transition, - robot_action_observation_to_transition, - transition_to_observation, - transition_to_robot_action, -) -from lerobot.robots import Robot -from lerobot.teleoperators import Teleoperator -from lerobot.utils.robot_utils import precise_sleep - -logger = logging.getLogger(__name__) - - -@dataclass -class HILDatasetConfig: - repo_id: str - single_task: str - root: str | Path | None = None - fps: int = 30 - episode_time_s: float = 120 - num_episodes: int = 50 - video: bool = True - push_to_hub: bool = True - private: bool = False - tags: list[str] | None = None - num_image_writer_processes: int = 0 - num_image_writer_threads_per_camera: int = 4 - video_encoding_batch_size: int = 1 - vcodec: str = "auto" - streaming_encoding: bool = True - encoder_queue_maxsize: int = 30 - encoder_threads: int | None = None - rename_map: dict[str, str] = field(default_factory=dict) - - -def teleop_has_motor_control(teleop: Teleoperator) -> bool: - """Check if teleoperator has motor control capabilities.""" - return all(hasattr(teleop, attr) for attr in ("enable_torque", "disable_torque", "write_goal_positions")) - - -def teleop_disable_torque(teleop: Teleoperator) -> None: - """Disable teleop torque if supported.""" - if hasattr(teleop, "disable_torque"): - teleop.disable_torque() - - -def teleop_enable_torque(teleop: Teleoperator) -> None: - """Enable teleop torque if supported.""" - if hasattr(teleop, "enable_torque"): - teleop.enable_torque() - - -def teleop_smooth_move_to(teleop: Teleoperator, target_pos: dict, duration_s: float = 2.0, fps: int = 50): - """Smoothly move teleop to target position if motor control is 
available.""" - if not teleop_has_motor_control(teleop): - logger.warning("Teleop does not support motor control - cannot mirror robot position") - return - - teleop_enable_torque(teleop) - current = teleop.get_action() - steps = max(int(duration_s * fps), 1) - - for step in range(steps + 1): - t = step / steps - interp = {} - for k in current: - if k in target_pos: - interp[k] = current[k] * (1 - t) + target_pos[k] * t - else: - interp[k] = current[k] - teleop.write_goal_positions(interp) - time.sleep(1 / fps) - - -def init_keyboard_listener(): - """Initialize keyboard listener with HIL controls.""" - events = { - "exit_early": False, - "rerecord_episode": False, - "stop_recording": False, - "policy_paused": False, - "correction_active": False, - "resume_policy": False, - "in_reset": False, - "start_next_episode": False, - } - - if is_headless(): - logger.warning("Headless environment - keyboard controls unavailable") - return None, events - - from pynput import keyboard - - def on_press(key): - try: - if events["in_reset"]: - if key in [keyboard.Key.space, keyboard.Key.right]: - logger.info("[HIL] Starting next episode...") - events["start_next_episode"] = True - elif hasattr(key, "char") and key.char == "c": - events["start_next_episode"] = True - elif key == keyboard.Key.esc: - logger.info("[HIL] ESC - Stop recording, pushing to hub...") - events["stop_recording"] = True - events["start_next_episode"] = True - else: - if key == keyboard.Key.space: - if not events["policy_paused"] and not events["correction_active"]: - logger.info("[HIL] PAUSED - Press 'c' to take control or 'p' to resume policy") - events["policy_paused"] = True - elif hasattr(key, "char") and key.char == "c": - if events["policy_paused"] and not events["correction_active"]: - logger.info("[HIL] Taking control...") - events["start_next_episode"] = True - elif hasattr(key, "char") and key.char == "p": - if events["policy_paused"] or events["correction_active"]: - logger.info("[HIL] Resuming 
policy...") - events["resume_policy"] = True - elif key == keyboard.Key.right: - logger.info("[HIL] End episode") - events["exit_early"] = True - elif key == keyboard.Key.left: - logger.info("[HIL] Re-record episode") - events["rerecord_episode"] = True - events["exit_early"] = True - elif key == keyboard.Key.esc: - logger.info("[HIL] ESC - Stop recording...") - events["stop_recording"] = True - events["exit_early"] = True - except Exception as e: - logger.info(f"Key error: {e}") - - listener = keyboard.Listener(on_press=on_press) - listener.start() - return listener, events - - -def make_identity_processors(): - """Create identity processors for recording.""" - teleop_proc = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( - steps=[IdentityProcessorStep()], - to_transition=robot_action_observation_to_transition, - to_output=transition_to_robot_action, - ) - obs_proc = RobotProcessorPipeline[RobotObservation, RobotObservation]( - steps=[IdentityProcessorStep()], - to_transition=observation_to_transition, - to_output=transition_to_observation, - ) - return teleop_proc, obs_proc - - -def reset_loop(robot: Robot, teleop: Teleoperator, events: dict, fps: int): - """Reset period where human repositions environment.""" - logger.info("[HIL] RESET") - - events["in_reset"] = True - events["start_next_episode"] = False - - obs = robot.get_observation() - robot_pos = {k: v for k, v in obs.items() if k.endswith(".pos") and k in robot.observation_features} - teleop_smooth_move_to(teleop, robot_pos, duration_s=2.0, fps=50) - - logger.info("Press any key to enable teleoperation") - while not events["start_next_episode"] and not events["stop_recording"]: - precise_sleep(0.05) - - if events["stop_recording"]: - return - - events["start_next_episode"] = False - teleop_disable_torque(teleop) - logger.info("Teleop enabled - press any key to start episode") - - while not events["start_next_episode"] and not events["stop_recording"]: - loop_start = 
time.perf_counter() - action = teleop.get_action() - robot.send_action(action) - precise_sleep(1 / fps - (time.perf_counter() - loop_start)) - - events["in_reset"] = False - events["start_next_episode"] = False - events["exit_early"] = False - events["policy_paused"] = False - events["correction_active"] = False - events["resume_policy"] = False - - -def print_controls(rtc: bool = False): - """Print control instructions.""" - mode = "Human-in-the-Loop Data Collection" + (" (RTC)" if rtc else "") - logger.info( - "%s\n Controls:\n" - " SPACE - Pause policy\n" - " c - Take control\n" - " p - Resume policy after pause/correction\n" - " → - End episode\n" - " ESC - Stop and push to hub", - mode, - ) diff --git a/examples/lekiwi/evaluate.py b/examples/lekiwi/evaluate.py index d8c53829e..3ddcd1f14 100644 --- a/examples/lekiwi/evaluate.py +++ b/examples/lekiwi/evaluate.py @@ -14,17 +14,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -from lerobot.common.control_utils import init_keyboard_listener +import logging +import time + +from lerobot.common.control_utils import init_keyboard_listener, predict_action from lerobot.datasets import LeRobotDataset from lerobot.policies import make_pre_post_processors from lerobot.policies.act import ACTPolicy +from lerobot.policies.utils import make_robot_action from lerobot.processor import make_default_processors from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig -from lerobot.scripts.lerobot_record import record_loop from lerobot.utils.constants import ACTION, OBS_STR -from lerobot.utils.feature_utils import hw_to_dataset_features +from lerobot.utils.feature_utils import build_dataset_frame, hw_to_dataset_features +from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.utils import log_say -from lerobot.utils.visualization_utils import init_rerun +from lerobot.utils.visualization_utils import init_rerun, log_rerun_data NUM_EPISODES = 2 FPS = 
30 @@ -35,6 +39,9 @@ HF_DATASET_ID = "/" def main(): + # NOTE: For production policy deployment, use `lerobot-rollout` CLI instead. + # This script provides a self-contained example for educational purposes. + # Create the robot configuration & robot robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi") @@ -83,43 +90,67 @@ def main(): raise ValueError("Robot is not connected!") print("Starting evaluate loop...") + control_interval = 1 / FPS recorded_episodes = 0 while recorded_episodes < NUM_EPISODES and not events["stop_recording"]: log_say(f"Running inference, recording eval episode {recorded_episodes} of {NUM_EPISODES}") - # Main record loop - record_loop( - robot=robot, - events=events, - fps=FPS, - policy=policy, - preprocessor=preprocessor, # Pass the pre and post policy processors - postprocessor=postprocessor, - dataset=dataset, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=teleop_action_processor, - robot_action_processor=robot_action_processor, - robot_observation_processor=robot_observation_processor, - ) + # Inline evaluation loop: predict actions and send to robot + timestamp = 0 + start_episode_t = time.perf_counter() + while timestamp < EPISODE_TIME_SEC: + start_loop_t = time.perf_counter() + + if events["exit_early"]: + events["exit_early"] = False + break + + # Get robot observation + obs = robot.get_observation() + obs_processed = robot_observation_processor(obs) + observation_frame = build_dataset_frame(dataset.features, obs_processed, prefix=OBS_STR) + + # Predict action using the policy + action_tensor = predict_action( + observation=observation_frame, + policy=policy, + device=policy.config.device, + preprocessor=preprocessor, + postprocessor=postprocessor, + use_amp=policy.config.device.type == "cuda", + task=TASK_DESCRIPTION, + robot_type=robot.name, + ) + + # Convert policy output to robot action dict + action_values = make_robot_action(action_tensor, 
dataset.features) + + # Process and send action to robot + robot_action_to_send = robot_action_processor((action_values, obs)) + robot.send_action(robot_action_to_send) + + # Write to dataset + action_frame = build_dataset_frame(dataset.features, action_values, prefix=ACTION) + frame = {**observation_frame, **action_frame, "task": TASK_DESCRIPTION} + dataset.add_frame(frame) + + log_rerun_data(observation=obs_processed, action=action_values) + + dt_s = time.perf_counter() - start_loop_t + sleep_time_s = control_interval - dt_s + if sleep_time_s < 0: + logging.warning( + f"Evaluate loop is running slower ({1 / dt_s:.1f} Hz) than the target FPS ({FPS} Hz)." + ) + precise_sleep(max(sleep_time_s, 0.0)) + timestamp = time.perf_counter() - start_episode_t # Reset the environment if not stopping or re-recording if not events["stop_recording"] and ( (recorded_episodes < NUM_EPISODES - 1) or events["rerecord_episode"] ): log_say("Reset the environment") - record_loop( - robot=robot, - events=events, - fps=FPS, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=teleop_action_processor, - robot_action_processor=robot_action_processor, - robot_observation_processor=robot_observation_processor, - ) + log_say("Waiting for environment reset, press right arrow key when ready...") if events["rerecord_episode"]: log_say("Re-record episode") diff --git a/examples/lekiwi/record.py b/examples/lekiwi/record.py index de5df7756..2c581f5ff 100644 --- a/examples/lekiwi/record.py +++ b/examples/lekiwi/record.py @@ -45,9 +45,6 @@ def main(): leader_arm = SO100Leader(leader_arm_config) keyboard = KeyboardTeleop(keyboard_config) - # TODO(Steven): Update this example to use pipelines - teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors() - # Configure the dataset features action_features = hw_to_dataset_features(robot.action_features, ACTION) obs_features = 
hw_to_dataset_features(robot.observation_features, OBS_STR) @@ -77,6 +74,10 @@ def main(): if not robot.is_connected or not leader_arm.is_connected or not keyboard.is_connected: raise ValueError("Robot or teleop is not connected!") + teleop_action_processor, robot_action_processor, robot_observation_processor = ( + make_default_processors() + ) + print("Starting record loop...") recorded_episodes = 0 while recorded_episodes < NUM_EPISODES and not events["stop_recording"]: @@ -87,14 +88,14 @@ def main(): robot=robot, events=events, fps=FPS, + teleop_action_processor=teleop_action_processor, + robot_action_processor=robot_action_processor, + robot_observation_processor=robot_observation_processor, dataset=dataset, teleop=[leader_arm, keyboard], control_time_s=EPISODE_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=teleop_action_processor, - robot_action_processor=robot_action_processor, - robot_observation_processor=robot_observation_processor, ) # Reset the environment if not stopping or re-recording @@ -106,13 +107,13 @@ def main(): robot=robot, events=events, fps=FPS, + teleop_action_processor=teleop_action_processor, + robot_action_processor=robot_action_processor, + robot_observation_processor=robot_observation_processor, teleop=[leader_arm, keyboard], control_time_s=RESET_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=teleop_action_processor, - robot_action_processor=robot_action_processor, - robot_observation_processor=robot_observation_processor, ) if events["rerecord_episode"]: diff --git a/examples/lekiwi/rollout.py b/examples/lekiwi/rollout.py new file mode 100644 index 000000000..4fb103c8c --- /dev/null +++ b/examples/lekiwi/rollout.py @@ -0,0 +1,77 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run a trained policy on LeKiwi without recording (base rollout). + +Uses the rollout engine's :class:`BaseStrategy` (autonomous execution, +no dataset) with :class:`SyncInferenceConfig` (inline policy call per +control tick). For a CLI entry point with the same capabilities plus +recording, upload, and human-in-the-loop variants, see ``lerobot-rollout``. +""" + +from lerobot.configs import PreTrainedConfig +from lerobot.robots.lekiwi import LeKiwiClientConfig +from lerobot.rollout import BaseStrategyConfig, RolloutConfig, build_rollout_context +from lerobot.rollout.inference import SyncInferenceConfig +from lerobot.rollout.strategies import BaseStrategy +from lerobot.utils.process import ProcessSignalHandler +from lerobot.utils.utils import init_logging + +FPS = 30 +DURATION_SEC = 60 +TASK_DESCRIPTION = "My task description" +HF_MODEL_ID = "/" + + +def main(): + init_logging() + + # Robot: LeKiwi client — make sure lekiwi_host is already running on the robot. + robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi") + + # Policy: load the pretrained config. ``pretrained_path`` is read downstream + # by ``build_rollout_context`` to reload the full model. + policy_config = PreTrainedConfig.from_pretrained(HF_MODEL_ID) + policy_config.pretrained_path = HF_MODEL_ID + + # Assemble the rollout config: base strategy (no recording) + sync inference. 
+ cfg = RolloutConfig( + robot=robot_config, + policy=policy_config, + strategy=BaseStrategyConfig(), + inference=SyncInferenceConfig(), + fps=FPS, + duration=DURATION_SEC, + task=TASK_DESCRIPTION, + ) + + # Graceful Ctrl-C: the strategy loop exits when shutdown_event is set. + signal_handler = ProcessSignalHandler(use_threads=True) + + # Build the context (connects robot, loads policy, wires the inference strategy). + # No custom processors here — LeKiwi runs on raw joint features. + ctx = build_rollout_context(cfg, signal_handler.shutdown_event) + + strategy = BaseStrategy(cfg.strategy) + try: + strategy.setup(ctx) + strategy.run(ctx) + finally: + strategy.teardown(ctx) + + +if __name__ == "__main__": + main() diff --git a/examples/notebooks/quickstart.ipynb b/examples/notebooks/quickstart.ipynb new file mode 100644 index 000000000..647b79506 --- /dev/null +++ b/examples/notebooks/quickstart.ipynb @@ -0,0 +1,342 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 🤗 LeRobot Quickstart\n", + "\n", + "Calibration → teleoperation → data collection → training → evaluation.\n", + "\n", + "Install the required dependencies: `pip install -e .[notebook,dataset,training,viz,hardware]`.\n", + "\n", + "**How to use:**\n", + "1. Edit the **Configuration** cell with your settings.\n", + "2. Run all cells (`Run All`).\n", + "3. Each section prints a ready-to-paste terminal command - copy it and run it.\n", + "\n", + "Each setup is different, please refer to the [LeRobot documentation](https://huggingface.co/docs/lerobot/il_robots) for more details on each step and available options.
\n", + "Feel free to make this notebook your own and adapt it to your needs!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Utils" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def _cameras_arg(cameras: dict) -> str:\n", + " if not cameras:\n", + " return \"\"\n", + " entries = [f\"{n}: {{{', '.join(f'{k}: {v}' for k, v in cfg.items())}}}\" for n, cfg in cameras.items()]\n", + " return \"{ \" + \", \".join(entries) + \" }\"\n", + "\n", + "\n", + "def print_cmd(*parts: str) -> None:\n", + " \"\"\"Print a shell command with line continuations, skipping empty parts.\"\"\"\n", + " non_empty = [p for p in parts if p]\n", + " print(\" \\\\\\n \".join(non_empty))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Configuration\n", + "\n", + "Edit this cell, then **Run All** to generate all commands below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Robot (follower) - run `lerobot-find-port` to discover the port\n", + "ROBOT_TYPE = \"so101_follower\"\n", + "ROBOT_PORT = \"/dev/ttyACM0\"\n", + "ROBOT_ID = \"my_follower_arm\"\n", + "\n", + "# Teleop (leader) - run `lerobot-find-port` to discover the port\n", + "TELEOP_TYPE = \"so101_leader\"\n", + "TELEOP_PORT = \"/dev/ttyACM1\"\n", + "TELEOP_ID = \"my_leader_arm\"\n", + "\n", + "# Cameras - set to {} to disable\n", + "# Run `lerobot-find-cameras opencv` to list available cameras and their indices\n", + "CAMERAS = {\n", + " \"top\": {\"type\": \"opencv\", \"index_or_path\": 2, \"width\": 640, \"height\": 480, \"fps\": 30},\n", + " \"wrist\": {\"type\": \"opencv\", \"index_or_path\": 4, \"width\": 640, \"height\": 480, \"fps\": 30},\n", + "}\n", + "\n", + "# Dataset\n", + "HF_USER = \"your_hf_username\" # `huggingface-cli whoami` to find your username\n", + "DATASET_NAME = \"my_so101_dataset\"\n", + 
"TASK_DESCRIPTION = \"pick and place the block\"\n", + "NUM_EPISODES = 10\n", + "\n", + "# Training\n", + "POLICY_TYPE = \"act\" # act, diffusion, smolvla, ...\n", + "POLICY_DEVICE = \"cuda\" # cuda / cpu / mps\n", + "TRAIN_STEPS = 10_000\n", + "SAVE_FREQ = 2_000\n", + "OUTPUT_DIR = f\"outputs/train/{DATASET_NAME}\"\n", + "\n", + "# Inference - Hub repo ID or local checkpoint path\n", + "# e.g. set to f\"{OUTPUT_DIR}/checkpoints/last\" to use a local checkpoint\n", + "POLICY_PATH = f\"{HF_USER}/{DATASET_NAME}_{POLICY_TYPE}\"\n", + "LAST_CHECKPOINT_PATH = f\"{OUTPUT_DIR}/checkpoints/last\"\n", + "\n", + "# Derived\n", + "DATASET_REPO_ID = f\"{HF_USER}/{DATASET_NAME}\"\n", + "DATASET_ROOT = f\"data/{DATASET_NAME}\"\n", + "POLICY_REPO_ID = f\"{HF_USER}/{DATASET_NAME}_{POLICY_TYPE}\"\n", + "EVAL_REPO_ID = f\"{HF_USER}/eval_{DATASET_NAME}\"\n", + "CAMERAS_ARG = _cameras_arg(CAMERAS)\n", + "CAMERAS_FLAG = f'--robot.cameras=\"{CAMERAS_ARG}\"' if CAMERAS_ARG else \"\"\n", + "\n", + "print(f\"Robot : {ROBOT_TYPE} @ {ROBOT_PORT}\")\n", + "print(f\"Teleop : {TELEOP_TYPE} @ {TELEOP_PORT}\")\n", + "print(f\"Cameras: {list(CAMERAS) or 'none'}\")\n", + "print(f\"Dataset: {DATASET_REPO_ID} ({NUM_EPISODES} episodes) saved to {DATASET_ROOT}\")\n", + "print(f\"Policy : {POLICY_TYPE} -> {POLICY_REPO_ID}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 1. Calibration\n", + "\n", + "Run once per arm before first use." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Follower\n", + "print_cmd(\n", + " \"lerobot-calibrate\",\n", + " f\"--robot.type={ROBOT_TYPE}\",\n", + " f\"--robot.port={ROBOT_PORT}\",\n", + " f\"--robot.id={ROBOT_ID}\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Leader\n", + "print_cmd(\n", + " \"lerobot-calibrate\",\n", + " f\"--teleop.type={TELEOP_TYPE}\",\n", + " f\"--teleop.port={TELEOP_PORT}\",\n", + " f\"--teleop.id={TELEOP_ID}\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 2. Teleoperation\n", + "\n", + "See the [teleoperation docs](https://huggingface.co/docs/lerobot/il_robots#teleoperate) and the [cameras guide](https://huggingface.co/docs/lerobot/cameras) for more options." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print_cmd(\n", + " \"lerobot-teleoperate\",\n", + " f\"--robot.type={ROBOT_TYPE}\",\n", + " f\"--robot.port={ROBOT_PORT}\",\n", + " f\"--robot.id={ROBOT_ID}\",\n", + " CAMERAS_FLAG,\n", + " f\"--teleop.type={TELEOP_TYPE}\",\n", + " f\"--teleop.port={TELEOP_PORT}\",\n", + " f\"--teleop.id={TELEOP_ID}\",\n", + " \"--display_data=true\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 3. Record Dataset\n", + "\n", + "See the [recording docs](https://huggingface.co/docs/lerobot/il_robots#record-a-dataset) for tips on gathering good data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print_cmd(\n", + " \"lerobot-record\",\n", + " f\"--robot.type={ROBOT_TYPE}\",\n", + " f\"--robot.port={ROBOT_PORT}\",\n", + " f\"--robot.id={ROBOT_ID}\",\n", + " CAMERAS_FLAG,\n", + " f\"--teleop.type={TELEOP_TYPE}\",\n", + " f\"--teleop.port={TELEOP_PORT}\",\n", + " f\"--teleop.id={TELEOP_ID}\",\n", + " f\"--dataset.repo_id={DATASET_REPO_ID}\",\n", + " f\"--dataset.num_episodes={NUM_EPISODES}\",\n", + " f'--dataset.single_task=\"{TASK_DESCRIPTION}\"',\n", + " \"--dataset.streaming_encoding=true\",\n", + " \"--display_data=true\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Resume a previously interrupted recording session\n", + "print_cmd(\n", + " \"lerobot-record\",\n", + " f\"--robot.type={ROBOT_TYPE}\",\n", + " f\"--robot.port={ROBOT_PORT}\",\n", + " f\"--robot.id={ROBOT_ID}\",\n", + " CAMERAS_FLAG,\n", + " f\"--teleop.type={TELEOP_TYPE}\",\n", + " f\"--teleop.port={TELEOP_PORT}\",\n", + " f\"--teleop.id={TELEOP_ID}\",\n", + " f\"--dataset.repo_id={DATASET_REPO_ID}\",\n", + " f\"--dataset.root={DATASET_ROOT}\",\n", + " f\"--dataset.num_episodes={NUM_EPISODES}\",\n", + " f'--dataset.single_task=\"{TASK_DESCRIPTION}\"',\n", + " \"--dataset.streaming_encoding=true\",\n", + " \"--display_data=true\",\n", + " \"--resume=true\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 4. Train Policy\n", + "\n", + "See the [training docs](https://huggingface.co/docs/lerobot/il_robots#train-a-policy) for configuration options and tips." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print_cmd(\n", + " \"lerobot-train\",\n", + " f\"--dataset.repo_id={DATASET_REPO_ID}\",\n", + " f\"--policy.type={POLICY_TYPE}\",\n", + " f\"--policy.device={POLICY_DEVICE}\",\n", + " f\"--policy.repo_id={POLICY_REPO_ID}\",\n", + " f\"--output_dir={OUTPUT_DIR}\",\n", + " f\"--steps={TRAIN_STEPS}\",\n", + " f\"--save_freq={SAVE_FREQ}\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Resume a previously interrupted training session\n", + "print_cmd(\n", + " \"lerobot-train\",\n", + " f\"--config_path={LAST_CHECKPOINT_PATH}/pretrained_model/train_config.json\",\n", + " \"--resume=true\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 5. Inference\n", + "\n", + "Uses `POLICY_PATH` from the Configuration cell (defaults to the Hub repo ID). You can also put there the `LAST_CHECKPOINT_PATH`.\n", + "\n", + "See the [inference docs](https://huggingface.co/docs/lerobot/il_robots#run-inference-and-evaluate-your-policy) for details." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print_cmd(\n", + " \"lerobot-record\",\n", + " f\"--policy.path={POLICY_PATH}\",\n", + " f\"--robot.type={ROBOT_TYPE}\",\n", + " f\"--robot.port={ROBOT_PORT}\",\n", + " f\"--robot.id={ROBOT_ID}\",\n", + " CAMERAS_FLAG,\n", + " f\"--teleop.type={TELEOP_TYPE}\",\n", + " f\"--teleop.port={TELEOP_PORT}\",\n", + " f\"--teleop.id={TELEOP_ID}\",\n", + " f\"--dataset.repo_id={EVAL_REPO_ID}\",\n", + " f\"--dataset.num_episodes={NUM_EPISODES}\",\n", + " f'--dataset.single_task=\"{TASK_DESCRIPTION}\"',\n", + " \"--dataset.streaming_encoding=true\",\n", + ")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "lerobot (3.12.3)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/phone_to_so100/evaluate.py b/examples/phone_to_so100/evaluate.py index 267e67c48..e859123d0 100644 --- a/examples/phone_to_so100/evaluate.py +++ b/examples/phone_to_so100/evaluate.py @@ -14,13 +14,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging +import time + from lerobot.cameras.opencv import OpenCVCameraConfig -from lerobot.common.control_utils import init_keyboard_listener +from lerobot.common.control_utils import init_keyboard_listener, predict_action from lerobot.configs import FeatureType, PolicyFeature from lerobot.datasets import LeRobotDataset, aggregate_pipeline_dataset_features, create_initial_features from lerobot.model.kinematics import RobotKinematics from lerobot.policies import make_pre_post_processors from lerobot.policies.act import ACTPolicy +from lerobot.policies.utils import make_robot_action from lerobot.processor import ( RobotProcessorPipeline, make_default_teleop_action_processor, @@ -34,11 +38,12 @@ from lerobot.robots.so_follower.robot_kinematic_processor import ( ForwardKinematicsJointsToEE, InverseKinematicsEEToJoints, ) -from lerobot.scripts.lerobot_record import record_loop from lerobot.types import RobotAction, RobotObservation -from lerobot.utils.feature_utils import combine_feature_dicts +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame, combine_feature_dicts +from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.utils import log_say -from lerobot.utils.visualization_utils import init_rerun +from lerobot.utils.visualization_utils import init_rerun, log_rerun_data NUM_EPISODES = 5 FPS = 30 @@ -49,6 +54,9 @@ HF_DATASET_ID = "/" def main(): + # NOTE: For production policy deployment, use `lerobot-rollout` CLI instead. + # This script provides a self-contained example for educational purposes. 
+ # Create the robot configuration & robot camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} robot_config = SO100FollowerConfig( @@ -143,43 +151,67 @@ def main(): raise ValueError("Robot is not connected!") print("Starting evaluate loop...") + control_interval = 1 / FPS episode_idx = 0 for episode_idx in range(NUM_EPISODES): log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}") - # Main record loop - record_loop( - robot=robot, - events=events, - fps=FPS, - policy=policy, - preprocessor=preprocessor, # Pass the pre and post policy processors - postprocessor=postprocessor, - dataset=dataset, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=make_default_teleop_action_processor(), - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose_processor, - ) + # Inline evaluation loop: predict actions and send to robot + timestamp = 0 + start_episode_t = time.perf_counter() + while timestamp < EPISODE_TIME_SEC: + start_loop_t = time.perf_counter() + + if events["exit_early"]: + events["exit_early"] = False + break + + # Get robot observation + obs = robot.get_observation() + obs_processed = robot_joints_to_ee_pose_processor(obs) + observation_frame = build_dataset_frame(dataset.features, obs_processed, prefix=OBS_STR) + + # Predict action using the policy + action_tensor = predict_action( + observation=observation_frame, + policy=policy, + device=policy.config.device, + preprocessor=preprocessor, + postprocessor=postprocessor, + use_amp=policy.config.device.type == "cuda", + task=TASK_DESCRIPTION, + robot_type=robot.name, + ) + + # Convert policy output to robot action dict + action_values = make_robot_action(action_tensor, dataset.features) + + # Process and send action to robot (EE -> joints via IK) + robot_action_to_send = robot_ee_to_joints_processor((action_values, obs)) + 
robot.send_action(robot_action_to_send) + + # Write to dataset + action_frame = build_dataset_frame(dataset.features, action_values, prefix=ACTION) + frame = {**observation_frame, **action_frame, "task": TASK_DESCRIPTION} + dataset.add_frame(frame) + + log_rerun_data(observation=obs_processed, action=action_values) + + dt_s = time.perf_counter() - start_loop_t + sleep_time_s = control_interval - dt_s + if sleep_time_s < 0: + logging.warning( + f"Evaluate loop is running slower ({1 / dt_s:.1f} Hz) than the target FPS ({FPS} Hz)." + ) + precise_sleep(max(sleep_time_s, 0.0)) + timestamp = time.perf_counter() - start_episode_t # Reset the environment if not stopping or re-recording if not events["stop_recording"] and ( (episode_idx < NUM_EPISODES - 1) or events["rerecord_episode"] ): log_say("Reset the environment") - record_loop( - robot=robot, - events=events, - fps=FPS, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=make_default_teleop_action_processor(), - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose_processor, - ) + log_say("Waiting for environment reset, press right arrow key when ready...") if events["rerecord_episode"]: log_say("Re-record episode") @@ -190,7 +222,6 @@ def main(): # Save episode dataset.save_episode() - episode_idx += 1 finally: # Clean up log_say("Stop recording") diff --git a/examples/phone_to_so100/record.py b/examples/phone_to_so100/record.py index 6a8d38ec3..87b8e49fd 100644 --- a/examples/phone_to_so100/record.py +++ b/examples/phone_to_so100/record.py @@ -65,14 +65,15 @@ def main(): robot = SO100Follower(robot_config) phone = Phone(teleop_config) - # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf + # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: + # 
https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf kinematics_solver = RobotKinematics( urdf_path="./SO101/so101_new_calib.urdf", target_frame_name="gripper_frame_link", joint_names=list(robot.bus.motors.keys()), ) - # Build pipeline to convert phone action to EE action + # Build pipeline to convert phone action to EE action (with gripper velocity mapped to joint). phone_to_robot_ee_pose_processor = RobotProcessorPipeline[ tuple[RobotAction, RobotObservation], RobotAction ]( @@ -94,7 +95,7 @@ def main(): to_output=transition_to_robot_action, ) - # Build pipeline to convert EE action to joints action + # Build pipeline to convert EE action to joints action (IK). robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( steps=[ InverseKinematicsEEToJoints( @@ -107,7 +108,7 @@ def main(): to_output=transition_to_robot_action, ) - # Build pipeline to convert joint observation to EE observation + # Build pipeline to convert joint observation to EE observation (FK). robot_joints_to_ee_pose = RobotProcessorPipeline[RobotObservation, RobotObservation]( steps=[ ForwardKinematicsJointsToEE( @@ -118,13 +119,12 @@ def main(): to_output=transition_to_observation, ) - # Create the dataset + # Create the dataset, deriving features from the pipelines so the on-disk schema + # matches exactly what the pipelines produce at runtime. 
dataset = LeRobotDataset.create( repo_id=HF_REPO_ID, fps=FPS, features=combine_feature_dicts( - # Run the feature contract of the pipelines - # This tells you how the features would look like after the pipeline steps aggregate_pipeline_dataset_features( pipeline=phone_to_robot_ee_pose_processor, initial_features=create_initial_features(action=phone.action_features), @@ -163,14 +163,14 @@ def main(): robot=robot, events=events, fps=FPS, + teleop_action_processor=phone_to_robot_ee_pose_processor, + robot_action_processor=robot_ee_to_joints_processor, + robot_observation_processor=robot_joints_to_ee_pose, teleop=phone, dataset=dataset, control_time_s=EPISODE_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=phone_to_robot_ee_pose_processor, - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose, ) # Reset the environment if not stopping or re-recording @@ -182,13 +182,13 @@ def main(): robot=robot, events=events, fps=FPS, + teleop_action_processor=phone_to_robot_ee_pose_processor, + robot_action_processor=robot_ee_to_joints_processor, + robot_observation_processor=robot_joints_to_ee_pose, teleop=phone, control_time_s=RESET_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=phone_to_robot_ee_pose_processor, - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose, ) if events["rerecord_episode"]: diff --git a/examples/phone_to_so100/rollout.py b/examples/phone_to_so100/rollout.py new file mode 100644 index 000000000..ca6706c52 --- /dev/null +++ b/examples/phone_to_so100/rollout.py @@ -0,0 +1,126 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run a trained EE-space policy on SO100 (phone-trained) without recording. + +Mirrors ``examples/so100_to_so100_EE/rollout.py`` — the model was trained +with phone teleoperation in EE space, so at deployment we only need the +joint↔EE conversion on the robot side; the phone is not used. + +Uses :class:`BaseStrategy` (no recording) + :class:`SyncInferenceConfig` +(inline policy call). For recording during rollout, switch to Sentry, +Highlight, or DAgger via ``lerobot-rollout --strategy.type=...``. +""" + +from lerobot.cameras.opencv import OpenCVCameraConfig +from lerobot.configs import PreTrainedConfig +from lerobot.model.kinematics import RobotKinematics +from lerobot.processor import ( + RobotProcessorPipeline, + observation_to_transition, + robot_action_observation_to_transition, + transition_to_observation, + transition_to_robot_action, +) +from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig +from lerobot.robots.so_follower.robot_kinematic_processor import ( + ForwardKinematicsJointsToEE, + InverseKinematicsEEToJoints, +) +from lerobot.rollout import BaseStrategyConfig, RolloutConfig, build_rollout_context +from lerobot.rollout.inference import SyncInferenceConfig +from lerobot.rollout.strategies import BaseStrategy +from lerobot.types import RobotAction, RobotObservation +from lerobot.utils.process import ProcessSignalHandler +from lerobot.utils.utils import init_logging + +FPS = 30 +DURATION_SEC = 60 +TASK_DESCRIPTION = "My task description" +HF_MODEL_ID = "/" + + +def main(): + init_logging() + + camera_config = {"front": 
OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} + robot_config = SO100FollowerConfig( + port="/dev/tty.usbmodem58760434471", + id="my_awesome_follower_arm", + cameras=camera_config, + use_degrees=True, + ) + + # Peek at motor names once to build the kinematic solver. + temp_robot = SO100Follower(robot_config) + motor_names = list(temp_robot.bus.motors.keys()) + + kinematics_solver = RobotKinematics( + urdf_path="./SO101/so101_new_calib.urdf", + target_frame_name="gripper_frame_link", + joint_names=motor_names, + ) + + robot_joints_to_ee_pose_processor = RobotProcessorPipeline[RobotObservation, RobotObservation]( + steps=[ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=motor_names)], + to_transition=observation_to_transition, + to_output=transition_to_observation, + ) + + robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( + steps=[ + InverseKinematicsEEToJoints( + kinematics=kinematics_solver, + motor_names=motor_names, + initial_guess_current_joints=True, + ), + ], + to_transition=robot_action_observation_to_transition, + to_output=transition_to_robot_action, + ) + + policy_config = PreTrainedConfig.from_pretrained(HF_MODEL_ID) + policy_config.pretrained_path = HF_MODEL_ID + + cfg = RolloutConfig( + robot=robot_config, + policy=policy_config, + strategy=BaseStrategyConfig(), + inference=SyncInferenceConfig(), + fps=FPS, + duration=DURATION_SEC, + task=TASK_DESCRIPTION, + ) + + signal_handler = ProcessSignalHandler(use_threads=True) + + ctx = build_rollout_context( + cfg, + signal_handler.shutdown_event, + robot_action_processor=robot_ee_to_joints_processor, + robot_observation_processor=robot_joints_to_ee_pose_processor, + ) + + strategy = BaseStrategy(cfg.strategy) + try: + strategy.setup(ctx) + strategy.run(ctx) + finally: + strategy.teardown(ctx) + + +if __name__ == "__main__": + main() diff --git a/examples/rtc/eval_with_real_robot.py 
b/examples/rtc/eval_with_real_robot.py deleted file mode 100644 index 66562749c..000000000 --- a/examples/rtc/eval_with_real_robot.py +++ /dev/null @@ -1,673 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2025 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Demo script showing how to use Real-Time Chunking (RTC) with action chunking policies on real robots. - -This script demonstrates: -1. Creating a robot and policy (SmolVLA, Pi0, etc.) with RTC -2. Consuming actions from the policy while the robot executes -3. Periodically requesting new action chunks in the background using threads -4. 
Managing action buffers and timing for real-time operation - -For simulation environments, see eval_with_simulation.py - -Usage: - # Run RTC with Real robot with RTC - uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=/smolvla_check_rtc_last3 \ - --policy.device=mps \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - --robot.type=so100_follower \ - --robot.port=/dev/tty.usbmodem58FA0834591 \ - --robot.id=so100_follower \ - --robot.cameras="{ gripper: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ - --task="Move green small object into the purple platform" \ - --duration=120 - - # Run RTC with Real robot without RTC - uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=/smolvla_check_rtc_last3 \ - --policy.device=mps \ - --rtc.enabled=false \ - --robot.type=so100_follower \ - --robot.port=/dev/tty.usbmodem58FA0834591 \ - --robot.id=so100_follower \ - --robot.cameras="{ gripper: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ - --task="Move green small object into the purple platform" \ - --duration=120 - - # Run RTC with Real robot with pi0.5 policy - uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=/pi05_check_rtc \ - --policy.device=mps \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - --robot.type=so100_follower \ - --robot.port=/dev/tty.usbmodem58FA0834591 \ - --robot.id=so100_follower \ - --robot.cameras="{ gripper: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}}" \ - --task="Move green small object into the purple platform" \ - --duration=120 - - # Run RTC with bi_openarm_follower (dual-arm OpenArms) and pi0.5 policy - python examples/rtc/eval_with_real_robot.py \ - --policy.path=lerobot-data-collection/folding_final \ - 
--robot.type=bi_openarm_follower \ - --robot.cameras='{left_wrist: {type: opencv, index_or_path: "/dev/video4", width: 1280, height: 720, fps: 30}, base: {type: opencv, index_or_path: "/dev/video2", width: 640, height: 480, fps: 30}, right_wrist: {type: opencv, index_or_path: "/dev/video0", width: 1280, height: 720, fps: 30}}' \ - --robot.left_arm_config.port=can0 \ - --robot.left_arm_config.side=left \ - --robot.left_arm_config.can_interface=socketcan \ - --robot.left_arm_config.disable_torque_on_disconnect=true \ - --robot.left_arm_config.max_relative_target=8.0 \ - --robot.right_arm_config.port=can1 \ - --robot.right_arm_config.side=right \ - --robot.right_arm_config.can_interface=socketcan \ - --robot.right_arm_config.disable_torque_on_disconnect=true \ - --robot.right_arm_config.max_relative_target=8.0 \ - --task="Fold the T-shirt properly" \ - --fps=30 \ - --duration=2000 \ - --interpolation_multiplier=3 \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - --rtc.max_guidance_weight=5.0 \ - --rtc.prefix_attention_schedule=LINEAR \ - --device=cuda -""" - -import logging -import math -import sys -import time -import traceback -from dataclasses import dataclass, field -from threading import Event, Lock, Thread - -import torch -from torch import Tensor - -from lerobot.cameras.opencv import OpenCVCameraConfig # noqa: F401 -from lerobot.cameras.realsense import RealSenseCameraConfig # noqa: F401 -from lerobot.cameras.zmq import ZMQCameraConfig # noqa: F401 -from lerobot.configs import PreTrainedConfig, RTCAttentionSchedule, parser -from lerobot.policies import get_policy_class, make_pre_post_processors -from lerobot.policies.rtc import ActionInterpolator, ActionQueue, LatencyTracker, RTCConfig -from lerobot.processor import ( - NormalizerProcessorStep, - RelativeActionsProcessorStep, - TransitionKey, - create_transition, - make_default_robot_action_processor, - make_default_robot_observation_processor, - to_relative_actions, -) -from lerobot.rl.process import 
ProcessSignalHandler -from lerobot.robots import ( # noqa: F401 - Robot, - RobotConfig, - bi_openarm_follower, - bi_so_follower, - koch_follower, - so_follower, - unitree_g1, -) -from lerobot.robots.utils import make_robot_from_config -from lerobot.utils.constants import OBS_IMAGES, OBS_STATE -from lerobot.utils.feature_utils import build_dataset_frame, hw_to_dataset_features -from lerobot.utils.hub import HubMixin -from lerobot.utils.utils import init_logging - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -class RobotWrapper: - def __init__(self, robot: Robot): - self.robot = robot - self.lock = Lock() - - def get_observation(self) -> dict[str, Tensor]: - with self.lock: - return self.robot.get_observation() - - def send_action(self, action: Tensor): - with self.lock: - self.robot.send_action(action) - - def observation_features(self) -> list[str]: - with self.lock: - return self.robot.observation_features - - def action_features(self) -> list[str]: - with self.lock: - return self.robot.action_features - - -@dataclass -class RTCDemoConfig(HubMixin): - """Configuration for RTC demo with action chunking policies and real robots.""" - - # Policy configuration - policy: PreTrainedConfig | None = None - - # Robot configuration - robot: RobotConfig | None = None - - # RTC configuration - rtc: RTCConfig = field( - default_factory=lambda: RTCConfig( - execution_horizon=10, - max_guidance_weight=1.0, - prefix_attention_schedule=RTCAttentionSchedule.EXP, - ) - ) - - # Demo parameters - duration: float = 30.0 # Duration to run the demo (seconds) - fps: float = 10.0 # Action execution frequency (Hz) - interpolation_multiplier: int = 1 # Control rate multiplier (1=off, 2=2x, 3=3x) - - # Compute device - device: str | None = None # Device to run on (cuda, cpu, auto) - - # Get new actions horizon. The amount of executed steps after which will be requested new actions. - # It should be higher than inference delay + execution horizon. 
- action_queue_size_to_get_new_actions: int = 30 - - # Task to execute - task: str = field(default="", metadata={"help": "Task to execute"}) - - # Torch compile configuration - use_torch_compile: bool = field( - default=False, - metadata={"help": "Use torch.compile for faster inference (PyTorch 2.0+)"}, - ) - - torch_compile_backend: str = field( - default="inductor", - metadata={"help": "Backend for torch.compile (inductor, aot_eager, cudagraphs)"}, - ) - - torch_compile_mode: str = field( - default="default", - metadata={"help": "Compilation mode (default, reduce-overhead, max-autotune)"}, - ) - - torch_compile_disable_cudagraphs: bool = field( - default=True, - metadata={ - "help": "Disable CUDA graphs in torch.compile. Required due to in-place tensor " - "operations in denoising loop (x_t += dt * v_t) which cause tensor aliasing issues." - }, - ) - - def __post_init__(self): - # HACK: We parse again the cli args here to get the pretrained path if there was one. - policy_path = parser.get_path_arg("policy") - if policy_path: - cli_overrides = parser.get_cli_overrides("policy") - self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) - self.policy.pretrained_path = policy_path - else: - raise ValueError("Policy path is required") - - # Validate that robot configuration is provided - if self.robot is None: - raise ValueError("Robot configuration must be provided") - - @classmethod - def __get_path_fields__(cls) -> list[str]: - """This enables the parser to load config from the policy using `--policy.path=local/dir`""" - return ["policy"] - - -def is_image_key(k: str) -> bool: - return k.startswith(OBS_IMAGES) - - -def _reanchor_relative_rtc_prefix( - prev_actions_absolute: Tensor, - current_state: Tensor, - relative_step: RelativeActionsProcessorStep, - normalizer_step: NormalizerProcessorStep | None, - policy_device: torch.device | str, -) -> Tensor: - """Convert absolute leftovers into model-space for relative-action RTC 
policies. - - When a policy uses relative actions, the RTC prefix (leftover actions from - the previous chunk) is stored in absolute space. Before feeding it back to - the policy we need to re-express it relative to the *current* robot state - and then re-normalize. - """ - state = current_state.detach().cpu() - if state.dim() == 1: - state = state.unsqueeze(0) - - action_cpu = prev_actions_absolute.detach().cpu() - mask = relative_step._build_mask(action_cpu.shape[-1]) - relative_actions = to_relative_actions(action_cpu, state, mask) - - transition = create_transition(action=relative_actions) - if normalizer_step is not None: - transition = normalizer_step(transition) - - return transition[TransitionKey.ACTION].to(policy_device) - - -def get_actions( - policy, - robot: RobotWrapper, - robot_observation_processor, - action_queue: ActionQueue, - shutdown_event: Event, - cfg: RTCDemoConfig, -): - """Thread function to request action chunks from the policy. - - Args: - policy: The policy instance (SmolVLA, Pi0, etc.) - robot: The robot instance for getting observations - robot_observation_processor: Processor for raw robot observations - action_queue: Queue to put new action chunks - shutdown_event: Event to signal shutdown - cfg: Demo configuration - """ - try: - logger.info("[GET_ACTIONS] Starting get actions thread") - - latency_tracker = LatencyTracker() # Track latency of action chunks - fps = cfg.fps - time_per_chunk = 1.0 / fps - - # Only keep .pos joints + camera streams if the policy was trained on positions, - # not the full pos/vel/torque state the robot exposes. 
- observation_features_hw = { - key: value - for key, value in robot.observation_features().items() - if key.endswith(".pos") or isinstance(value, tuple) - } - - dataset_features = hw_to_dataset_features(observation_features_hw, "observation") - policy_device = policy.config.device - - # Load preprocessor and postprocessor from pretrained files - # The stats are embedded in the processor .safetensors files - logger.info(f"[GET_ACTIONS] Loading preprocessor/postprocessor from {cfg.policy.pretrained_path}") - - preprocessor, postprocessor = make_pre_post_processors( - policy_cfg=cfg.policy, - pretrained_path=cfg.policy.pretrained_path, - dataset_stats=None, # Will load from pretrained processor files - preprocessor_overrides={ - "device_processor": {"device": cfg.policy.device}, - }, - ) - - logger.info("[GET_ACTIONS] Preprocessor/postprocessor loaded successfully with embedded stats") - - relative_step = next( - (s for s in preprocessor.steps if isinstance(s, RelativeActionsProcessorStep) and s.enabled), - None, - ) - normalizer_step = next( - (s for s in preprocessor.steps if isinstance(s, NormalizerProcessorStep)), - None, - ) - if relative_step is not None: - if relative_step.action_names is None: - cfg_names = getattr(cfg.policy, "action_feature_names", None) - if cfg_names: - relative_step.action_names = list(cfg_names) - else: - relative_step.action_names = [ - k for k in robot.robot.action_features if k.endswith(".pos") - ] - logger.info("[GET_ACTIONS] Relative actions enabled: will re-anchor RTC prefix") - - get_actions_threshold = cfg.action_queue_size_to_get_new_actions - - if not cfg.rtc.enabled: - get_actions_threshold = 0 - - while not shutdown_event.is_set(): - if action_queue.qsize() <= get_actions_threshold: - current_time = time.perf_counter() - action_index_before_inference = action_queue.get_action_index() - prev_actions = action_queue.get_left_over() - - inference_latency = latency_tracker.max() - inference_delay = math.ceil(inference_latency / 
time_per_chunk) - - obs = robot.get_observation() - - # Apply robot observation processor - obs_processed = robot_observation_processor(obs) - - obs_with_policy_features = build_dataset_frame( - dataset_features, obs_processed, prefix="observation" - ) - - for name in obs_with_policy_features: - obs_with_policy_features[name] = torch.from_numpy(obs_with_policy_features[name]) - if "image" in name: - obs_with_policy_features[name] = ( - obs_with_policy_features[name].type(torch.float32) / 255 - ) - obs_with_policy_features[name] = ( - obs_with_policy_features[name].permute(2, 0, 1).contiguous() - ) - obs_with_policy_features[name] = obs_with_policy_features[name].unsqueeze(0) - obs_with_policy_features[name] = obs_with_policy_features[name].to(policy_device) - - obs_with_policy_features["task"] = [cfg.task] # Task should be a list, not a string! - obs_with_policy_features["robot_type"] = ( - robot.robot.name if hasattr(robot.robot, "name") else "" - ) - - preproceseded_obs = preprocessor(obs_with_policy_features) - - # Re-anchor leftover actions for relative-action policies. - # We need the *postprocessed* (absolute) leftover, not the original - # (normalized/relative) one that get_left_over() returns. 
- if ( - prev_actions is not None - and relative_step is not None - and OBS_STATE in obs_with_policy_features - ): - with action_queue.lock: - if action_queue.queue is not None: - prev_actions_abs = action_queue.queue[action_queue.last_index :].clone() - else: - prev_actions_abs = None - if prev_actions_abs is not None and prev_actions_abs.numel() > 0: - prev_actions = _reanchor_relative_rtc_prefix( - prev_actions_absolute=prev_actions_abs, - current_state=obs_with_policy_features[OBS_STATE], - relative_step=relative_step, - normalizer_step=normalizer_step, - policy_device=policy_device, - ) - - # Generate actions WITH RTC - actions = policy.predict_action_chunk( - preproceseded_obs, - inference_delay=inference_delay, - prev_chunk_left_over=prev_actions, - ) - - # Store original actions (before postprocessing) for RTC - original_actions = actions.squeeze(0).clone() - - postprocessed_actions = postprocessor(actions) - - postprocessed_actions = postprocessed_actions.squeeze(0) - - new_latency = time.perf_counter() - current_time - new_delay = math.ceil(new_latency / time_per_chunk) - latency_tracker.add(new_latency) - - if cfg.action_queue_size_to_get_new_actions < cfg.rtc.execution_horizon + new_delay: - logger.warning( - "[GET_ACTIONS] cfg.action_queue_size_to_get_new_actions Too small, It should be higher than inference delay + execution horizon." - ) - - action_queue.merge( - original_actions, postprocessed_actions, new_delay, action_index_before_inference - ) - else: - # Small sleep to prevent busy waiting - time.sleep(0.1) - - logger.info("[GET_ACTIONS] get actions thread shutting down") - except Exception as e: - logger.error(f"[GET_ACTIONS] Fatal exception in get_actions thread: {e}") - logger.error(traceback.format_exc()) - sys.exit(1) - - -def actor_control( - robot: RobotWrapper, - robot_action_processor, - action_queue: ActionQueue, - shutdown_event: Event, - cfg: RTCDemoConfig, -): - """Thread function to execute actions on the robot. 
- - Args: - robot: The robot instance - action_queue: Queue to get actions from - shutdown_event: Event to signal shutdown - cfg: Demo configuration - """ - try: - logger.info("[ACTOR] Starting actor thread") - - action_keys = [k for k in robot.action_features() if k.endswith(".pos")] - - action_count = 0 - interpolator = ActionInterpolator(multiplier=cfg.interpolation_multiplier) - action_interval = interpolator.get_control_interval(cfg.fps) - - while not shutdown_event.is_set(): - start_time = time.perf_counter() - - if interpolator.needs_new_action(): - new_action = action_queue.get() - if new_action is not None: - interpolator.add(new_action.cpu()) - - action = interpolator.get() - if action is not None: - action = action.cpu() - action_dict = {key: action[i].item() for i, key in enumerate(action_keys)} - action_processed = robot_action_processor((action_dict, None)) - robot.send_action(action_processed) - action_count += 1 - - dt_s = time.perf_counter() - start_time - time.sleep(max(0, (action_interval - dt_s) - 0.001)) - - logger.info(f"[ACTOR] Actor thread shutting down. Total actions executed: {action_count}") - except Exception as e: - logger.error(f"[ACTOR] Fatal exception in actor_control thread: {e}") - logger.error(traceback.format_exc()) - sys.exit(1) - - -def _apply_torch_compile(policy, cfg: RTCDemoConfig): - """Apply torch.compile to the policy's predict_action_chunk method. - - Args: - policy: Policy instance to compile - cfg: Configuration containing torch compile settings - - Returns: - Policy with compiled predict_action_chunk method - """ - - # PI models handle their own compilation - if policy.type == "pi05" or policy.type == "pi0": - return policy - - try: - # Check if torch.compile is available (PyTorch 2.0+) - if not hasattr(torch, "compile"): - logger.warning( - f"torch.compile is not available. Requires PyTorch 2.0+. " - f"Current version: {torch.__version__}. Skipping compilation." 
- ) - return policy - - logger.info("Applying torch.compile to predict_action_chunk...") - logger.info(f" Backend: {cfg.torch_compile_backend}") - logger.info(f" Mode: {cfg.torch_compile_mode}") - logger.info(f" Disable CUDA graphs: {cfg.torch_compile_disable_cudagraphs}") - - # Compile the predict_action_chunk method - # - CUDA graphs disabled to prevent tensor aliasing from in-place ops (x_t += dt * v_t) - compile_kwargs = { - "backend": cfg.torch_compile_backend, - "mode": cfg.torch_compile_mode, - } - - # Disable CUDA graphs if requested (prevents tensor aliasing issues) - if cfg.torch_compile_disable_cudagraphs: - compile_kwargs["options"] = {"triton.cudagraphs": False} - - original_method = policy.predict_action_chunk - compiled_method = torch.compile(original_method, **compile_kwargs) - policy.predict_action_chunk = compiled_method - logger.info("✓ Successfully compiled predict_action_chunk") - - except Exception as e: - logger.error(f"Failed to apply torch.compile: {e}") - logger.warning("Continuing without torch.compile") - - return policy - - -@parser.wrap() -def demo_cli(cfg: RTCDemoConfig): - """Main entry point for RTC demo with draccus configuration.""" - - # Initialize logging - init_logging() - - logger.info(f"Using device: {cfg.device}") - - # Setup signal handler for graceful shutdown - signal_handler = ProcessSignalHandler(use_threads=True, display_pid=False) - shutdown_event = signal_handler.shutdown_event - - policy = None - robot = None - get_actions_thread = None - actor_thread = None - - policy_class = get_policy_class(cfg.policy.type) - - # Load config and set compile_model for pi0/pi05 models - config = PreTrainedConfig.from_pretrained(cfg.policy.pretrained_path) - - if cfg.policy.type == "pi05" or cfg.policy.type == "pi0": - config.compile_model = cfg.use_torch_compile - - if config.use_peft: - from peft import PeftConfig, PeftModel - - peft_pretrained_path = cfg.policy.pretrained_path - peft_config = 
PeftConfig.from_pretrained(peft_pretrained_path) - - policy = policy_class.from_pretrained( - pretrained_name_or_path=peft_config.base_model_name_or_path, config=config - ) - policy = PeftModel.from_pretrained(policy, peft_pretrained_path, config=peft_config) - else: - policy = policy_class.from_pretrained(cfg.policy.pretrained_path, config=config) - - # Turn on RTC - policy.config.rtc_config = cfg.rtc - - # Init RTC processort, as by default if RTC disabled in the config - # The processor won't be created - policy.init_rtc_processor() - - assert policy.name in ["smolvla", "pi05", "pi0"], "Only smolvla, pi05, and pi0 are supported for RTC" - - policy = policy.to(cfg.device) - policy.eval() - - # Apply torch.compile to predict_action_chunk method if enabled - if cfg.use_torch_compile: - policy = _apply_torch_compile(policy, cfg) - - # Create robot - logger.info(f"Initializing robot: {cfg.robot.type}") - robot = make_robot_from_config(cfg.robot) - robot.connect() - robot_wrapper = RobotWrapper(robot) - - # Create robot observation processor - robot_observation_processor = make_default_robot_observation_processor() - robot_action_processor = make_default_robot_action_processor() - - # Create action queue for communication between threads - action_queue = ActionQueue(cfg.rtc) - - # Start chunk requester thread - get_actions_thread = Thread( - target=get_actions, - args=(policy, robot_wrapper, robot_observation_processor, action_queue, shutdown_event, cfg), - daemon=True, - name="GetActions", - ) - get_actions_thread.start() - logger.info("Started get actions thread") - - # Start action executor thread - actor_thread = Thread( - target=actor_control, - args=(robot_wrapper, robot_action_processor, action_queue, shutdown_event, cfg), - daemon=True, - name="Actor", - ) - actor_thread.start() - logger.info("Started actor thread") - - logger.info("Started stop by duration thread") - - # Main thread monitors for duration or shutdown - logger.info(f"Running demo for 
{cfg.duration} seconds...") - start_time = time.time() - - while not shutdown_event.is_set() and (time.time() - start_time) < cfg.duration: - time.sleep(10) - - # Log queue status periodically - if int(time.time() - start_time) % 5 == 0: - logger.info(f"[MAIN] Action queue size: {action_queue.qsize()}") - - if time.time() - start_time > cfg.duration: - break - - logger.info("Demo duration reached or shutdown requested") - - # Signal shutdown - shutdown_event.set() - - # Wait for threads to finish - if get_actions_thread and get_actions_thread.is_alive(): - logger.info("Waiting for chunk requester thread to finish...") - get_actions_thread.join() - - if actor_thread and actor_thread.is_alive(): - logger.info("Waiting for action executor thread to finish...") - actor_thread.join() - - # Cleanup robot - if robot: - robot.disconnect() - logger.info("Robot disconnected") - - logger.info("Cleanup completed") - - -if __name__ == "__main__": - demo_cli() - logging.info("RTC demo finished") diff --git a/examples/so100_to_so100_EE/evaluate.py b/examples/so100_to_so100_EE/evaluate.py index fb5204997..63def68d0 100644 --- a/examples/so100_to_so100_EE/evaluate.py +++ b/examples/so100_to_so100_EE/evaluate.py @@ -14,13 +14,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging +import time + from lerobot.cameras.opencv import OpenCVCameraConfig -from lerobot.common.control_utils import init_keyboard_listener +from lerobot.common.control_utils import init_keyboard_listener, predict_action from lerobot.configs import FeatureType, PolicyFeature from lerobot.datasets import LeRobotDataset, aggregate_pipeline_dataset_features, create_initial_features from lerobot.model.kinematics import RobotKinematics from lerobot.policies import make_pre_post_processors from lerobot.policies.act import ACTPolicy +from lerobot.policies.utils import make_robot_action from lerobot.processor import ( RobotProcessorPipeline, make_default_teleop_action_processor, @@ -34,11 +38,12 @@ from lerobot.robots.so_follower.robot_kinematic_processor import ( ForwardKinematicsJointsToEE, InverseKinematicsEEToJoints, ) -from lerobot.scripts.lerobot_record import record_loop from lerobot.types import RobotAction, RobotObservation -from lerobot.utils.feature_utils import combine_feature_dicts +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame, combine_feature_dicts +from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.utils import log_say -from lerobot.utils.visualization_utils import init_rerun +from lerobot.utils.visualization_utils import init_rerun, log_rerun_data NUM_EPISODES = 5 FPS = 30 @@ -49,6 +54,9 @@ HF_DATASET_ID = "/" def main(): + # NOTE: For production policy deployment, use `lerobot-rollout` CLI instead. + # This script provides a self-contained example for educational purposes. 
+ # Create the robot configuration & robot camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} robot_config = SO100FollowerConfig( @@ -143,43 +151,67 @@ def main(): raise ValueError("Robot is not connected!") print("Starting evaluate loop...") + control_interval = 1 / FPS episode_idx = 0 for episode_idx in range(NUM_EPISODES): log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}") - # Main record loop - record_loop( - robot=robot, - events=events, - fps=FPS, - policy=policy, - preprocessor=preprocessor, # Pass the pre and post policy processors - postprocessor=postprocessor, - dataset=dataset, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=make_default_teleop_action_processor(), - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose_processor, - ) + # Inline evaluation loop: predict actions and send to robot + timestamp = 0 + start_episode_t = time.perf_counter() + while timestamp < EPISODE_TIME_SEC: + start_loop_t = time.perf_counter() + + if events["exit_early"]: + events["exit_early"] = False + break + + # Get robot observation + obs = robot.get_observation() + obs_processed = robot_joints_to_ee_pose_processor(obs) + observation_frame = build_dataset_frame(dataset.features, obs_processed, prefix=OBS_STR) + + # Predict action using the policy + action_tensor = predict_action( + observation=observation_frame, + policy=policy, + device=policy.config.device, + preprocessor=preprocessor, + postprocessor=postprocessor, + use_amp=policy.config.device.type == "cuda", + task=TASK_DESCRIPTION, + robot_type=robot.name, + ) + + # Convert policy output to robot action dict + action_values = make_robot_action(action_tensor, dataset.features) + + # Process and send action to robot (EE -> joints via IK) + robot_action_to_send = robot_ee_to_joints_processor((action_values, obs)) + 
robot.send_action(robot_action_to_send) + + # Write to dataset + action_frame = build_dataset_frame(dataset.features, action_values, prefix=ACTION) + frame = {**observation_frame, **action_frame, "task": TASK_DESCRIPTION} + dataset.add_frame(frame) + + log_rerun_data(observation=obs_processed, action=action_values) + + dt_s = time.perf_counter() - start_loop_t + sleep_time_s = control_interval - dt_s + if sleep_time_s < 0: + logging.warning( + f"Evaluate loop is running slower ({1 / dt_s:.1f} Hz) than the target FPS ({FPS} Hz)." + ) + precise_sleep(max(sleep_time_s, 0.0)) + timestamp = time.perf_counter() - start_episode_t # Reset the environment if not stopping or re-recording if not events["stop_recording"] and ( (episode_idx < NUM_EPISODES - 1) or events["rerecord_episode"] ): log_say("Reset the environment") - record_loop( - robot=robot, - events=events, - fps=FPS, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=make_default_teleop_action_processor(), - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose_processor, - ) + log_say("Waiting for environment reset, press right arrow key when ready...") if events["rerecord_episode"]: log_say("Re-record episode") @@ -190,7 +222,6 @@ def main(): # Save episode dataset.save_episode() - episode_idx += 1 finally: # Clean up log_say("Stop recording") diff --git a/examples/so100_to_so100_EE/record.py b/examples/so100_to_so100_EE/record.py index a7ac5bb80..a0b92da3b 100644 --- a/examples/so100_to_so100_EE/record.py +++ b/examples/so100_to_so100_EE/record.py @@ -62,21 +62,20 @@ def main(): follower = SO100Follower(follower_config) leader = SO100Leader(leader_config) - # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf + # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: + # 
https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf follower_kinematics_solver = RobotKinematics( urdf_path="./SO101/so101_new_calib.urdf", target_frame_name="gripper_frame_link", joint_names=list(follower.bus.motors.keys()), ) - - # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf leader_kinematics_solver = RobotKinematics( urdf_path="./SO101/so101_new_calib.urdf", target_frame_name="gripper_frame_link", joint_names=list(leader.bus.motors.keys()), ) - # Build pipeline to convert follower joints to EE observation + # Build pipeline to convert follower joints to EE observation. follower_joints_to_ee = RobotProcessorPipeline[RobotObservation, RobotObservation]( steps=[ ForwardKinematicsJointsToEE( @@ -87,7 +86,7 @@ def main(): to_output=transition_to_observation, ) - # Build pipeline to convert leader joints to EE action + # Build pipeline to convert leader joints to EE action. leader_joints_to_ee = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( steps=[ ForwardKinematicsJointsToEE( @@ -98,9 +97,9 @@ def main(): to_output=transition_to_robot_action, ) - # Build pipeline to convert EE action to follower joints + # Build pipeline to convert EE action to follower joints (with safety bounds). ee_to_follower_joints = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( - [ + steps=[ EEBoundsAndSafety( end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}, max_ee_step_m=0.10, @@ -115,13 +114,12 @@ def main(): to_output=transition_to_robot_action, ) - # Create the dataset + # Create the dataset, deriving features from the pipelines so the on-disk schema + # matches exactly what the pipelines produce at runtime. 
dataset = LeRobotDataset.create( repo_id=HF_REPO_ID, fps=FPS, features=combine_feature_dicts( - # Run the feature contract of the pipelines - # This tells you how the features would look like after the pipeline steps aggregate_pipeline_dataset_features( pipeline=leader_joints_to_ee, initial_features=create_initial_features(action=leader.action_features), @@ -144,7 +142,7 @@ def main(): # Initialize the keyboard listener and rerun visualization listener, events = init_keyboard_listener() - init_rerun(session_name="recording_phone") + init_rerun(session_name="recording_so100_ee") try: if not leader.is_connected or not follower.is_connected: @@ -160,14 +158,14 @@ def main(): robot=follower, events=events, fps=FPS, + teleop_action_processor=leader_joints_to_ee, + robot_action_processor=ee_to_follower_joints, + robot_observation_processor=follower_joints_to_ee, teleop=leader, dataset=dataset, control_time_s=EPISODE_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=leader_joints_to_ee, - robot_action_processor=ee_to_follower_joints, - robot_observation_processor=follower_joints_to_ee, ) # Reset the environment if not stopping or re-recording @@ -179,13 +177,13 @@ def main(): robot=follower, events=events, fps=FPS, + teleop_action_processor=leader_joints_to_ee, + robot_action_processor=ee_to_follower_joints, + robot_observation_processor=follower_joints_to_ee, teleop=leader, control_time_s=RESET_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=leader_joints_to_ee, - robot_action_processor=ee_to_follower_joints, - robot_observation_processor=follower_joints_to_ee, ) if events["rerecord_episode"]: diff --git a/examples/so100_to_so100_EE/rollout.py b/examples/so100_to_so100_EE/rollout.py new file mode 100644 index 000000000..d608bfab2 --- /dev/null +++ b/examples/so100_to_so100_EE/rollout.py @@ -0,0 +1,134 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run a trained EE-space policy on SO100 without recording (base rollout). + +Uses the rollout engine's :class:`BaseStrategy` (autonomous execution, +no dataset) with :class:`SyncInferenceConfig` (inline policy call per +control tick). The custom observation/action processors convert between +joint space (robot hardware) and end-effector space (policy I/O) via +forward/inverse kinematics. +""" + +from lerobot.cameras.opencv import OpenCVCameraConfig +from lerobot.configs import PreTrainedConfig +from lerobot.model.kinematics import RobotKinematics +from lerobot.processor import ( + RobotProcessorPipeline, + observation_to_transition, + robot_action_observation_to_transition, + transition_to_observation, + transition_to_robot_action, +) +from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig +from lerobot.robots.so_follower.robot_kinematic_processor import ( + ForwardKinematicsJointsToEE, + InverseKinematicsEEToJoints, +) +from lerobot.rollout import BaseStrategyConfig, RolloutConfig, build_rollout_context +from lerobot.rollout.inference import SyncInferenceConfig +from lerobot.rollout.strategies import BaseStrategy +from lerobot.types import RobotAction, RobotObservation +from lerobot.utils.process import ProcessSignalHandler +from lerobot.utils.utils import init_logging + +FPS = 30 +DURATION_SEC = 60 +TASK_DESCRIPTION = "My task description" +HF_MODEL_ID = "/" + + +def main(): + init_logging() + + 
# Robot configuration — the rollout engine will connect it inside build_rollout_context. + camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} + robot_config = SO100FollowerConfig( + port="/dev/tty.usbmodem5A460814411", + id="my_awesome_follower_arm", + cameras=camera_config, + use_degrees=True, + ) + + # Kinematic solver: we need the motor-name list, so peek at the robot once. + # (The rollout engine owns the connected instance; we only use this for introspection.) + temp_robot = SO100Follower(robot_config) + motor_names = list(temp_robot.bus.motors.keys()) + + # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: + # https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf + kinematics_solver = RobotKinematics( + urdf_path="./SO101/so101_new_calib.urdf", + target_frame_name="gripper_frame_link", + joint_names=motor_names, + ) + + # Joint-space observation → EE-space observation (consumed by the policy). + robot_joints_to_ee_pose_processor = RobotProcessorPipeline[RobotObservation, RobotObservation]( + steps=[ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=motor_names)], + to_transition=observation_to_transition, + to_output=transition_to_observation, + ) + + # EE-space action (produced by the policy) → joint-space action (sent to robot). + robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( + steps=[ + InverseKinematicsEEToJoints( + kinematics=kinematics_solver, + motor_names=motor_names, + initial_guess_current_joints=True, + ), + ], + to_transition=robot_action_observation_to_transition, + to_output=transition_to_robot_action, + ) + + # Policy config (full model is loaded inside build_rollout_context). 
+ policy_config = PreTrainedConfig.from_pretrained(HF_MODEL_ID) + policy_config.pretrained_path = HF_MODEL_ID + + cfg = RolloutConfig( + robot=robot_config, + policy=policy_config, + strategy=BaseStrategyConfig(), + inference=SyncInferenceConfig(), + fps=FPS, + duration=DURATION_SEC, + task=TASK_DESCRIPTION, + ) + + signal_handler = ProcessSignalHandler(use_threads=True) + + # Pass the EE kinematic processors via kwargs; the defaults (identity) would + # otherwise skip the joint↔EE conversion and the policy would receive the + # wrong observation/action space. + ctx = build_rollout_context( + cfg, + signal_handler.shutdown_event, + robot_action_processor=robot_ee_to_joints_processor, + robot_observation_processor=robot_joints_to_ee_pose_processor, + ) + + strategy = BaseStrategy(cfg.strategy) + try: + strategy.setup(ctx) + strategy.run(ctx) + finally: + strategy.teardown(ctx) + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml index 2f12840e7..d3d0c0ed3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -108,9 +108,9 @@ training = [ "wandb>=0.24.0,<0.25.0", ] hardware = [ - "pynput>=1.7.8,<1.9.0", - "pyserial>=3.5,<4.0", - "deepdiff>=7.0.1,<9.0.0", + "lerobot[pynput-dep]", + "lerobot[pyserial-dep]", + "lerobot[deepdiff-dep]", ] viz = [ "rerun-sdk>=0.24.0,<0.27.0", @@ -136,10 +136,14 @@ scipy-dep = ["scipy>=1.14.0,<2.0.0"] diffusers-dep = ["diffusers>=0.27.2,<0.36.0"] qwen-vl-utils-dep = ["qwen-vl-utils>=0.0.11,<0.1.0"] matplotlib-dep = ["matplotlib>=3.10.3,<4.0.0", "contourpy>=1.3.0,<2.0.0"] # NOTE: Explicitly listing contourpy helps the resolver converge faster. 
+pyserial-dep = ["pyserial>=3.5,<4.0"] +deepdiff-dep = ["deepdiff>=7.0.1,<9.0.0"] +pynput-dep = ["pynput>=1.7.8,<1.9.0"] +pyzmq-dep = ["pyzmq>=26.2.1,<28.0.0"] # Motors -feetech = ["feetech-servo-sdk>=1.0.0,<2.0.0"] -dynamixel = ["dynamixel-sdk>=3.7.31,<3.9.0"] +feetech = ["feetech-servo-sdk>=1.0.0,<2.0.0", "lerobot[pyserial-dep]", "lerobot[deepdiff-dep]"] +dynamixel = ["dynamixel-sdk>=3.7.31,<3.9.0", "lerobot[pyserial-dep]", "lerobot[deepdiff-dep]"] damiao = ["lerobot[can-dep]"] robstride = ["lerobot[can-dep]"] @@ -147,10 +151,11 @@ robstride = ["lerobot[can-dep]"] openarms = ["lerobot[damiao]"] gamepad = ["lerobot[pygame-dep]", "hidapi>=0.14.0,<0.15.0"] hopejr = ["lerobot[feetech]", "lerobot[pygame-dep]"] -lekiwi = ["lerobot[feetech]", "pyzmq>=26.2.1,<28.0.0"] +lekiwi = ["lerobot[feetech]", "lerobot[pyzmq-dep]"] unitree_g1 = [ # "unitree-sdk2==1.0.1", - "pyzmq>=26.2.1,<28.0.0", + "lerobot[pyzmq-dep]", + "lerobot[pyserial-dep]", "onnxruntime>=1.16.0,<2.0.0", "onnx>=1.16.0,<2.0.0", "meshcat>=0.3.0,<0.4.0", @@ -196,7 +201,8 @@ async = ["lerobot[grpcio-dep]", "lerobot[matplotlib-dep]"] peft = ["lerobot[transformers-dep]", "lerobot[peft-dep]"] # Development -dev = ["pre-commit>=3.7.0,<5.0.0", "debugpy>=1.8.1,<1.9.0", "lerobot[grpcio-dep]", "grpcio-tools==1.73.1", "mypy>=1.19.1", "ruff>=0.14.1"] +dev = ["pre-commit>=3.7.0,<5.0.0", "debugpy>=1.8.1,<1.9.0", "lerobot[grpcio-dep]", "grpcio-tools==1.73.1", "mypy>=1.19.1", "ruff>=0.14.1", "lerobot[notebook]"] +notebook = ["jupyter>=1.0.0,<2.0.0", "ipykernel>=6.0.0,<7.0.0"] test = ["pytest>=8.1.0,<9.0.0", "pytest-timeout>=2.4.0,<3.0.0", "pytest-cov>=5.0.0,<8.0.0", "mock-serial>=0.0.1,<0.1.0 ; sys_platform != 'win32'"] video_benchmark = ["scikit-image>=0.23.2,<0.26.0", "pandas>=2.2.2,<2.4.0"] @@ -206,6 +212,20 @@ aloha = ["lerobot[dataset]", "gym-aloha>=0.1.2,<0.2.0", "lerobot[scipy-dep]"] pusht = ["lerobot[dataset]", "gym-pusht>=0.1.5,<0.2.0", "pymunk>=6.6.0,<7.0.0"] # TODO: Fix pymunk version in gym-pusht instead libero = 
["lerobot[dataset]", "lerobot[transformers-dep]", "hf-libero>=0.1.3,<0.2.0; sys_platform == 'linux'", "lerobot[scipy-dep]"] metaworld = ["lerobot[dataset]", "metaworld==3.0.0", "lerobot[scipy-dep]"] +# NOTE: vlabench is NOT exposed as a `lerobot` extra. Its only distribution +# is the OpenMOSS/VLABench GitHub repo (package name `VLABench`, no PyPI +# release), so any `vlabench>=X` pip spec is unresolvable. Install it +# manually alongside MuJoCo / dm-control — see docs/source/vlabench.mdx +# for the recipe. +# NOTE: robomme is NOT a pyproject extra — mani-skill hard-pins numpy<2 +# which conflicts with lerobot's numpy>=2 base pin, so the two trees can't +# resolve into a single env. Install it only in the RoboMME Docker image +# via `uv pip install --override` (see docker/Dockerfile.benchmark.robomme). +# NOTE: robocasa is NOT exposed as a `lerobot` extra. Its setup.py pins +# `lerobot==0.3.3` in install_requires, which cyclically shadows our own +# workspace `lerobot` and makes the graph unsolvable under any resolver +# (uv, pip). Install it manually alongside robosuite — see +# docs/source/robocasa.mdx for the recipe. 
# All all = [ @@ -269,6 +289,7 @@ lerobot-find-joint-limits="lerobot.scripts.lerobot_find_joint_limits:main" lerobot-imgtransform-viz="lerobot.scripts.lerobot_imgtransform_viz:main" lerobot-edit-dataset="lerobot.scripts.lerobot_edit_dataset:main" lerobot-setup-can="lerobot.scripts.lerobot_setup_can:main" +lerobot-rollout="lerobot.scripts.lerobot_rollout:main" # ---------------- Tool Configurations ---------------- [tool.setuptools.package-data] diff --git a/scripts/ci/extract_task_descriptions.py b/scripts/ci/extract_task_descriptions.py index 5fbc1c35a..0d27885cf 100644 --- a/scripts/ci/extract_task_descriptions.py +++ b/scripts/ci/extract_task_descriptions.py @@ -31,9 +31,23 @@ from __future__ import annotations import argparse import json +import re import sys from pathlib import Path +# LIBERO-plus derives task.language by space-joining the perturbation-variant +# filename (grab_language_from_filename in libero/libero/benchmark/__init__.py), +# so non-_language_ variants inherit a trailing metadata blob like +# "view 0 0 100 0 0 initstate 0 noise 45" or "add 16". Strip those tokens so +# the description matches the base instruction used in the training dataset. 
+_LIBERO_PERTURBATION_TAIL_RE = re.compile( + r"(?:\s(?:view|initstate|noise|add|tb|table|light|level)(?:\s\d+)+)+$" +) + + +def _strip_libero_perturbation_tail(instruction: str) -> str: + return _LIBERO_PERTURBATION_TAIL_RE.sub("", instruction).strip() + def _libero_descriptions(task_suite: str) -> dict[str, str]: from libero.libero import benchmark # type: ignore[import-untyped] @@ -47,7 +61,10 @@ def _libero_descriptions(task_suite: str) -> dict[str, str]: ) return {} suite = suite_dict[task_suite]() - return {f"{task_suite}_{i}": suite.get_task(i).language for i in range(suite.n_tasks)} + return { + f"{task_suite}_{i}": _strip_libero_perturbation_tail(suite.get_task(i).language) + for i in range(suite.n_tasks) + } def _metaworld_descriptions(task_name: str) -> dict[str, str]: @@ -57,19 +74,120 @@ def _metaworld_descriptions(task_name: str) -> dict[str, str]: return {f"{task_name}_0": label} +def _robotwin_descriptions(task_names: str) -> dict[str, str]: + """Return descriptions for each requested RoboTwin task. Reads + `description/task_instruction/.json` from the RoboTwin clone + (cwd is /opt/robotwin in CI). Falls back to the task name if missing.""" + out: dict[str, str] = {} + root = Path("description/task_instruction") + for name in (t.strip() for t in task_names.split(",") if t.strip()): + desc_file = root / f"{name}.json" + desc = name.replace("_", " ") + if desc_file.is_file(): + data = json.loads(desc_file.read_text()) + full = data.get("full_description") or desc + # Strip the schema placeholders ({A}, {a}) — keep the sentence readable. + desc = full.replace("<", "").replace(">", "") + out[f"{name}_0"] = desc + return out + + +def _robocasa_descriptions(task_spec: str) -> dict[str, str]: + """For each task in the comma-separated list, emit a cleaned-name label. + + RoboCasa episodes carry their language instruction in the env's + `ep_meta['lang']`, populated per reset. 
Pulling it requires spinning + up the full kitchen env per task (~seconds each); we use the task + name as the key here and let the eval's episode info carry the + actual instruction. + """ + out: dict[str, str] = {} + for task in (t.strip() for t in task_spec.split(",") if t.strip()): + # Split CamelCase into words: "CloseFridge" → "close fridge". + label = "".join(f" {c.lower()}" if c.isupper() else c for c in task).strip() + out[f"{task}_0"] = label or task + return out + + +_ROBOMME_DESCRIPTIONS = { + "BinFill": "Fill the target bin with the correct number of cubes", + "PickXtimes": "Pick the indicated cube the specified number of times", + "SwingXtimes": "Swing the object the specified number of times", + "StopCube": "Grasp and stop the moving cube", + "VideoUnmask": "Pick the cube shown in the reference video", + "VideoUnmaskSwap": "Pick the cube matching the reference video after a swap", + "ButtonUnmask": "Press the button indicated by the reference", + "ButtonUnmaskSwap": "Press the correct button after objects are swapped", + "PickHighlight": "Pick the highlighted cube", + "VideoRepick": "Repick the cube shown in the reference video", + "VideoPlaceButton": "Place the cube on the button shown in the video", + "VideoPlaceOrder": "Place cubes in the order shown in the video", + "MoveCube": "Move the cube to the target location", + "InsertPeg": "Insert the peg into the target hole", + "PatternLock": "Unlock the pattern by pressing buttons in sequence", + "RouteStick": "Route the stick through the required waypoints", +} + + +def _robomme_descriptions(task_names: str, task_ids: list[int] | None = None) -> dict[str, str]: + """Return descriptions for each requested RoboMME task. 
Keys match the + video filename pattern `_` used by the eval script.""" + if task_ids is None: + task_ids = [0] + out: dict[str, str] = {} + for name in (t.strip() for t in task_names.split(",") if t.strip()): + desc = _ROBOMME_DESCRIPTIONS.get(name, name) + for tid in task_ids: + out[f"{name}_{tid}"] = desc + return out + + +def _vlabench_descriptions(task_spec: str) -> dict[str, str]: + """For each task in the comma-separated list, emit a cleaned-name label. + + VLABench tasks carry language instructions on their dm_control task + object, but pulling them requires loading the full env per task + (~seconds each). The CI smoke-eval already captures the instruction + inside its episode info; this mapping is just enough to key + `metrics.json` by `_0`. + """ + out: dict[str, str] = {} + for task in (t.strip() for t in task_spec.split(",") if t.strip()): + out[f"{task}_0"] = task.replace("_", " ").strip() + return out + + def main() -> int: parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--env", required=True, help="Environment family (libero, metaworld, ...)") parser.add_argument("--task", required=True, help="Task/suite name (e.g. libero_spatial)") + parser.add_argument( + "--task-ids", + type=str, + default=None, + help="Comma-separated task IDs (e.g. '0,1,2'). 
 Default: [0]", + ) parser.add_argument("--output", required=True, help="Path to write task_descriptions.json") args = parser.parse_args() + task_ids: list[int] | None = None + if args.task_ids: + task_ids = [int(x.strip()) for x in args.task_ids.split(",")] + descriptions: dict[str, str] = {} try: - if args.env == "libero": + if args.env in ("libero", "libero_plus"): descriptions = _libero_descriptions(args.task) elif args.env == "metaworld": descriptions = _metaworld_descriptions(args.task) + elif args.env == "robotwin": + descriptions = _robotwin_descriptions(args.task) + elif args.env == "robocasa": + descriptions = _robocasa_descriptions(args.task) + elif args.env == "robomme": + descriptions = _robomme_descriptions(args.task, task_ids=task_ids) + elif args.env == "vlabench": + descriptions = _vlabench_descriptions(args.task) else: print( f"[extract_task_descriptions] No description extractor for env '{args.env}'.", diff --git a/src/lerobot/cameras/reachy2_camera/reachy2_camera.py b/src/lerobot/cameras/reachy2_camera/reachy2_camera.py index 9bef957bc..9b7e7a2e0 100644 --- a/src/lerobot/cameras/reachy2_camera/reachy2_camera.py +++ b/src/lerobot/cameras/reachy2_camera/reachy2_camera.py @@ -33,7 +33,7 @@ import cv2 # type: ignore # TODO: add type stubs for OpenCV import numpy as np # type: ignore # TODO: add type stubs for numpy from lerobot.utils.decorators import check_if_not_connected -from lerobot.utils.import_utils import _reachy2_sdk_available +from lerobot.utils.import_utils import _reachy2_sdk_available, require_package if TYPE_CHECKING or _reachy2_sdk_available: from reachy2_sdk.media.camera import CameraView @@ -76,6 +76,7 @@ class Reachy2Camera(Camera): Args: config: The configuration settings for the camera.
""" + require_package("reachy2_sdk", extra="reachy2") super().__init__(config) self.config = config diff --git a/src/lerobot/cameras/realsense/camera_realsense.py b/src/lerobot/cameras/realsense/camera_realsense.py index d80ec8093..e156e6d14 100644 --- a/src/lerobot/cameras/realsense/camera_realsense.py +++ b/src/lerobot/cameras/realsense/camera_realsense.py @@ -17,18 +17,21 @@ Provides the RealSenseCamera class for capturing frames from Intel RealSense cam """ import logging +import sys import time from threading import Event, Lock, Thread -from typing import Any +from typing import TYPE_CHECKING, Any import cv2 # type: ignore # TODO: add type stubs for OpenCV import numpy as np # type: ignore # TODO: add type stubs for numpy from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing -try: - import pyrealsense2 as rs # type: ignore # TODO: add type stubs for pyrealsense2 -except Exception as e: - logging.info(f"Could not import realsense: {e}") +from lerobot.utils.import_utils import _pyrealsense2_available, require_package + +if TYPE_CHECKING or _pyrealsense2_available: + import pyrealsense2 as rs +else: + rs = None from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected from lerobot.utils.errors import DeviceNotConnectedError @@ -39,6 +42,7 @@ from ..utils import get_cv2_rotation from .configuration_realsense import RealSenseCameraConfig logger = logging.getLogger(__name__) +pkg_name = "pyrealsense2-macosx" if sys.platform == "darwin" else "pyrealsense2" class RealSenseCamera(Camera): @@ -112,7 +116,7 @@ class RealSenseCamera(Camera): Args: config: The configuration settings for the camera. 
""" - + require_package(pkg_name, extra="intelrealsense", import_name="pyrealsense2") super().__init__(config) self.config = config diff --git a/src/lerobot/cameras/zmq/camera_zmq.py b/src/lerobot/cameras/zmq/camera_zmq.py index 2fbe50d8b..1b0be5de6 100644 --- a/src/lerobot/cameras/zmq/camera_zmq.py +++ b/src/lerobot/cameras/zmq/camera_zmq.py @@ -28,12 +28,19 @@ import json import logging import time from threading import Event, Lock, Thread -from typing import Any +from typing import TYPE_CHECKING, Any import cv2 import numpy as np from numpy.typing import NDArray +from lerobot.utils.import_utils import _zmq_available, require_package + +if TYPE_CHECKING or _zmq_available: + import zmq +else: + zmq = None + from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected from lerobot.utils.errors import DeviceNotConnectedError @@ -74,8 +81,8 @@ class ZMQCamera(Camera): """ def __init__(self, config: ZMQCameraConfig): + require_package("pyzmq", extra="pyzmq-dep", import_name="zmq") super().__init__(config) - import zmq self.config = config self.server_address = config.server_address @@ -117,8 +124,6 @@ class ZMQCamera(Camera): logger.info(f"Connecting to {self}...") try: - import zmq - self.context = zmq.Context() self.socket = self.context.socket(zmq.SUB) self.socket.setsockopt_string(zmq.SUBSCRIBE, "") @@ -180,11 +185,8 @@ class ZMQCamera(Camera): try: message = self.socket.recv_string() - except Exception as e: - # zmq is lazy-imported in connect(), so check by name to avoid a top-level import - if type(e).__name__ == "Again": - raise TimeoutError(f"{self} timeout after {self.timeout_ms}ms") from e - raise + except zmq.Again as e: + raise TimeoutError(f"{self} timeout after {self.timeout_ms}ms") from e # Decode JSON message data = json.loads(message) diff --git a/src/lerobot/common/control_utils.py b/src/lerobot/common/control_utils.py index 530955078..efbcc082f 100644 --- a/src/lerobot/common/control_utils.py +++ 
b/src/lerobot/common/control_utils.py @@ -28,6 +28,12 @@ import numpy as np import torch from lerobot.policies import PreTrainedPolicy, prepare_observation_for_inference +from lerobot.utils.import_utils import _deepdiff_available, require_package + +if TYPE_CHECKING or _deepdiff_available: + from deepdiff import DeepDiff +else: + DeepDiff = None if TYPE_CHECKING: from lerobot.datasets import LeRobotDataset @@ -217,10 +223,7 @@ def sanity_check_dataset_robot_compatibility( Raises: ValueError: If any of the checked metadata fields do not match. """ - from lerobot.utils.import_utils import require_package - - require_package("deepdiff", extra="hardware") - from deepdiff import DeepDiff + require_package("deepdiff", extra="deepdiff-dep") from lerobot.utils.constants import DEFAULT_FEATURES diff --git a/src/lerobot/configs/__init__.py b/src/lerobot/configs/__init__.py index 3ddaec1af..ab74c3cd3 100644 --- a/src/lerobot/configs/__init__.py +++ b/src/lerobot/configs/__init__.py @@ -21,6 +21,7 @@ are intentionally NOT re-exported here to avoid circular dependencies Import them directly: ``from lerobot.configs.train import TrainPipelineConfig`` """ +from .dataset import DatasetRecordConfig from .default import DatasetConfig, EvalConfig, PeftConfig, WandBConfig from .policies import PreTrainedConfig from .types import ( @@ -39,6 +40,7 @@ __all__ = [ "PolicyFeature", "RTCAttentionSchedule", # Config classes + "DatasetRecordConfig", "DatasetConfig", "EvalConfig", "PeftConfig", diff --git a/src/lerobot/configs/dataset.py b/src/lerobot/configs/dataset.py new file mode 100644 index 000000000..e3e17e62b --- /dev/null +++ b/src/lerobot/configs/dataset.py @@ -0,0 +1,80 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared dataset recording configuration used by both ``lerobot-record`` and ``lerobot-rollout``.""" + +from dataclasses import dataclass +from datetime import datetime +from pathlib import Path + + +@dataclass +class DatasetRecordConfig: + # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`). + repo_id: str = "" + # A short but accurate description of the task performed during the recording (e.g. "Pick the Lego block and drop it in the box on the right.") + single_task: str = "" + # Root directory where the dataset will be stored (e.g. 'dataset/path'). If None, defaults to $HF_LEROBOT_HOME/repo_id. + root: str | Path | None = None + # Limit the frames per second. + fps: int = 30 + # Number of seconds for data recording for each episode. + episode_time_s: int | float = 60 + # Number of seconds for resetting the environment after each episode. + reset_time_s: int | float = 60 + # Number of episodes to record. + num_episodes: int = 50 + # Encode frames in the dataset into video + video: bool = True + # Upload dataset to Hugging Face hub. + push_to_hub: bool = True + # Upload on private repository on the Hugging Face hub. + private: bool = False + # Add tags to your dataset on the hub. + tags: list[str] | None = None + # Number of subprocesses handling the saving of frames as PNG. Set to 0 to use threads only; + # set to ≥1 to use subprocesses, each using threads to write images. The best number of processes + # and threads depends on your system. We recommend 4 threads per camera with 0 processes. 
+ # If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses. + num_image_writer_processes: int = 0 + # Number of threads writing the frames as png images on disk, per camera. + # Too many threads might cause unstable teleoperation fps due to main thread being blocked. + # Not enough threads might cause low camera fps. + num_image_writer_threads_per_camera: int = 4 + # Number of episodes to record before batch encoding videos + # Set to 1 for immediate encoding (default behavior), or higher for batched encoding + video_encoding_batch_size: int = 1 + # Video codec for encoding videos. Options: 'h264', 'hevc', 'libsvtav1', 'auto', + # or hardware-specific: 'h264_videotoolbox', 'h264_nvenc', 'h264_vaapi', 'h264_qsv'. + # Use 'auto' to auto-detect the best available hardware encoder. + vcodec: str = "libsvtav1" + # Enable streaming video encoding: encode frames in real-time during capture instead + # of writing PNG images first. Makes save_episode() near-instant. More info in the documentation: https://huggingface.co/docs/lerobot/streaming_video_encoding + streaming_encoding: bool = False + # Maximum number of frames to buffer per camera when using streaming encoding. + # ~1s buffer at 30fps. Provides backpressure if the encoder can't keep up. + encoder_queue_maxsize: int = 30 + # Number of threads per encoder instance. None = auto (codec default). + # Lower values reduce CPU usage, maps to 'lp' (via svtav1-params) for libsvtav1 and 'threads' for h264/hevc.. + encoder_threads: int | None = None + + def stamp_repo_id(self) -> None: + """Append a date-time tag to ``repo_id`` so each recording session gets a unique name. + + Must be called explicitly at dataset *creation* time — not on resume, + where the existing ``repo_id`` (already stamped) must be preserved. 
+ """ + if self.repo_id: + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + self.repo_id = f"{self.repo_id}_{timestamp}" diff --git a/src/lerobot/configs/default.py b/src/lerobot/configs/default.py index b05e96fde..be906edbd 100644 --- a/src/lerobot/configs/default.py +++ b/src/lerobot/configs/default.py @@ -35,6 +35,9 @@ class DatasetConfig: revision: str | None = None use_imagenet_stats: bool = True video_backend: str = field(default_factory=get_safe_default_codec) + # When True, video frames are returned as uint8 tensors (0-255) instead of float32 (0.0-1.0). + # This reduces memory and speeds up DataLoader IPC. The training pipeline handles the conversion. + return_uint8: bool = False streaming: bool = False def __post_init__(self) -> None: diff --git a/src/lerobot/configs/train.py b/src/lerobot/configs/train.py index e681db2e8..6185f922b 100644 --- a/src/lerobot/configs/train.py +++ b/src/lerobot/configs/train.py @@ -56,6 +56,8 @@ class TrainPipelineConfig(HubMixin): # Number of workers for the dataloader. num_workers: int = 4 batch_size: int = 8 + prefetch_factor: int = 4 + persistent_workers: bool = True steps: int = 100_000 eval_freq: int = 20_000 log_freq: int = 200 diff --git a/src/lerobot/datasets/dataset_reader.py b/src/lerobot/datasets/dataset_reader.py index fc7ce36ce..bd1298590 100644 --- a/src/lerobot/datasets/dataset_reader.py +++ b/src/lerobot/datasets/dataset_reader.py @@ -16,6 +16,7 @@ """Private reader component for LeRobotDataset. Handles random-access reading (HF dataset, delta indices, video decoding).""" from collections.abc import Callable +from concurrent.futures import ThreadPoolExecutor from pathlib import Path import datasets @@ -49,6 +50,7 @@ class DatasetReader: video_backend: str, delta_timestamps: dict[str, list[float]] | None, image_transforms: Callable | None, + return_uint8: bool = False, ): """Initialize the reader with metadata, filtering, and transform config. 
@@ -73,6 +75,7 @@ class DatasetReader: self._tolerance_s = tolerance_s self._video_backend = video_backend self._image_transforms = image_transforms + self._return_uint8 = return_uint8 self.hf_dataset: datasets.Dataset | None = None self._absolute_to_relative_idx: dict[int, int] | None = None @@ -105,10 +108,8 @@ """Build absolute-to-relative index mapping from loaded hf_dataset.""" self._absolute_to_relative_idx = None if self.episodes is not None and self.hf_dataset is not None: - self._absolute_to_relative_idx = { - abs_idx.item() if isinstance(abs_idx, torch.Tensor) else abs_idx: rel_idx - for rel_idx, abs_idx in enumerate(self.hf_dataset["index"]) - } + indices = self.hf_dataset.data.column("index").to_numpy() + self._absolute_to_relative_idx = dict(zip(indices.tolist(), range(len(indices)), strict=True)) @property def num_frames(self) -> int: @@ -235,16 +236,31 @@ Segmentation Fault. """ ep = self._meta.episodes[ep_idx] - item = {} - for vid_key, query_ts in query_timestamps.items(): + + def _decode_single(vid_key: str, query_ts: list[float]) -> tuple[str, torch.Tensor]: from_timestamp = ep[f"videos/{vid_key}/from_timestamp"] shifted_query_ts = [from_timestamp + ts for ts in query_ts] - video_path = self.root / self._meta.get_video_file_path(ep_idx, vid_key) - frames = decode_video_frames(video_path, shifted_query_ts, self._tolerance_s, self._video_backend) - item[vid_key] = frames.squeeze(0) + video_path = self.root / self._meta.get_video_file_path(ep_idx, vid_key) + frames = decode_video_frames( + video_path, + shifted_query_ts, + self._tolerance_s, + self._video_backend, + return_uint8=self._return_uint8, + ) + return vid_key, frames.squeeze(0) - return item + items = list(query_timestamps.items()) + + # Single camera: no threading overhead + if len(items) <= 1: + return {vid_key: _decode_single(vid_key, query_ts)[1] for vid_key, query_ts in items} + + # Multi-camera: decode in parallel (video decoding releases the GIL) + with ThreadPoolExecutor(max_workers=len(items)) as pool: + futures
= [pool.submit(_decode_single, k, ts) for k, ts in items] + return dict(f.result() for f in futures) def get_item(self, idx) -> dict: """Core __getitem__ logic. Assumes hf_dataset is loaded. diff --git a/src/lerobot/datasets/dataset_writer.py b/src/lerobot/datasets/dataset_writer.py index 60ec9e348..cf306a86a 100644 --- a/src/lerobot/datasets/dataset_writer.py +++ b/src/lerobot/datasets/dataset_writer.py @@ -597,7 +597,7 @@ class DatasetWriter: def cleanup_interrupted_episode(self, episode_index: int) -> None: """Remove temporary image directories for an interrupted episode.""" - for key in self._meta.video_keys: + for key in self._meta.camera_keys: img_dir = self._get_image_file_path( episode_index=episode_index, image_key=key, frame_index=0 ).parent diff --git a/src/lerobot/datasets/factory.py b/src/lerobot/datasets/factory.py index 040cba5cb..73df3f04b 100644 --- a/src/lerobot/datasets/factory.py +++ b/src/lerobot/datasets/factory.py @@ -92,6 +92,7 @@ def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDatas image_transforms=image_transforms, revision=cfg.dataset.revision, video_backend=cfg.dataset.video_backend, + return_uint8=True, tolerance_s=cfg.tolerance_s, ) else: @@ -104,6 +105,7 @@ def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDatas revision=cfg.dataset.revision, max_num_shards=cfg.num_workers, tolerance_s=cfg.tolerance_s, + return_uint8=True, ) else: raise NotImplementedError("The MultiLeRobotDataset isn't supported for now.") diff --git a/src/lerobot/datasets/image_writer.py b/src/lerobot/datasets/image_writer.py index 603067757..8fb5804a5 100644 --- a/src/lerobot/datasets/image_writer.py +++ b/src/lerobot/datasets/image_writer.py @@ -30,13 +30,13 @@ def safe_stop_image_writer(func): def wrapper(*args, **kwargs): try: return func(*args, **kwargs) - except Exception as e: + except BaseException: dataset = kwargs.get("dataset") writer = getattr(dataset, "writer", None) if dataset else None if writer is 
not None and writer.image_writer is not None: logger.warning("Waiting for image writer to terminate...") writer.image_writer.stop() - raise e + raise return wrapper diff --git a/src/lerobot/datasets/lerobot_dataset.py b/src/lerobot/datasets/lerobot_dataset.py index 7cda5d677..b6ab0f5f0 100644 --- a/src/lerobot/datasets/lerobot_dataset.py +++ b/src/lerobot/datasets/lerobot_dataset.py @@ -56,6 +56,7 @@ class LeRobotDataset(torch.utils.data.Dataset): force_cache_sync: bool = False, download_videos: bool = True, video_backend: str | None = None, + return_uint8: bool = False, batch_encoding_size: int = 1, vcodec: str = "libsvtav1", streaming_encoding: bool = False, @@ -202,6 +203,7 @@ class LeRobotDataset(torch.utils.data.Dataset): self.tolerance_s = tolerance_s self.revision = revision if revision else CODEBASE_VERSION self._video_backend = video_backend if video_backend else get_safe_default_codec() + self._return_uint8 = return_uint8 self._batch_encoding_size = batch_encoding_size self._vcodec = resolve_vcodec(vcodec) self._encoder_threads = encoder_threads @@ -225,6 +227,7 @@ class LeRobotDataset(torch.utils.data.Dataset): video_backend=self._video_backend, delta_timestamps=delta_timestamps, image_transforms=image_transforms, + return_uint8=self._return_uint8, ) # Load actual data @@ -288,6 +291,7 @@ class LeRobotDataset(torch.utils.data.Dataset): video_backend=self._video_backend, delta_timestamps=self.delta_timestamps, image_transforms=self.image_transforms, + return_uint8=self._return_uint8, ) return self.reader @@ -626,6 +630,8 @@ class LeRobotDataset(torch.utils.data.Dataset): streaming_encoding: bool = False, encoder_queue_maxsize: int = 30, encoder_threads: int | None = None, + video_files_size_in_mb: int | None = None, + data_files_size_in_mb: int | None = None, ) -> "LeRobotDataset": """Create a new LeRobotDataset from scratch for recording data. 
@@ -673,6 +679,8 @@ class LeRobotDataset(torch.utils.data.Dataset): root=root, use_videos=use_videos, metadata_buffer_size=metadata_buffer_size, + video_files_size_in_mb=video_files_size_in_mb, + data_files_size_in_mb=data_files_size_in_mb, ) obj.repo_id = obj.meta.repo_id obj._requested_root = obj.meta.root @@ -683,6 +691,7 @@ class LeRobotDataset(torch.utils.data.Dataset): obj.delta_timestamps = None obj.episodes = None obj._video_backend = video_backend if video_backend is not None else get_safe_default_codec() + obj._return_uint8 = False obj._batch_encoding_size = batch_encoding_size obj._vcodec = vcodec obj._encoder_threads = encoder_threads @@ -775,6 +784,7 @@ class LeRobotDataset(torch.utils.data.Dataset): obj.delta_timestamps = None obj.episodes = None obj._video_backend = video_backend if video_backend else get_safe_default_codec() + obj._return_uint8 = False obj._batch_encoding_size = batch_encoding_size obj._vcodec = vcodec obj._encoder_threads = encoder_threads diff --git a/src/lerobot/datasets/streaming_dataset.py b/src/lerobot/datasets/streaming_dataset.py index f47d71367..4de2ed69c 100644 --- a/src/lerobot/datasets/streaming_dataset.py +++ b/src/lerobot/datasets/streaming_dataset.py @@ -251,6 +251,7 @@ class StreamingLeRobotDataset(torch.utils.data.IterableDataset): seed: int = 42, rng: np.random.Generator | None = None, shuffle: bool = True, + return_uint8: bool = False, ): """Initialize a StreamingLeRobotDataset. 
@@ -288,6 +289,7 @@ class StreamingLeRobotDataset(torch.utils.data.IterableDataset): self.streaming = streaming self.buffer_size = buffer_size + self._return_uint8 = return_uint8 # We cache the video decoders to avoid re-initializing them at each frame (avoiding a ~10x slowdown) self.video_decoder_cache = None @@ -553,7 +555,11 @@ class StreamingLeRobotDataset(torch.utils.data.IterableDataset): root = self.meta.url_root if self.streaming and not self.streaming_from_local else self.root video_path = f"{root}/{self.meta.get_video_file_path(ep_idx, video_key)}" frames = decode_video_frames_torchcodec( - video_path, query_ts, self.tolerance_s, decoder_cache=self.video_decoder_cache + video_path, + query_ts, + self.tolerance_s, + decoder_cache=self.video_decoder_cache, + return_uint8=self._return_uint8, ) item[video_key] = frames.squeeze(0) if len(query_ts) == 1 else frames diff --git a/src/lerobot/datasets/video_utils.py b/src/lerobot/datasets/video_utils.py index cabe592d0..158e68cdb 100644 --- a/src/lerobot/datasets/video_utils.py +++ b/src/lerobot/datasets/video_utils.py @@ -123,6 +123,7 @@ def decode_video_frames( timestamps: list[float], tolerance_s: float, backend: str | None = None, + return_uint8: bool = False, ) -> torch.Tensor: """ Decodes video frames using the specified backend. @@ -131,19 +132,23 @@ def decode_video_frames( video_path (Path): Path to the video file. timestamps (list[float]): List of timestamps to extract frames. tolerance_s (float): Allowed deviation in seconds for frame retrieval. - backend (str, optional): Backend to use for decoding. Defaults to "torchcodec" when available in the platform; otherwise, defaults to "pyav".. + backend (str, optional): Backend to use for decoding. Defaults to "torchcodec" when available in the platform; otherwise, defaults to "pyav". + return_uint8 (bool): If True, return raw uint8 frames without float32 normalization. + This reduces memory for DataLoader IPC; normalization can be done on GPU afterward. 
Returns: - torch.Tensor: Decoded frames. + torch.Tensor: Decoded frames (float32 in [0,1] by default, or uint8 if return_uint8=True). Currently supports torchcodec on cpu and pyav. """ if backend is None: backend = get_safe_default_codec() if backend == "torchcodec": - return decode_video_frames_torchcodec(video_path, timestamps, tolerance_s) + return decode_video_frames_torchcodec(video_path, timestamps, tolerance_s, return_uint8=return_uint8) elif backend in ["pyav", "video_reader"]: - return decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend) + return decode_video_frames_torchvision( + video_path, timestamps, tolerance_s, backend, return_uint8=return_uint8 + ) else: raise ValueError(f"Unsupported video backend: {backend}") @@ -154,6 +159,7 @@ def decode_video_frames_torchvision( tolerance_s: float, backend: str = "pyav", log_loaded_timestamps: bool = False, + return_uint8: bool = False, ) -> torch.Tensor: """Loads frames associated to the requested timestamps of a video @@ -240,14 +246,17 @@ def decode_video_frames_torchvision( if log_loaded_timestamps: logger.info(f"{closest_ts=}") - # convert to the pytorch format which is float32 in [0,1] range (and channel first) - closest_frames = closest_frames.type(torch.float32) / 255 - if len(timestamps) != len(closest_frames): raise FrameTimestampError( f"Number of retrieved frames ({len(closest_frames)}) does not match " f"number of queried timestamps ({len(timestamps)})" ) + + if return_uint8: + return closest_frames + + # convert to the pytorch format which is float32 in [0,1] range (and channel first) + closest_frames = closest_frames.type(torch.float32) / 255 return closest_frames @@ -306,6 +315,7 @@ def decode_video_frames_torchcodec( tolerance_s: float, log_loaded_timestamps: bool = False, decoder_cache: VideoDecoderCache | None = None, + return_uint8: bool = False, ) -> torch.Tensor: """Loads frames associated with the requested timestamps of a video using torchcodec. 
@@ -373,14 +383,16 @@ def decode_video_frames_torchcodec( if log_loaded_timestamps: logger.info(f"{closest_ts=}") - # convert to float32 in [0,1] range - closest_frames = (closest_frames / 255.0).type(torch.float32) - if not len(timestamps) == len(closest_frames): raise FrameTimestampError( f"Retrieved timestamps differ from queried {set(closest_frames) - set(timestamps)}" ) + if return_uint8: + return closest_frames + + # convert to float32 in [0,1] range + closest_frames = (closest_frames / 255.0).type(torch.float32) return closest_frames diff --git a/src/lerobot/envs/configs.py b/src/lerobot/envs/configs.py index 2a7c52d45..84c40472f 100644 --- a/src/lerobot/envs/configs.py +++ b/src/lerobot/envs/configs.py @@ -331,6 +331,7 @@ class LiberoEnv(EnvConfig): camera_name_mapping: dict[str, str] | None = None observation_height: int = 360 observation_width: int = 360 + is_libero_plus: bool = False features: dict[str, PolicyFeature] = field( default_factory=lambda: { ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(7,)), @@ -432,6 +433,7 @@ class LiberoEnv(EnvConfig): control_mode=self.control_mode, episode_length=self.episode_length, camera_name_mapping=self.camera_name_mapping, + is_libero_plus=self.is_libero_plus, ) def get_env_processors(self): @@ -496,6 +498,146 @@ class MetaworldEnv(EnvConfig): ) +@EnvConfig.register_subclass("robocasa") +@dataclass +class RoboCasaEnv(EnvConfig): + task: str = "CloseFridge" + fps: int = 20 + episode_length: int = 1000 + obs_type: str = "pixels_agent_pos" + render_mode: str = "rgb_array" + camera_name: str = "robot0_agentview_left,robot0_eye_in_hand,robot0_agentview_right" + observation_height: int = 256 + observation_width: int = 256 + visualization_height: int = 512 + visualization_width: int = 512 + split: str | None = None + # Object-mesh registries to sample from. Upstream default is + # ("objaverse", "lightwheel"), but objaverse is ~30GB and the CI image + # only ships the lightwheel pack. 
Override to include objaverse once + # you've run `python -m robocasa.scripts.download_kitchen_assets + # --type objaverse` locally. + obj_registries: list[str] = field(default_factory=lambda: ["lightwheel"]) + features: dict[str, PolicyFeature] = field( + default_factory=lambda: {ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(12,))} + ) + features_map: dict[str, str] = field(default_factory=lambda: {ACTION: ACTION, "agent_pos": OBS_STATE}) + + def __post_init__(self): + if self.obs_type not in ("pixels", "pixels_agent_pos"): + raise ValueError(f"Unsupported obs_type: {self.obs_type}") + + # Preserve raw RoboCasa camera names end-to-end (e.g. + # `observation.images.robot0_agentview_left`). This matches the + # naming convention used by the RoboCasa datasets on the Hub, so + # trained policies don't need a `--rename_map` at eval time. + cams = [c.strip() for c in self.camera_name.split(",") if c.strip()] + for cam in cams: + self.features[f"pixels/{cam}"] = PolicyFeature( + type=FeatureType.VISUAL, + shape=(self.observation_height, self.observation_width, 3), + ) + self.features_map[f"pixels/{cam}"] = f"{OBS_IMAGES}.{cam}" + + if self.obs_type == "pixels_agent_pos": + self.features["agent_pos"] = PolicyFeature(type=FeatureType.STATE, shape=(16,)) + + @property + def gym_kwargs(self) -> dict: + kwargs: dict[str, Any] = { + "obs_type": self.obs_type, + "render_mode": self.render_mode, + "observation_height": self.observation_height, + "observation_width": self.observation_width, + "visualization_height": self.visualization_height, + "visualization_width": self.visualization_width, + } + if self.split is not None: + kwargs["split"] = self.split + return kwargs + + def create_envs(self, n_envs: int, use_async_envs: bool = False): + from .robocasa import create_robocasa_envs + + if self.task is None: + raise ValueError("RoboCasaEnv requires a task to be specified") + env_cls = _make_vec_env_cls(use_async_envs, n_envs) + return create_robocasa_envs( + 
task=self.task, + n_envs=n_envs, + camera_name=self.camera_name, + gym_kwargs=self.gym_kwargs, + env_cls=env_cls, + episode_length=self.episode_length, + obj_registries=tuple(self.obj_registries), + ) + + +@EnvConfig.register_subclass("vlabench") +@dataclass +class VLABenchEnv(EnvConfig): + task: str = "select_fruit" + fps: int = 10 + episode_length: int = 500 + obs_type: str = "pixels_agent_pos" + render_mode: str = "rgb_array" + render_resolution: tuple[int, int] = (480, 480) + robot: str = "franka" + action_mode: str = "eef" + features: dict[str, PolicyFeature] = field( + default_factory=lambda: { + ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(7,)), + } + ) + features_map: dict[str, str] = field( + default_factory=lambda: { + ACTION: ACTION, + "agent_pos": OBS_STATE, + "pixels/image": f"{OBS_IMAGES}.image", + "pixels/second_image": f"{OBS_IMAGES}.second_image", + "pixels/wrist_image": f"{OBS_IMAGES}.wrist_image", + } + ) + + def __post_init__(self): + h, w = self.render_resolution + if self.obs_type == "pixels": + self.features["pixels/image"] = PolicyFeature(type=FeatureType.VISUAL, shape=(h, w, 3)) + self.features["pixels/second_image"] = PolicyFeature(type=FeatureType.VISUAL, shape=(h, w, 3)) + self.features["pixels/wrist_image"] = PolicyFeature(type=FeatureType.VISUAL, shape=(h, w, 3)) + elif self.obs_type == "pixels_agent_pos": + self.features["pixels/image"] = PolicyFeature(type=FeatureType.VISUAL, shape=(h, w, 3)) + self.features["pixels/second_image"] = PolicyFeature(type=FeatureType.VISUAL, shape=(h, w, 3)) + self.features["pixels/wrist_image"] = PolicyFeature(type=FeatureType.VISUAL, shape=(h, w, 3)) + self.features["agent_pos"] = PolicyFeature(type=FeatureType.STATE, shape=(7,)) + else: + raise ValueError(f"Unsupported obs_type: {self.obs_type}") + + @property + def gym_kwargs(self) -> dict: + return { + "obs_type": self.obs_type, + "render_mode": self.render_mode, + "render_resolution": self.render_resolution, + "robot": self.robot, + 
"max_episode_steps": self.episode_length, + "action_mode": self.action_mode, + } + + def create_envs(self, n_envs: int, use_async_envs: bool = False): + from .vlabench import create_vlabench_envs + + if self.task is None: + raise ValueError("VLABenchEnv requires a task to be specified") + env_cls = _make_vec_env_cls(use_async_envs, n_envs) + return create_vlabench_envs( + task=self.task, + n_envs=n_envs, + gym_kwargs=self.gym_kwargs, + env_cls=env_cls, + ) + + @EnvConfig.register_subclass("isaaclab_arena") @dataclass class IsaaclabArenaEnv(HubEnvConfig): @@ -574,3 +716,171 @@ class IsaaclabArenaEnv(HubEnvConfig): ), PolicyProcessorPipeline(steps=[]), ) + + +@EnvConfig.register_subclass("libero_plus") +@dataclass +class LiberoPlusEnv(LiberoEnv): + """Config for LIBERO-plus robustness benchmark evaluation. + + LIBERO-plus extends LIBERO with 7 perturbation dimensions (camera viewpoints, + object layouts, robot initial states, language instructions, lighting, background + textures, sensor noise) producing ~10k task variants. + + The gym interface is identical to LIBERO so this class reuses ``LiberoEnv`` + entirely — only the registered name and default task suite differ. + + Install: see docker/Dockerfile.benchmark.libero_plus — LIBERO-plus ships + as a namespace package from a git fork and must be cloned + PYTHONPATH'd + rather than installed as a pyproject extra. + + See Also: + https://github.com/sylvestf/LIBERO-plus + """ + + task: str = "libero_spatial" + is_libero_plus: bool = True + + +@EnvConfig.register_subclass("robotwin") +@dataclass +class RoboTwinEnvConfig(EnvConfig): + """Configuration for RoboTwin 2.0 benchmark environments. + + RoboTwin 2.0 is a dual-arm manipulation benchmark with 50 tasks built on the + SAPIEN simulator. The robot is an Aloha-AgileX bimanual platform with 14 DOF + (7 per arm). All three cameras are enabled by default. 
+ + See: https://robotwin-platform.github.io + Dataset: https://huggingface.co/datasets/lerobot/robotwin_unified + """ + + task: str = "beat_block_hammer" # single task or comma-separated list + fps: int = 25 + episode_length: int = 300 + obs_type: str = "pixels_agent_pos" + render_mode: str = "rgb_array" + # Available cameras from RoboTwin's aloha-agilex embodiment: head_camera + # (torso-mounted) + left_camera / right_camera (wrists). + camera_names: str = "head_camera,left_camera,right_camera" + # Match the D435 dims in task_config/demo_clean.yml (_camera_config.yml). + # Gym's vector-env concatenate pre-allocates buffers of this shape, so it + # must equal what SAPIEN actually renders. + observation_height: int = 240 + observation_width: int = 320 + features: dict[str, PolicyFeature] = field( + default_factory=lambda: { + ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(14,)), + } + ) + features_map: dict[str, str] = field( + default_factory=lambda: { + ACTION: ACTION, + "pixels/head_camera": f"{OBS_IMAGES}.head_camera", + "pixels/left_camera": f"{OBS_IMAGES}.left_camera", + "pixels/right_camera": f"{OBS_IMAGES}.right_camera", + "agent_pos": OBS_STATE, + } + ) + + def __post_init__(self): + cam_list = [c.strip() for c in self.camera_names.split(",") if c.strip()] + for cam in cam_list: + self.features[f"pixels/{cam}"] = PolicyFeature( + type=FeatureType.VISUAL, + shape=(self.observation_height, self.observation_width, 3), + ) + # Keep features_map entry if already set (default_factory); add if missing. + key = f"pixels/{cam}" + if key not in self.features_map: + self.features_map[key] = f"{OBS_IMAGES}.{cam}" + + if self.obs_type == "pixels_agent_pos": + self.features["agent_pos"] = PolicyFeature( + type=FeatureType.STATE, + shape=(14,), # 14 DOF: 7 per arm + ) + elif self.obs_type != "pixels": + raise ValueError( + f"Unsupported obs_type '{self.obs_type}'. " + "RoboTwinEnvConfig supports 'pixels' and 'pixels_agent_pos'." 
+ ) + + @property + def gym_kwargs(self) -> dict: + return {} + + def create_envs(self, n_envs: int, use_async_envs: bool = True): + from lerobot.envs.robotwin import create_robotwin_envs + + if not self.task: + raise ValueError("RoboTwinEnvConfig requires `task` to be specified.") + + env_cls = _make_vec_env_cls(use_async_envs, n_envs) + cam_list = [c.strip() for c in self.camera_names.split(",") if c.strip()] + return create_robotwin_envs( + task=self.task, + n_envs=n_envs, + env_cls=env_cls, + camera_names=cam_list, + observation_height=self.observation_height, + observation_width=self.observation_width, + episode_length=self.episode_length, + ) + + +@EnvConfig.register_subclass("robomme") +@dataclass +class RoboMMEEnv(EnvConfig): + """RoboMME memory-augmented manipulation benchmark (ManiSkill/SAPIEN). + + 16 tasks across 4 suites: Counting, Permanence, Reference, Imitation. + Dataset: lerobot/robomme (LeRobot v3.0, 1,600 episodes). + Benchmark: https://github.com/RoboMME/robomme_benchmark + + Requires the `robomme` git package installed separately (Linux only); + see docker/Dockerfile.benchmark.robomme for the canonical install. 
+ """ + + task: str = "PickXtimes" + fps: int = 10 + episode_length: int = 300 + action_space: str = "joint_angle" # or "ee_pose" (7-D) + dataset_split: str = "test" # "train" | "val" | "test" + task_ids: list[int] | None = None + features: dict[str, PolicyFeature] = field(default_factory=dict) + features_map: dict[str, str] = field( + default_factory=lambda: { + ACTION: ACTION, + "pixels/image": f"{OBS_IMAGES}.image", + "pixels/wrist_image": f"{OBS_IMAGES}.wrist_image", + "agent_pos": OBS_STATE, + } + ) + + def __post_init__(self): + action_dim = 8 if self.action_space == "joint_angle" else 7 + self.features = { + ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(action_dim,)), + "pixels/image": PolicyFeature(type=FeatureType.VISUAL, shape=(256, 256, 3)), + "pixels/wrist_image": PolicyFeature(type=FeatureType.VISUAL, shape=(256, 256, 3)), + "agent_pos": PolicyFeature(type=FeatureType.STATE, shape=(8,)), + } + + @property + def gym_kwargs(self) -> dict: + return {} + + def create_envs(self, n_envs: int, use_async_envs: bool = True): + from lerobot.envs.robomme import create_robomme_envs + + env_cls = _make_vec_env_cls(use_async_envs, n_envs) + return create_robomme_envs( + task=self.task, + n_envs=n_envs, + action_space_type=self.action_space, + dataset=self.dataset_split, + episode_length=self.episode_length, + task_ids=self.task_ids, + env_cls=env_cls, + ) diff --git a/src/lerobot/envs/libero.py b/src/lerobot/envs/libero.py index ec90d0ffd..12be9e196 100644 --- a/src/lerobot/envs/libero.py +++ b/src/lerobot/envs/libero.py @@ -16,6 +16,7 @@ from __future__ import annotations import os +import re from collections import defaultdict from collections.abc import Callable, Iterable, Mapping, Sequence from functools import partial @@ -31,20 +32,7 @@ from libero.libero.envs import OffScreenRenderEnv from lerobot.types import RobotObservation -from .utils import _LazyAsyncVectorEnv - - -def _parse_camera_names(camera_name: str | Sequence[str]) -> list[str]: - 
"""Normalize camera_name into a non-empty list of strings.""" - if isinstance(camera_name, str): - cams = [c.strip() for c in camera_name.split(",") if c.strip()] - elif isinstance(camera_name, (list | tuple)): - cams = [str(c).strip() for c in camera_name if str(c).strip()] - else: - raise TypeError(f"camera_name must be str or sequence[str], got {type(camera_name).__name__}") - if not cams: - raise ValueError("camera_name resolved to an empty list.") - return cams +from .utils import _LazyAsyncVectorEnv, parse_camera_names def _get_suite(name: str) -> benchmark.Benchmark: @@ -69,14 +57,34 @@ def _select_task_ids(total_tasks: int, task_ids: Iterable[int] | None) -> list[i return ids -def get_task_init_states(task_suite: Any, i: int) -> np.ndarray: - init_states_path = ( - Path(get_libero_path("init_states")) - / task_suite.tasks[i].problem_folder - / task_suite.tasks[i].init_states_file - ) - init_states = torch.load(init_states_path, weights_only=False) # nosec B614 - return init_states +# LIBERO-plus perturbation variants encode the perturbation in the filename +# but on disk only the base `.pruned_init` exists — strip the suffix to match +# LIBERO-plus's own suite.get_task_init_states() (we reimplement it here so we +# can pass weights_only=False for PyTorch 2.6+ numpy pickles). +_LIBERO_PERTURBATION_SUFFIX_RE = re.compile(r"_(?:language|view|light)_[^.]*|_(?:table|tb)_\d+") + + +def get_task_init_states(task_suite: Any, i: int, is_libero_plus: bool = False) -> np.ndarray: + task = task_suite.tasks[i] + filename = Path(task.init_states_file) + root = Path(get_libero_path("init_states")) + + if not is_libero_plus: + init_states_path = root / task.problem_folder / filename.name + return torch.load(init_states_path, weights_only=False) # nosec B614 + + # LIBERO-plus: `_add_` / `_level` variants store extra-object layouts under + # libero_newobj/ as a flat array that must be reshaped to (1, -1). 
+ if "_add_" in filename.name or "_level" in filename.name: + init_states_path = root / "libero_newobj" / task.problem_folder / filename.name + init_states = torch.load(init_states_path, weights_only=False) # nosec B614 + return init_states.reshape(1, -1) + + # LIBERO-plus perturbation variants encode the perturbation in the filename + # but on disk only the base `.pruned_init` exists — strip the suffix to match. + stripped = _LIBERO_PERTURBATION_SUFFIX_RE.sub("", filename.stem) + filename.suffix + init_states_path = root / task.problem_folder / stripped + return torch.load(init_states_path, weights_only=False) # nosec B614 def get_libero_dummy_action(): @@ -118,9 +126,11 @@ class LiberoEnv(gym.Env): camera_name_mapping: dict[str, str] | None = None, num_steps_wait: int = 10, control_mode: str = "relative", + is_libero_plus: bool = False, ): super().__init__() self.task_id = task_id + self.is_libero_plus = is_libero_plus self.obs_type = obs_type self.render_mode = render_mode self.observation_width = observation_width @@ -128,7 +138,7 @@ class LiberoEnv(gym.Env): self.visualization_width = visualization_width self.visualization_height = visualization_height self.init_states = init_states - self.camera_name = _parse_camera_names( + self.camera_name = parse_camera_names( camera_name ) # agentview_image (main) or robot0_eye_in_hand_image (wrist) @@ -147,7 +157,11 @@ class LiberoEnv(gym.Env): self.episode_index = episode_index self.episode_length = episode_length # Load once and keep - self._init_states = get_task_init_states(task_suite, self.task_id) if self.init_states else None + self._init_states = ( + get_task_init_states(task_suite, self.task_id, is_libero_plus=self.is_libero_plus) + if self.init_states + else None + ) self._reset_stride = n_envs # when performing a reset, append `_reset_stride` to `init_state_id`. 
self.init_state_id = self.episode_index # tie each sub-env to a fixed init state @@ -380,6 +394,7 @@ def _make_env_fns( gym_kwargs: Mapping[str, Any], control_mode: str, camera_name_mapping: dict[str, str] | None = None, + is_libero_plus: bool = False, ) -> list[Callable[[], LiberoEnv]]: """Build n_envs factory callables for a single (suite, task_id).""" @@ -396,6 +411,7 @@ def _make_env_fns( n_envs=n_envs, control_mode=control_mode, camera_name_mapping=camera_name_mapping, + is_libero_plus=is_libero_plus, **local_kwargs, ) @@ -418,6 +434,7 @@ def create_libero_envs( control_mode: str = "relative", episode_length: int | None = None, camera_name_mapping: dict[str, str] | None = None, + is_libero_plus: bool = False, ) -> dict[str, dict[int, Any]]: """ Create vectorized LIBERO environments with a consistent return shape. @@ -437,7 +454,7 @@ def create_libero_envs( gym_kwargs = dict(gym_kwargs or {}) task_ids_filter = gym_kwargs.pop("task_ids", None) # optional: limit to specific tasks - camera_names = _parse_camera_names(camera_name) + camera_names = parse_camera_names(camera_name) suite_names = [s.strip() for s in str(task).split(",") if s.strip()] if not suite_names: raise ValueError("`task` must contain at least one LIBERO suite name.") @@ -462,6 +479,7 @@ def create_libero_envs( # Probe once and reuse to avoid creating a temp env per task. 
cached_obs_space: spaces.Space | None = None cached_act_space: spaces.Space | None = None + cached_metadata: dict[str, Any] | None = None for tid in selected: fns = _make_env_fns( @@ -475,12 +493,14 @@ def create_libero_envs( gym_kwargs=gym_kwargs, control_mode=control_mode, camera_name_mapping=camera_name_mapping, + is_libero_plus=is_libero_plus, ) if is_async: - lazy = _LazyAsyncVectorEnv(fns, cached_obs_space, cached_act_space) + lazy = _LazyAsyncVectorEnv(fns, cached_obs_space, cached_act_space, cached_metadata) if cached_obs_space is None: cached_obs_space = lazy.observation_space cached_act_space = lazy.action_space + cached_metadata = lazy.metadata out[suite_name][tid] = lazy else: out[suite_name][tid] = env_cls(fns) diff --git a/src/lerobot/envs/metaworld.py b/src/lerobot/envs/metaworld.py index 1dc513a68..bffcf6b6e 100644 --- a/src/lerobot/envs/metaworld.py +++ b/src/lerobot/envs/metaworld.py @@ -311,6 +311,7 @@ def create_metaworld_envs( is_async = env_cls is gym.vector.AsyncVectorEnv cached_obs_space = None cached_act_space = None + cached_metadata = None out: dict[str, dict[int, Any]] = defaultdict(dict) for group in task_groups: @@ -324,10 +325,11 @@ def create_metaworld_envs( fns = [(lambda tn=task_name: MetaworldEnv(task=tn, **gym_kwargs)) for _ in range(n_envs)] if is_async: - lazy = _LazyAsyncVectorEnv(fns, cached_obs_space, cached_act_space) + lazy = _LazyAsyncVectorEnv(fns, cached_obs_space, cached_act_space, cached_metadata) if cached_obs_space is None: cached_obs_space = lazy.observation_space cached_act_space = lazy.action_space + cached_metadata = lazy.metadata out[group][tid] = lazy else: out[group][tid] = env_cls(fns) diff --git a/src/lerobot/envs/robocasa.py b/src/lerobot/envs/robocasa.py new file mode 100644 index 000000000..a84a7c766 --- /dev/null +++ b/src/lerobot/envs/robocasa.py @@ -0,0 +1,425 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import logging +from collections import defaultdict +from collections.abc import Callable, Sequence +from functools import partial +from typing import Any + +import gymnasium as gym +import numpy as np +from gymnasium import spaces + +from lerobot.types import RobotObservation + +from .utils import _LazyAsyncVectorEnv, parse_camera_names + +logger = logging.getLogger(__name__) + +# Dimensions for the flat action/state vectors used by the LeRobot wrapper. +# These correspond to the PandaOmron robot in RoboCasa365. +OBS_STATE_DIM = 16 # base_pos(3) + base_quat(4) + ee_pos_rel(3) + ee_quat_rel(4) + gripper_qpos(2) +ACTION_DIM = 12 # base_motion(4) + control_mode(1) + ee_pos(3) + ee_rot(3) + gripper(1) +ACTION_LOW = -1.0 +ACTION_HIGH = 1.0 + +# Default PandaOmron cameras. We surface these raw names directly as +# `observation.images.` so the LeRobot dataset/policy keys match +# RoboCasa's native convention (no implicit renaming). +DEFAULT_CAMERAS = [ + "robot0_agentview_left", + "robot0_eye_in_hand", + "robot0_agentview_right", +] + +# Object-mesh registries to sample from. RoboCasa's upstream default is +# ("objaverse", "lightwheel"), but the objaverse pack is huge (~30GB) and +# most users — including our CI image — only download the lightwheel pack +# (`--type objs_lw` in `download_kitchen_assets`). 
When a sampled object +# category has zero candidates in every registry, robocasa crashes with +# `ValueError: Probabilities contain NaN` (0/0 divide in the probability +# normalization). Restricting to registries that are actually on disk +# avoids the NaN and matches what the asset download provides. +DEFAULT_OBJ_REGISTRIES: tuple[str, ...] = ("lightwheel",) + +# Task-group shortcuts accepted as `--env.task`. When the user passes one of +# these names, we expand it to the upstream RoboCasa task list and auto-set +# the dataset split. Individual task names (optionally comma-separated) still +# take precedence; this only triggers on an exact group-name match. +_TASK_GROUP_SPLITS = { + "atomic_seen": "target", + "composite_seen": "target", + "composite_unseen": "target", + "pretrain50": "pretrain", + "pretrain100": "pretrain", + "pretrain200": "pretrain", + "pretrain300": "pretrain", +} + + +def _resolve_tasks(task: str) -> tuple[list[str], str | None]: + """Resolve a `--env.task` value to (task_names, split_override). + + If `task` is a known task-group name (e.g. `atomic_seen`, `pretrain100`), + expand it via `robocasa.utils.dataset_registry.{TARGET,PRETRAINING}_TASKS` + and return the matching split. Otherwise treat `task` as a single task or + comma-separated list and leave the split untouched (None). + """ + key = task.strip() + if key in _TASK_GROUP_SPLITS: + from robocasa.utils.dataset_registry import PRETRAINING_TASKS, TARGET_TASKS + + combined = {**TARGET_TASKS, **PRETRAINING_TASKS} + if key not in combined: + raise ValueError( + f"Task group '{key}' is not available in this version of robocasa. " + f"Known groups: {sorted(combined.keys())}." 
+ ) + return list(combined[key]), _TASK_GROUP_SPLITS[key] + + names = [t.strip() for t in task.split(",") if t.strip()] + if not names: + raise ValueError("`task` must contain at least one RoboCasa task name.") + return names, None + + +def convert_action(flat_action: np.ndarray) -> dict[str, Any]: + """Split a flat (12,) action vector into a RoboCasa action dict. + + Layout: base_motion(4) + control_mode(1) + ee_pos(3) + ee_rot(3) + gripper(1) + """ + return { + "action.base_motion": flat_action[0:4], + "action.control_mode": flat_action[4:5], + "action.end_effector_position": flat_action[5:8], + "action.end_effector_rotation": flat_action[8:11], + "action.gripper_close": flat_action[11:12], + } + + +class RoboCasaEnv(gym.Env): + """LeRobot gym.Env wrapper for RoboCasa365 kitchen environments. + + Wraps RoboCasaGymEnv from the robocasa package and converts its + dict-based observations and actions into the flat arrays LeRobot expects. + Raw RoboCasa camera names are preserved verbatim under `pixels/`. 
+ """ + + metadata = {"render_modes": ["rgb_array"], "render_fps": 20} + + def __init__( + self, + task: str, + camera_name: str | Sequence[str] = ",".join(DEFAULT_CAMERAS), + obs_type: str = "pixels_agent_pos", + render_mode: str = "rgb_array", + observation_width: int = 256, + observation_height: int = 256, + visualization_width: int = 512, + visualization_height: int = 512, + split: str | None = None, + episode_length: int | None = None, + obj_registries: Sequence[str] = DEFAULT_OBJ_REGISTRIES, + episode_index: int = 0, + ): + super().__init__() + self.task = task + self.obs_type = obs_type + self.render_mode = render_mode + self.observation_width = observation_width + self.observation_height = observation_height + self.visualization_width = visualization_width + self.visualization_height = visualization_height + self.split = split + self.obj_registries = tuple(obj_registries) + # Per-worker index (0..n_envs-1) used to spread the user-provided + # seed across factories so each sub-env explores a distinct layout + # even when the same seed is passed to `reset()`. + self.episode_index = int(episode_index) + + self.camera_name = parse_camera_names(camera_name) + + self._max_episode_steps = episode_length if episode_length is not None else 1000 + + # Deferred — created on first reset() inside the worker subprocess + # to avoid inheriting stale GPU/EGL contexts across fork(). 
+ self._env: Any = None + self.task_description = "" + + images = { + cam: spaces.Box( + low=0, + high=255, + shape=(self.observation_height, self.observation_width, 3), + dtype=np.uint8, + ) + for cam in self.camera_name + } + + if self.obs_type == "pixels": + self.observation_space = spaces.Dict({"pixels": spaces.Dict(images)}) + elif self.obs_type == "pixels_agent_pos": + self.observation_space = spaces.Dict( + { + "pixels": spaces.Dict(images), + "agent_pos": spaces.Box( + low=-np.inf, + high=np.inf, + shape=(OBS_STATE_DIM,), + dtype=np.float32, + ), + } + ) + else: + raise ValueError(f"Unsupported obs_type '{self.obs_type}'. Use 'pixels' or 'pixels_agent_pos'.") + + self.action_space = spaces.Box( + low=ACTION_LOW, + high=ACTION_HIGH, + shape=(ACTION_DIM,), + dtype=np.float32, + ) + + def _ensure_env(self) -> None: + """Create the underlying RoboCasaGymEnv on first use. + + Called inside the worker subprocess after fork(), so each worker gets + its own clean rendering context rather than inheriting a stale one from + the parent process (which causes crashes with AsyncVectorEnv). + """ + if self._env is not None: + return + from robocasa.wrappers.gym_wrapper import RoboCasaGymEnv + + # RoboCasaGymEnv defaults split="test", which create_env rejects + # (only None/"all"/"pretrain"/"target" are valid). Always pass a + # valid value so we don't hit that default. Extra kwargs are + # forwarded to the underlying kitchen env via create_env/robosuite.make. + self._env = RoboCasaGymEnv( + env_name=self.task, + camera_widths=self.observation_width, + camera_heights=self.observation_height, + split=self.split if self.split is not None else "all", + obj_registries=self.obj_registries, + ) + + ep_meta = self._env.env.get_ep_meta() + self.task_description = ep_meta.get("lang", self.task) + + def _format_raw_obs(self, raw_obs: dict) -> RobotObservation: + """Convert RoboCasaGymEnv observation dict to LeRobot format.""" + # RoboCasaGymEnv emits camera frames under "video.". 
+ images = {cam: raw_obs[f"video.{cam}"] for cam in self.camera_name if f"video.{cam}" in raw_obs} + + if self.obs_type == "pixels": + return {"pixels": images} + + # `state.*` keys come from PandaOmronKeyConverter inside the wrapper. + agent_pos = np.concatenate( + [ + raw_obs.get("state.base_position", np.zeros(3)), + raw_obs.get("state.base_rotation", np.zeros(4)), + raw_obs.get("state.end_effector_position_relative", np.zeros(3)), + raw_obs.get("state.end_effector_rotation_relative", np.zeros(4)), + raw_obs.get("state.gripper_qpos", np.zeros(2)), + ], + axis=-1, + ).astype(np.float32) + + return {"pixels": images, "agent_pos": agent_pos} + + def render(self) -> np.ndarray: + self._ensure_env() + assert self._env is not None + return self._env.render() + + def reset(self, seed=None, **kwargs): + self._ensure_env() + assert self._env is not None + super().reset(seed=seed) + # Spread the seed across workers so n_envs factories don't all + # roll the same scene. With an explicit user seed we shift it by + # episode_index; with no seed we fall back to episode_index so + # each worker is still distinct rather than inheriting the same + # global RNG state. 
+ worker_seed = seed + self.episode_index if seed is not None else self.episode_index + raw_obs, info = self._env.reset(seed=worker_seed) + + ep_meta = self._env.env.get_ep_meta() + self.task_description = ep_meta.get("lang", self.task) + + observation = self._format_raw_obs(raw_obs) + info = {"is_success": False} + return observation, info + + def step(self, action: np.ndarray) -> tuple[RobotObservation, float, bool, bool, dict[str, Any]]: + self._ensure_env() + assert self._env is not None + if action.ndim != 1: + raise ValueError( + f"Expected action to be 1-D (shape (action_dim,)), " + f"but got shape {action.shape} with ndim={action.ndim}" + ) + + action_dict = convert_action(action) + raw_obs, reward, done, truncated, info = self._env.step(action_dict) + + is_success = bool(info.get("success", False)) + terminated = done or is_success + info.update({"task": self.task, "done": done, "is_success": is_success}) + + observation = self._format_raw_obs(raw_obs) + if terminated: + info["final_info"] = { + "task": self.task, + "done": bool(done), + "is_success": bool(is_success), + } + self.reset() + + return observation, reward, terminated, truncated, info + + def close(self): + if self._env is not None: + self._env.close() + + +def _make_env_fns( + *, + task: str, + n_envs: int, + camera_names: list[str], + obs_type: str, + render_mode: str, + observation_width: int, + observation_height: int, + visualization_width: int, + visualization_height: int, + split: str | None, + episode_length: int | None, + obj_registries: Sequence[str], +) -> list[Callable[[], RoboCasaEnv]]: + """Build n_envs factory callables for a single task. + + Each factory carries a distinct ``episode_index`` (``0..n_envs-1``) so + ``RoboCasaEnv.reset()`` can derive a per-worker seed series from the + user-provided seed. 
+ """ + + def _make_env(episode_index: int) -> RoboCasaEnv: + return RoboCasaEnv( + task=task, + camera_name=camera_names, + obs_type=obs_type, + render_mode=render_mode, + observation_width=observation_width, + observation_height=observation_height, + visualization_width=visualization_width, + visualization_height=visualization_height, + split=split, + episode_length=episode_length, + obj_registries=obj_registries, + episode_index=episode_index, + ) + + return [partial(_make_env, i) for i in range(n_envs)] + + +def create_robocasa_envs( + task: str, + n_envs: int, + gym_kwargs: dict[str, Any] | None = None, + camera_name: str | Sequence[str] = ",".join(DEFAULT_CAMERAS), + env_cls: Callable[[Sequence[Callable[[], Any]]], Any] | None = None, + episode_length: int | None = None, + obj_registries: Sequence[str] = DEFAULT_OBJ_REGISTRIES, +) -> dict[str, dict[int, Any]]: + """Create vectorized RoboCasa365 environments with a consistent return shape. + + Returns: + dict[task_name][task_id] -> vec_env (env_cls([...]) with exactly n_envs factories) + + `task` can be: + - a single task name (e.g. `CloseFridge`) + - a comma-separated list of task names (e.g. `CloseFridge,PickPlaceCoffee`) + - a benchmark-group shortcut (`atomic_seen`, `composite_seen`, + `composite_unseen`, `pretrain50`, `pretrain100`, `pretrain200`, + `pretrain300`), which auto-expands to the upstream task list and + auto-sets the dataset `split` ("target" or "pretrain"). 
+ """ + if env_cls is None or not callable(env_cls): + raise ValueError("env_cls must be a callable that wraps a list of environment factory callables.") + if not isinstance(n_envs, int) or n_envs <= 0: + raise ValueError(f"n_envs must be a positive int; got {n_envs}.") + + gym_kwargs = dict(gym_kwargs or {}) + obs_type = gym_kwargs.pop("obs_type", "pixels_agent_pos") + render_mode = gym_kwargs.pop("render_mode", "rgb_array") + observation_width = gym_kwargs.pop("observation_width", 256) + observation_height = gym_kwargs.pop("observation_height", 256) + visualization_width = gym_kwargs.pop("visualization_width", 512) + visualization_height = gym_kwargs.pop("visualization_height", 512) + split = gym_kwargs.pop("split", None) + + camera_names = parse_camera_names(camera_name) + task_names, group_split = _resolve_tasks(str(task)) + if group_split is not None and split is None: + split = group_split + + logger.info( + "Creating RoboCasa envs | tasks=%s | split=%s | n_envs(per task)=%d", + task_names, + split, + n_envs, + ) + + is_async = env_cls is gym.vector.AsyncVectorEnv + + cached_obs_space: spaces.Space | None = None + cached_act_space: spaces.Space | None = None + cached_metadata: dict[str, Any] | None = None + out: dict[str, dict[int, Any]] = defaultdict(dict) + + for task_name in task_names: + fns = _make_env_fns( + task=task_name, + n_envs=n_envs, + camera_names=camera_names, + obs_type=obs_type, + render_mode=render_mode, + observation_width=observation_width, + observation_height=observation_height, + visualization_width=visualization_width, + visualization_height=visualization_height, + split=split, + episode_length=episode_length, + obj_registries=obj_registries, + ) + + if is_async: + lazy = _LazyAsyncVectorEnv(fns, cached_obs_space, cached_act_space, cached_metadata) + if cached_obs_space is None: + cached_obs_space = lazy.observation_space + cached_act_space = lazy.action_space + cached_metadata = lazy.metadata + out[task_name][0] = lazy + else: + 
out[task_name][0] = env_cls(fns) + logger.info("Built vec env | task=%s | n_envs=%d", task_name, n_envs) + + return {name: dict(task_map) for name, task_map in out.items()} diff --git a/src/lerobot/envs/robomme.py b/src/lerobot/envs/robomme.py new file mode 100644 index 000000000..69d665bd4 --- /dev/null +++ b/src/lerobot/envs/robomme.py @@ -0,0 +1,245 @@ +"""RoboMME environment wrapper for LeRobot evaluation. + +Wraps the RoboMME ``BenchmarkEnvBuilder`` into a Gymnasium-compatible +``VectorEnv`` suitable for ``lerobot_eval``. + +RoboMME tasks: + Counting: BinFill, PickXtimes, SwingXtimes, StopCube + Permanence: VideoUnmask, VideoUnmaskSwap, ButtonUnmask, ButtonUnmaskSwap + Reference: PickHighlight, VideoRepick, VideoPlaceButton, VideoPlaceOrder + Imitation: MoveCube, InsertPeg, PatternLock, RouteStick + +Dataset: lerobot/robomme (LeRobot v3.0, 1,600 episodes) +Install: see docker/Dockerfile.benchmark.robomme (Linux only — mani-skill vs numpy pin conflict) +Benchmark: https://github.com/RoboMME/robomme_benchmark +""" + +from __future__ import annotations + +from collections.abc import Callable, Sequence +from functools import partial +from typing import Any + +import gymnasium as gym +import numpy as np +from gymnasium import spaces + +from .utils import _LazyAsyncVectorEnv + +ROBOMME_TASKS = [ + "BinFill", + "PickXtimes", + "SwingXtimes", + "StopCube", + "VideoUnmask", + "VideoUnmaskSwap", + "ButtonUnmask", + "ButtonUnmaskSwap", + "PickHighlight", + "VideoRepick", + "VideoPlaceButton", + "VideoPlaceOrder", + "MoveCube", + "InsertPeg", + "PatternLock", + "RouteStick", +] + + +class RoboMMEGymEnv(gym.Env): + """Thin Gymnasium wrapper around a single RoboMME episode env.""" + + metadata = {"render_modes": ["rgb_array"], "render_fps": 10} + + def __init__( + self, + task: str = "PickXtimes", + action_space_type: str = "joint_angle", + dataset: str = "test", + episode_idx: int = 0, + max_steps: int = 300, + ): + super().__init__() + from robomme.env_record_wrapper 
import BenchmarkEnvBuilder + + self._task = task + self._action_space_type = action_space_type + self._dataset = dataset + self._episode_idx = episode_idx + self._max_steps = max_steps + self._max_episode_steps = max_steps + + self._builder = BenchmarkEnvBuilder( + env_id=task, + dataset=dataset, + action_space=action_space_type, + gui_render=False, + max_steps=max_steps, + ) + self._env = None + self._last_raw_obs: dict | None = None + + action_dim = 8 if action_space_type == "joint_angle" else 7 + self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(action_dim,), dtype=np.float32) + # `pixels` must be a nested Dict so `preprocess_observation()` in + # envs/utils.py picks it up and maps each camera to + # `observation.images.`. A flat layout (`pixels/image`, + # `pixels/wrist_image`) silently drops every image from the batch. + self.observation_space = spaces.Dict( + { + "pixels": spaces.Dict( + { + "image": spaces.Box(0, 255, shape=(256, 256, 3), dtype=np.uint8), + "wrist_image": spaces.Box(0, 255, shape=(256, 256, 3), dtype=np.uint8), + } + ), + "agent_pos": spaces.Box(-np.inf, np.inf, shape=(8,), dtype=np.float32), + } + ) + + def reset(self, *, seed=None, options=None): + super().reset(seed=seed) + self._env = self._builder.make_env_for_episode( + episode_idx=self._episode_idx, + max_steps=self._max_steps, + ) + obs, info = self._env.reset() + self._last_raw_obs = obs + return self._convert_obs(obs), self._convert_info(info) + + def step(self, action): + obs, reward, terminated, truncated, info = self._env.step(action) + self._last_raw_obs = obs + + terminated_bool = bool(terminated.item()) if hasattr(terminated, "item") else bool(terminated) + truncated_bool = bool(truncated.item()) if hasattr(truncated, "item") else bool(truncated) + + status = info.get("status", "ongoing") + is_success = status == "success" + conv_info = self._convert_info(info) + conv_info["is_success"] = is_success + + return self._convert_obs(obs), float(reward), terminated_bool, 
truncated_bool, conv_info + + def render(self) -> np.ndarray | None: + """Return the front camera image from the last observation for video recording.""" + if self._last_raw_obs is None: + return np.zeros((256, 256, 3), dtype=np.uint8) + front = self._last_raw_obs.get("front_rgb_list") + if front is None: + return np.zeros((256, 256, 3), dtype=np.uint8) + frame = front[-1] if isinstance(front, list) else front + return np.asarray(frame, dtype=np.uint8) + + def _convert_obs(self, obs: dict) -> dict: + front_rgb = ( + obs["front_rgb_list"][-1] if isinstance(obs["front_rgb_list"], list) else obs["front_rgb_list"] + ) + wrist_rgb = ( + obs["wrist_rgb_list"][-1] if isinstance(obs["wrist_rgb_list"], list) else obs["wrist_rgb_list"] + ) + joint_state = ( + obs["joint_state_list"][-1] + if isinstance(obs["joint_state_list"], list) + else obs["joint_state_list"] + ) + gripper_state = ( + obs["gripper_state_list"][-1] + if isinstance(obs["gripper_state_list"], list) + else obs["gripper_state_list"] + ) + + front_rgb = np.asarray(front_rgb, dtype=np.uint8) + wrist_rgb = np.asarray(wrist_rgb, dtype=np.uint8) + joint = np.asarray(joint_state, dtype=np.float32).flatten()[:7] + gripper = np.asarray(gripper_state, dtype=np.float32).flatten()[:1] + state = np.concatenate([joint, gripper]) + + return { + "pixels": {"image": front_rgb, "wrist_image": wrist_rgb}, + "agent_pos": state, + } + + def _convert_info(self, info: dict) -> dict: + return { + "status": info.get("status", "ongoing"), + "task_goal": info.get("task_goal", ""), + } + + +def _make_env_fns( + *, + task: str, + n_envs: int, + action_space_type: str, + dataset: str, + episode_length: int, + task_id: int, +) -> list[Callable[[], RoboMMEGymEnv]]: + """Build n_envs factory callables for one RoboMME task id.""" + + def _make_one(episode_index: int) -> RoboMMEGymEnv: + return RoboMMEGymEnv( + task=task, + action_space_type=action_space_type, + dataset=dataset, + episode_idx=episode_index, + max_steps=episode_length, + ) + + 
return [partial(_make_one, task_id + i) for i in range(n_envs)] + + +def create_robomme_envs( + task: str, + n_envs: int = 1, + action_space_type: str = "joint_angle", + dataset: str = "test", + episode_length: int = 300, + task_ids: list[int] | None = None, + env_cls: Callable[[Sequence[Callable[[], Any]]], Any] | None = None, +) -> dict[str, dict[int, gym.vector.VectorEnv]]: + """Create vectorized RoboMME environments for evaluation. + + `task` may be a single RoboMME task name (e.g. "PickXtimes") or a + comma-separated list (e.g. "PickXtimes,BinFill,StopCube"). Each task + becomes its own suite in the returned mapping. + + Returns {suite_name: {task_id: VectorEnv}} matching lerobot's expected format. + """ + if env_cls is None or not callable(env_cls): + raise ValueError("env_cls must be a callable that wraps a list of env factory callables.") + if not isinstance(n_envs, int) or n_envs <= 0: + raise ValueError(f"n_envs must be a positive int; got {n_envs}.") + + if task_ids is None: + task_ids = [0] + + task_names = [t.strip() for t in task.split(",") if t.strip()] + is_async = env_cls is gym.vector.AsyncVectorEnv + cached_obs_space: spaces.Space | None = None + cached_act_space: spaces.Space | None = None + cached_metadata: dict[str, Any] | None = None + out: dict[str, dict[int, gym.vector.VectorEnv]] = {} + for task_name in task_names: + envs_by_task: dict[int, gym.vector.VectorEnv] = {} + for task_id in task_ids: + fns = _make_env_fns( + task=task_name, + n_envs=n_envs, + action_space_type=action_space_type, + dataset=dataset, + episode_length=episode_length, + task_id=task_id, + ) + if is_async: + lazy = _LazyAsyncVectorEnv(fns, cached_obs_space, cached_act_space, cached_metadata) + if cached_obs_space is None: + cached_obs_space = lazy.observation_space + cached_act_space = lazy.action_space + cached_metadata = lazy.metadata + envs_by_task[task_id] = lazy + else: + envs_by_task[task_id] = env_cls(fns) + out[task_name] = envs_by_task + return out diff --git 
a/src/lerobot/envs/robotwin.py b/src/lerobot/envs/robotwin.py
new file mode 100644
index 000000000..823f14fa0
--- /dev/null
+++ b/src/lerobot/envs/robotwin.py
@@ -0,0 +1,488 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import importlib
+import logging
+from collections import defaultdict
+from collections.abc import Callable, Sequence
+from functools import partial
+from typing import Any
+
+import gymnasium as gym
+import numpy as np
+import torch
+from gymnasium import spaces
+
+from lerobot.types import RobotObservation
+
+from .utils import _LazyAsyncVectorEnv
+
+logger = logging.getLogger(__name__)
+
+# Camera names as used by RoboTwin 2.0. `_get_obs()` uses each name unchanged as
+# a key under get_obs()["observation"] and reads the nested "rgb" entry from it.
+ROBOTWIN_CAMERA_NAMES: tuple[str, ...] = (
+ "head_camera",
+ "left_camera",
+ "right_camera",
+)
+
+ACTION_DIM = 14 # 7 DOF × 2 arms
+ACTION_LOW = -1.0
+ACTION_HIGH = 1.0
+DEFAULT_EPISODE_LENGTH = 300
+# D435 dims from task_config/_camera_config.yml (what demo_clean.yml selects).
+DEFAULT_CAMERA_H = 240
+DEFAULT_CAMERA_W = 320
+
+# Task list from RoboTwin 2.0's `envs/` directory — mirrors upstream exactly
+# (50 tasks as of main; earlier revisions had 60 with a different split).
+# Keep this in sync with: +# gh api /repos/RoboTwin-Platform/RoboTwin/contents/envs --paginate \ +# | jq -r '.[].name' | grep -E '\.py$' | grep -v '^_' | sed 's/\.py$//' +ROBOTWIN_TASKS: tuple[str, ...] = ( + "adjust_bottle", + "beat_block_hammer", + "blocks_ranking_rgb", + "blocks_ranking_size", + "click_alarmclock", + "click_bell", + "dump_bin_bigbin", + "grab_roller", + "handover_block", + "handover_mic", + "hanging_mug", + "lift_pot", + "move_can_pot", + "move_pillbottle_pad", + "move_playingcard_away", + "move_stapler_pad", + "open_laptop", + "open_microwave", + "pick_diverse_bottles", + "pick_dual_bottles", + "place_a2b_left", + "place_a2b_right", + "place_bread_basket", + "place_bread_skillet", + "place_burger_fries", + "place_can_basket", + "place_cans_plasticbox", + "place_container_plate", + "place_dual_shoes", + "place_empty_cup", + "place_fan", + "place_mouse_pad", + "place_object_basket", + "place_object_scale", + "place_object_stand", + "place_phone_stand", + "place_shoe", + "press_stapler", + "put_bottles_dustbin", + "put_object_cabinet", + "rotate_qrcode", + "scan_object", + "shake_bottle", + "shake_bottle_horizontally", + "stack_blocks_three", + "stack_blocks_two", + "stack_bowls_three", + "stack_bowls_two", + "stamp_seal", + "turn_switch", +) + + +_ROBOTWIN_SETUP_CACHE: dict[str, dict[str, Any]] = {} + + +def _load_robotwin_setup_kwargs(task_name: str) -> dict[str, Any]: + """Build the kwargs dict RoboTwin's setup_demo expects. + + Mirrors the config loading done by RoboTwin's ``script/eval_policy.py``: + reads ``task_config/demo_clean.yml``, resolves the embodiment file from + ``_embodiment_config.yml``, loads the robot's own ``config.yml``, and + reads camera dimensions from ``_camera_config.yml``. + + Uses ``aloha-agilex`` single-robot dual-arm by default (the only embodiment + used by beat_block_hammer and most smoke-test tasks). 
+ """ + if task_name in _ROBOTWIN_SETUP_CACHE: + return dict(_ROBOTWIN_SETUP_CACHE[task_name]) + + import os + + import yaml # type: ignore[import-untyped] + from envs import CONFIGS_PATH # type: ignore[import-not-found] + + task_config = "demo_clean" + with open(os.path.join(CONFIGS_PATH, f"{task_config}.yml"), encoding="utf-8") as f: + args = yaml.safe_load(f) + + # Resolve embodiment — demo_clean.yml uses [aloha-agilex] (dual-arm single robot) + with open(os.path.join(CONFIGS_PATH, "_embodiment_config.yml"), encoding="utf-8") as f: + embodiment_types = yaml.safe_load(f) + embodiment = args.get("embodiment", ["aloha-agilex"]) + if len(embodiment) == 1: + robot_file = embodiment_types[embodiment[0]]["file_path"] + args["left_robot_file"] = robot_file + args["right_robot_file"] = robot_file + args["dual_arm_embodied"] = True + elif len(embodiment) == 3: + args["left_robot_file"] = embodiment_types[embodiment[0]]["file_path"] + args["right_robot_file"] = embodiment_types[embodiment[1]]["file_path"] + args["embodiment_dis"] = embodiment[2] + args["dual_arm_embodied"] = False + else: + raise ValueError(f"embodiment must have 1 or 3 items, got {len(embodiment)}") + + with open(os.path.join(args["left_robot_file"], "config.yml"), encoding="utf-8") as f: + args["left_embodiment_config"] = yaml.safe_load(f) + with open(os.path.join(args["right_robot_file"], "config.yml"), encoding="utf-8") as f: + args["right_embodiment_config"] = yaml.safe_load(f) + + # Camera dimensions + with open(os.path.join(CONFIGS_PATH, "_camera_config.yml"), encoding="utf-8") as f: + camera_config = yaml.safe_load(f) + head_cam = args["camera"]["head_camera_type"] + args["head_camera_h"] = camera_config[head_cam]["h"] + args["head_camera_w"] = camera_config[head_cam]["w"] + + # Headless overrides + args["render_freq"] = 0 + args["task_name"] = task_name + args["task_config"] = task_config + + _ROBOTWIN_SETUP_CACHE[task_name] = args + return dict(args) + + +def _load_robotwin_task(task_name: str) 
-> type: + """Dynamically import and return a RoboTwin 2.0 task class. + + RoboTwin tasks live in ``envs/.py`` relative to the repository + root and are expected to be on ``sys.path`` after installation. + """ + try: + module = importlib.import_module(f"envs.{task_name}") + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Could not import RoboTwin task '{task_name}'. " + "Ensure RoboTwin 2.0 is installed and its 'envs/' directory is on PYTHONPATH. " + "See the RoboTwin installation guide: https://robotwin-platform.github.io/doc/usage/robotwin-install.html" + ) from e + task_cls = getattr(module, task_name, None) + if task_cls is None: + raise AttributeError(f"Task class '{task_name}' not found in envs/{task_name}.py") + return task_cls + + +class RoboTwinEnv(gym.Env): + """Gymnasium wrapper around a single RoboTwin 2.0 task. + + RoboTwin uses a custom SAPIEN-based API (``setup_demo`` / ``get_obs`` / + ``take_action`` / ``check_success``) rather than the standard gym interface. + This class bridges that API to Gymnasium so that ``lerobot-eval`` can drive + RoboTwin exactly like LIBERO or Meta-World. + + The underlying SAPIEN environment is created lazily on the first ``reset()`` + call *inside the worker process*. This is required for + ``gym.vector.AsyncVectorEnv`` compatibility: SAPIEN allocates EGL/GPU + contexts that must not be forked from the parent process. + + Observations + ------------ + The ``pixels`` dict uses the raw RoboTwin camera names as keys (e.g. + ``"head_camera"``, ``"left_camera"``). ``preprocess_observation`` in + ``envs/utils.py`` then converts these to ``observation.images.``. + + Actions + ------- + 14-dim float32 array in ``[-1, 1]`` (joint-space, 7 DOF per arm). + + Autograd + -------- + ``setup_demo`` and ``take_action`` drive CuRobo's Newton trajectory + optimizer, which calls ``cost.backward()`` internally. lerobot_eval wraps + the rollout in ``torch.no_grad()``, so both call sites re-enable grad. 
+ """ + + metadata = {"render_modes": ["rgb_array"], "render_fps": 25} + + def __init__( + self, + task_name: str, + episode_index: int = 0, + n_envs: int = 1, + camera_names: Sequence[str] = ROBOTWIN_CAMERA_NAMES, + observation_height: int | None = None, + observation_width: int | None = None, + episode_length: int = DEFAULT_EPISODE_LENGTH, + render_mode: str = "rgb_array", + ): + super().__init__() + self.task_name = task_name + self.task = task_name # used by add_envs_task() in utils.py + self.task_description = task_name.replace("_", " ") + self.episode_index = episode_index + self._reset_stride = n_envs + self.camera_names = list(camera_names) + # Default to D435 dims (the camera type baked into task_config/demo_clean.yml). + # The YAML-driven lookup is deferred to reset() so construction doesn't + # import RoboTwin's `envs` module — fast-tests run without RoboTwin installed. + self.observation_height = observation_height or DEFAULT_CAMERA_H + self.observation_width = observation_width or DEFAULT_CAMERA_W + self.episode_length = episode_length + self._max_episode_steps = episode_length # lerobot_eval.rollout reads this + self.render_mode = render_mode + + self._env: Any | None = None # deferred — created on first reset() inside worker + self._step_count: int = 0 + self._black_frame = np.zeros((self.observation_height, self.observation_width, 3), dtype=np.uint8) + + image_spaces = { + cam: spaces.Box( + low=0, + high=255, + shape=(self.observation_height, self.observation_width, 3), + dtype=np.uint8, + ) + for cam in self.camera_names + } + self.observation_space = spaces.Dict( + { + "pixels": spaces.Dict(image_spaces), + "agent_pos": spaces.Box(low=-np.inf, high=np.inf, shape=(ACTION_DIM,), dtype=np.float32), + } + ) + self.action_space = spaces.Box( + low=ACTION_LOW, high=ACTION_HIGH, shape=(ACTION_DIM,), dtype=np.float32 + ) + + def _ensure_env(self) -> None: + """Create the SAPIEN environment on first use. 
+ + Called inside the worker subprocess after fork(), so each worker gets + its own EGL/GPU context rather than inheriting a stale one from the + parent process (which causes crashes with AsyncVectorEnv). + """ + if self._env is not None: + return + task_cls = _load_robotwin_task(self.task_name) + self._env = task_cls() + + def _get_obs(self) -> RobotObservation: + assert self._env is not None, "_get_obs called before _ensure_env()" + raw = self._env.get_obs() + cameras_raw = raw.get("observation", {}) + + images: dict[str, np.ndarray] = {} + for cam in self.camera_names: + cam_data = cameras_raw.get(cam) + img = cam_data.get("rgb") if cam_data else None + if img is None: + images[cam] = self._black_frame + continue + img = np.asarray(img, dtype=np.uint8) + if img.ndim == 2: + img = np.stack([img, img, img], axis=-1) + elif img.shape[-1] != 3: + img = img[..., :3] + images[cam] = img + + ja = raw.get("joint_action") or {} + vec = ja.get("vector") + if vec is not None: + arr = np.asarray(vec, dtype=np.float32).ravel() + joint_state = ( + arr[:ACTION_DIM] if arr.size >= ACTION_DIM else np.zeros(ACTION_DIM, dtype=np.float32) + ) + else: + joint_state = np.zeros(ACTION_DIM, dtype=np.float32) + + return {"pixels": images, "agent_pos": joint_state} + + def reset(self, seed: int | None = None, **kwargs) -> tuple[RobotObservation, dict]: + self._ensure_env() + super().reset(seed=seed) + assert self._env is not None # set by _ensure_env() above + + actual_seed = self.episode_index if seed is None else seed + setup_kwargs = _load_robotwin_setup_kwargs(self.task_name) + setup_kwargs.update(seed=actual_seed, is_test=True) + with torch.enable_grad(): + self._env.setup_demo(**setup_kwargs) + self.episode_index += self._reset_stride + self._step_count = 0 + + obs = self._get_obs() + return obs, {"is_success": False, "task": self.task_name} + + def step(self, action: np.ndarray) -> tuple[RobotObservation, float, bool, bool, dict[str, Any]]: + assert self._env is not None, "step() 
called before reset()" + if action.ndim != 1 or action.shape[0] != ACTION_DIM: + raise ValueError(f"Expected 1-D action of shape ({ACTION_DIM},), got {action.shape}") + + with torch.enable_grad(): + if hasattr(self._env, "take_action"): + self._env.take_action(action) + else: + self._env.step(action) + + self._step_count += 1 + + is_success = bool(getattr(self._env, "eval_success", False)) + if not is_success and hasattr(self._env, "check_success"): + is_success = bool(self._env.check_success()) + + obs = self._get_obs() + reward = float(is_success) + terminated = is_success + truncated = self._step_count >= self.episode_length + + info: dict[str, Any] = { + "task": self.task_name, + "is_success": is_success, + "step": self._step_count, + } + if terminated or truncated: + info["final_info"] = { + "task": self.task_name, + "is_success": is_success, + } + self.reset() + + return obs, reward, terminated, truncated, info + + def render(self) -> np.ndarray: + self._ensure_env() + obs = self._get_obs() + # Prefer head camera for rendering; fall back to first available. 
+ if "head_camera" in obs["pixels"]: + return obs["pixels"]["head_camera"] + return next(iter(obs["pixels"].values())) + + def close(self) -> None: + if self._env is not None: + if hasattr(self._env, "close_env"): + import contextlib + + with contextlib.suppress(TypeError): + self._env.close_env() + self._env = None + + +# ---- Multi-task factory -------------------------------------------------------- + + +def _make_env_fns( + *, + task_name: str, + n_envs: int, + camera_names: list[str], + observation_height: int, + observation_width: int, + episode_length: int, +) -> list[Callable[[], RoboTwinEnv]]: + """Return n_envs factory callables for a single task.""" + + def _make_one(episode_index: int) -> RoboTwinEnv: + return RoboTwinEnv( + task_name=task_name, + episode_index=episode_index, + n_envs=n_envs, + camera_names=camera_names, + observation_height=observation_height, + observation_width=observation_width, + episode_length=episode_length, + ) + + return [partial(_make_one, i) for i in range(n_envs)] + + +def create_robotwin_envs( + task: str, + n_envs: int, + env_cls: Callable[[Sequence[Callable[[], Any]]], Any] | None = None, + camera_names: Sequence[str] = ROBOTWIN_CAMERA_NAMES, + observation_height: int = DEFAULT_CAMERA_H, + observation_width: int = DEFAULT_CAMERA_W, + episode_length: int = DEFAULT_EPISODE_LENGTH, +) -> dict[str, dict[int, Any]]: + """Create vectorized RoboTwin 2.0 environments. + + Returns: + ``dict[task_name][0] -> VectorEnv`` — one entry per task, each wrapping + ``n_envs`` parallel rollouts. + + Args: + task: Comma-separated list of task names (e.g. ``"beat_block_hammer"`` + or ``"beat_block_hammer,click_bell"``). + n_envs: Number of parallel rollouts per task. + env_cls: Vector env constructor (e.g. ``gym.vector.AsyncVectorEnv``). + camera_names: Cameras to include in observations. + observation_height: Pixel height for all cameras. + observation_width: Pixel width for all cameras. + episode_length: Max steps before truncation. 
+ """ + if env_cls is None or not callable(env_cls): + raise ValueError("env_cls must be callable (e.g. gym.vector.AsyncVectorEnv).") + if not isinstance(n_envs, int) or n_envs <= 0: + raise ValueError(f"n_envs must be a positive int; got {n_envs}.") + + task_names = [t.strip() for t in str(task).split(",") if t.strip()] + if not task_names: + raise ValueError("`task` must contain at least one RoboTwin task name.") + + unknown = [t for t in task_names if t not in ROBOTWIN_TASKS] + if unknown: + raise ValueError(f"Unknown RoboTwin tasks: {unknown}. Available tasks: {sorted(ROBOTWIN_TASKS)}") + + logger.info( + "Creating RoboTwin envs | tasks=%s | n_envs(per task)=%d", + task_names, + n_envs, + ) + + is_async = env_cls is gym.vector.AsyncVectorEnv + cached_obs_space: spaces.Space | None = None + cached_act_space: spaces.Space | None = None + cached_metadata: dict[str, Any] | None = None + + out: dict[str, dict[int, Any]] = defaultdict(dict) + for task_name in task_names: + fns = _make_env_fns( + task_name=task_name, + n_envs=n_envs, + camera_names=list(camera_names), + observation_height=observation_height, + observation_width=observation_width, + episode_length=episode_length, + ) + if is_async: + lazy = _LazyAsyncVectorEnv(fns, cached_obs_space, cached_act_space, cached_metadata) + if cached_obs_space is None: + cached_obs_space = lazy.observation_space + cached_act_space = lazy.action_space + cached_metadata = lazy.metadata + out[task_name][0] = lazy + else: + out[task_name][0] = env_cls(fns) + logger.info("Built vec env | task=%s | n_envs=%d", task_name, n_envs) + + return {k: dict(v) for k, v in out.items()} diff --git a/src/lerobot/envs/utils.py b/src/lerobot/envs/utils.py index b0d834a05..6e6f352e9 100644 --- a/src/lerobot/envs/utils.py +++ b/src/lerobot/envs/utils.py @@ -34,6 +34,25 @@ from lerobot.utils.utils import get_channel_first_image_shape from .configs import EnvConfig +def parse_camera_names(camera_name: str | Sequence[str]) -> list[str]: + 
"""Normalize ``camera_name`` into a non-empty list of strings. + + Accepts a comma-separated string (``"cam_a,cam_b"``) or a sequence of + strings (tuples/lists). Whitespace is stripped; empty entries are + dropped. Raises ``TypeError`` for unsupported input types and + ``ValueError`` when the normalized list is empty. + """ + if isinstance(camera_name, str): + cams = [c.strip() for c in camera_name.split(",") if c.strip()] + elif isinstance(camera_name, (list | tuple)): + cams = [str(c).strip() for c in camera_name if str(c).strip()] + else: + raise TypeError(f"camera_name must be str or sequence[str], got {type(camera_name).__name__}") + if not cams: + raise ValueError("camera_name resolved to an empty list.") + return cams + + def _convert_nested_dict(d): result = {} for k, v in d.items(): @@ -153,17 +172,20 @@ class _LazyAsyncVectorEnv: env_fns: list[Callable], observation_space=None, action_space=None, + metadata=None, ): self._env_fns = env_fns self._env: gym.vector.AsyncVectorEnv | None = None self.num_envs = len(env_fns) - if observation_space is not None and action_space is not None: + if observation_space is not None and action_space is not None and metadata is not None: self.observation_space = observation_space self.action_space = action_space + self.metadata = metadata else: tmp = env_fns[0]() self.observation_space = tmp.observation_space self.action_space = tmp.action_space + self.metadata = tmp.metadata tmp.close() self.single_observation_space = self.observation_space self.single_action_space = self.action_space @@ -172,6 +194,10 @@ class _LazyAsyncVectorEnv: if self._env is None: self._env = gym.vector.AsyncVectorEnv(self._env_fns, context="forkserver", shared_memory=True) + @property + def unwrapped(self): + return self + def reset(self, **kwargs): self._ensure() return self._env.reset(**kwargs) diff --git a/src/lerobot/envs/vlabench.py b/src/lerobot/envs/vlabench.py new file mode 100644 index 000000000..922973a16 --- /dev/null +++ 
b/src/lerobot/envs/vlabench.py @@ -0,0 +1,589 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""VLABench environment wrapper for LeRobot. + +VLABench is a large-scale benchmark for language-conditioned robotic manipulation +with long-horizon reasoning, built on MuJoCo/dm_control. + +- Paper: https://arxiv.org/abs/2412.18194 +- GitHub: https://github.com/OpenMOSS/VLABench +- Website: https://vlabench.github.io +""" + +from __future__ import annotations + +import contextlib +import logging +from collections import defaultdict +from collections.abc import Callable, Sequence +from typing import Any + +import cv2 +import gymnasium as gym +import numpy as np +from gymnasium import spaces +from scipy.spatial.transform import Rotation + +from lerobot.types import RobotObservation + +from .utils import _LazyAsyncVectorEnv + +logger = logging.getLogger(__name__) + +ACTION_DIM = 7 # pos(3) + euler(3) + gripper(1) +ACTION_LOW = np.array([-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0], dtype=np.float32) +ACTION_HIGH = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=np.float32) + +# Default max episode steps per task type +DEFAULT_MAX_EPISODE_STEPS = 500 + +# VLABench task suites +PRIMITIVE_TASKS = [ + "select_fruit", + "select_toy", + "select_chemistry_tube", + "add_condiment", + "select_book", + "select_painting", + "select_drink", + "insert_flower", + "select_billiards", + 
"select_ingredient", + "select_mahjong", + "select_poker", + # Physical series + "density_qa", + "friction_qa", + "magnetism_qa", + "reflection_qa", + "simple_cuestick_usage", + "simple_seesaw_usage", + "sound_speed_qa", + "thermal_expansion_qa", + "weight_qa", +] + +COMPOSITE_TASKS = [ + "cluster_billiards", + "cluster_book", + "cluster_drink", + "cluster_toy", + "cook_dishes", + "cool_drink", + "find_unseen_object", + "get_coffee", + "hammer_nail", + "heat_food", + "make_juice", + "play_mahjong", + "play_math_game", + "play_poker", + "play_snooker", + "rearrange_book", + "rearrange_chemistry_tube", + "set_dining_table", + "set_study_table", + "store_food", + "take_chemistry_experiment", + "use_seesaw_complex", +] + +SUITE_TASKS: dict[str, list[str]] = { + "primitive": PRIMITIVE_TASKS, + "composite": COMPOSITE_TASKS, +} + + +class VLABenchEnv(gym.Env): + """Gymnasium wrapper for VLABench environments. + + Wraps the dm_control-based VLABench simulator behind a standard gym.Env interface. + Supports multiple cameras (front, second, wrist) and end-effector control. + """ + + metadata = {"render_modes": ["rgb_array"], "render_fps": 10} + + def __init__( + self, + task: str = "select_fruit", + obs_type: str = "pixels_agent_pos", + render_mode: str = "rgb_array", + render_resolution: tuple[int, int] = (480, 480), + robot: str = "franka", + max_episode_steps: int = DEFAULT_MAX_EPISODE_STEPS, + action_mode: str = "eef", + ): + super().__init__() + self.task = task + self.obs_type = obs_type + self.render_mode = render_mode + self.render_resolution = render_resolution + self.robot = robot + self._max_episode_steps = max_episode_steps + self.action_mode = action_mode + + # Deferred — created on first reset() inside worker subprocess to avoid + # inheriting stale GPU/EGL contexts when AsyncVectorEnv spawns workers. 
+ # We never cache `env.physics`: dm_control exposes it as a weakref + # proxy that goes stale across resets (rebuilds the sim), so we always + # refetch it via `self._env.physics` at the call site. + self._env = None + self.task_description = "" # populated on first reset + # Cached world-frame XYZ of the robot base link. The VLABench datasets + # log both `observation.state` positions and `actions` positions in + # robot-base frame (see VLABench/scripts/convert_to_lerobot.py which + # subtracts `robot_frame_pos` from ee_pos). The robot is attached at a + # fixed offset per task so this is safe to cache once per env build. + self._robot_base_xyz: np.ndarray | None = None + + h, w = self.render_resolution + + if self.obs_type == "state": + raise NotImplementedError( + "The 'state' observation type is not supported in VLABenchEnv. " + "Please use 'pixels' or 'pixels_agent_pos'." + ) + elif self.obs_type == "pixels": + self.observation_space = spaces.Dict( + { + "pixels": spaces.Dict( + { + "image": spaces.Box(low=0, high=255, shape=(h, w, 3), dtype=np.uint8), + "second_image": spaces.Box(low=0, high=255, shape=(h, w, 3), dtype=np.uint8), + "wrist_image": spaces.Box(low=0, high=255, shape=(h, w, 3), dtype=np.uint8), + } + ), + } + ) + elif self.obs_type == "pixels_agent_pos": + self.observation_space = spaces.Dict( + { + "pixels": spaces.Dict( + { + "image": spaces.Box(low=0, high=255, shape=(h, w, 3), dtype=np.uint8), + "second_image": spaces.Box(low=0, high=255, shape=(h, w, 3), dtype=np.uint8), + "wrist_image": spaces.Box(low=0, high=255, shape=(h, w, 3), dtype=np.uint8), + } + ), + "agent_pos": spaces.Box(low=-np.inf, high=np.inf, shape=(7,), dtype=np.float64), + } + ) + else: + raise ValueError(f"Unsupported obs_type: {self.obs_type}") + + self.action_space = spaces.Box(low=ACTION_LOW, high=ACTION_HIGH, dtype=np.float32) + + # Max attempts to rebuild the underlying env when MuJoCo throws + # `PhysicsError` (e.g. 
mjWARN_BADQACC) during VLABench's 20-step + # reset warm-up. Some random task/layout samples land in unstable + # initial configurations; re-sampling the layout almost always + # gives a stable one. A handful of upstream tasks (notably + # `select_mahjong`) have layout samplers that diverge often enough + # to need >>5 retries, so we pick a generous ceiling. + _ENSURE_ENV_MAX_ATTEMPTS = 20 + + def _ensure_env(self) -> None: + """Create the underlying VLABench env on first use. + + Called inside the worker subprocess after fork(), so each worker gets + its own clean rendering context rather than inheriting a stale one from + the parent process (which causes crashes with AsyncVectorEnv). + + Retries on `PhysicsError`: VLABench's `LM4ManipDMEnv.reset()` runs 20 + warm-up `step()` calls while toggling gravity/fluids to let the scene + settle; for some random layouts MuJoCo's integrator diverges and + raises `mjWARN_BADQACC`. Re-sampling the layout almost always yields + a stable one, so we retry a number of times before giving up. Between + attempts we reseed NumPy's global RNG from OS entropy so the upstream + task sampler explores fresh initial states — without this, retries + can replay the same diverging configuration when the sampler is + deterministic given the current RNG state. 
+ """ + if self._env is not None: + return + + import VLABench.robots # noqa: F401 # type: ignore[import-untyped] + import VLABench.tasks # noqa: F401 # type: ignore[import-untyped] + from dm_control.rl.control import PhysicsError # type: ignore[import-untyped] + from VLABench.envs import load_env # type: ignore[import-untyped] + + h, w = self.render_resolution + last_exc: PhysicsError | None = None + for attempt in range(1, self._ENSURE_ENV_MAX_ATTEMPTS + 1): + try: + env = load_env(task=self.task, robot=self.robot, render_resolution=(h, w)) + self._env = env + break + except PhysicsError as exc: + last_exc = exc + logger.warning( + "PhysicsError on attempt %d/%d while building task '%s': %s. Retrying with fresh layout…", + attempt, + self._ENSURE_ENV_MAX_ATTEMPTS, + self.task, + exc, + ) + np.random.seed(None) + if self._env is None: + assert last_exc is not None + raise RuntimeError( + f"VLABench task '{self.task}' failed to produce a stable " + f"initial layout after {self._ENSURE_ENV_MAX_ATTEMPTS} " + f"attempts. This task's upstream sampler diverges too " + f"often for the configured robot; consider removing it " + f"from the eval set. Last physics error: {last_exc}" + ) from last_exc + + # Extract task description from the dm_control task + task_obj = self._env.task + if hasattr(task_obj, "task_description"): + self.task_description = task_obj.task_description + elif hasattr(task_obj, "language_instruction"): + self.task_description = task_obj.language_instruction + else: + self.task_description = self.task + + # Cache robot base world position so `_build_ctrl_from_action` and + # `_get_obs` can translate between robot-frame (dataset) and + # world-frame (dm_control) without hitting physics every call. + try: + self._robot_base_xyz = np.asarray(self._env.get_robot_frame_position(), dtype=np.float64).reshape( + 3 + ) + except Exception: + # Fallback to VLABench's default Franka base position. 
+ self._robot_base_xyz = np.array([0.0, -0.4, 0.78], dtype=np.float64) + + def _get_obs(self) -> dict: + """Get current observation from the environment.""" + assert self._env is not None + + obs = self._env.get_observation() + h, w = self.render_resolution + + def _to_hwc3(arr: np.ndarray) -> np.ndarray: + """Coerce any camera array to the declared (h, w, 3) uint8 shape.""" + a = np.asarray(arr) + # Drop a leading singleton batch dim if present. + while a.ndim > 3 and a.shape[0] == 1: + a = a[0] + if a.ndim == 3 and a.shape[0] in (1, 3, 4) and a.shape[-1] not in (1, 3, 4): + # CHW → HWC + a = np.transpose(a, (1, 2, 0)) + if a.ndim == 2: + a = np.stack([a] * 3, axis=-1) + if a.ndim != 3: + return np.zeros((h, w, 3), dtype=np.uint8) + # Force 3 channels. + if a.shape[-1] == 1: + a = np.repeat(a, 3, axis=-1) + elif a.shape[-1] == 4: + a = a[..., :3] + elif a.shape[-1] != 3: + return np.zeros((h, w, 3), dtype=np.uint8) + if a.shape[:2] != (h, w): + a = cv2.resize(a, (w, h), interpolation=cv2.INTER_AREA) + return a.astype(np.uint8) + + # Extract camera images — VLABench returns (n_cameras, C, H, W) or individual arrays + raw_frames: list[np.ndarray] = [] + if "rgb" in obs: + rgb = obs["rgb"] + if isinstance(rgb, np.ndarray): + if rgb.ndim == 4: + raw_frames = [rgb[i] for i in range(rgb.shape[0])] + elif rgb.ndim == 3: + raw_frames = [rgb] + + image_keys = ["image", "second_image", "wrist_image"] + images: dict[str, np.ndarray] = {} + for i, key in enumerate(image_keys): + if i < len(raw_frames): + images[key] = _to_hwc3(raw_frames[i]) + else: + images[key] = np.zeros((h, w, 3), dtype=np.uint8) + + # Convert VLABench's raw ee_state `[pos_world(3), quat_wxyz(4), open(1)]` + # to the dataset's observation.state layout `[pos_robot(3), euler_xyz(3), + # gripper(1)]`. See VLABench/scripts/convert_to_lerobot.py — positions + # are stored in robot-base frame and orientations as scipy extrinsic + # 'xyz' euler angles. 
+ raw = np.asarray(obs.get("ee_state", np.zeros(8)), dtype=np.float64).ravel() + pos_world = raw[:3] if raw.size >= 3 else np.zeros(3, dtype=np.float64) + quat_wxyz = raw[3:7] if raw.size >= 7 else np.array([1.0, 0.0, 0.0, 0.0], dtype=np.float64) + gripper = float(raw[7]) if raw.size >= 8 else 0.0 + + base = self._robot_base_xyz if self._robot_base_xyz is not None else np.zeros(3, dtype=np.float64) + pos_robot = pos_world - base + euler_xyz = Rotation.from_quat([quat_wxyz[1], quat_wxyz[2], quat_wxyz[3], quat_wxyz[0]]).as_euler( + "xyz", degrees=False + ) + + ee_state = np.concatenate([pos_robot, euler_xyz, [gripper]]).astype(np.float64) + + if self.obs_type == "pixels": + return {"pixels": images} + elif self.obs_type == "pixels_agent_pos": + return { + "pixels": images, + "agent_pos": ee_state.astype(np.float64), + } + else: + raise ValueError(f"Unknown obs_type: {self.obs_type}") + + # ---- Action adaptation (EEF → joint ctrl) -------------------------------- + # + # The HF vlabench datasets log 7D actions + # `[x, y, z (robot frame), rx, ry, rz (scipy extrinsic xyz), gripper]`, + # exactly matching VLABench's own eval pipeline (evaluator.base): + # pos, euler, g = policy(...) + # quat = euler_to_quaternion(*euler) # extrinsic xyz -> wxyz + # _, qpos = robot.get_qpos_from_ee_pos(physics, pos=pos + base, quat=quat) + # env.step(np.concatenate([qpos, [g, g]])) + # + # VLABench's dm_control task writes `data.ctrl[:] = action` directly — for + # Franka that's 9 entries (7 arm joints + 2 gripper fingers). We mirror the + # above conversion so the policy's EEF commands actually drive the robot. + + _FRANKA_FINGER_OPEN = 0.04 # qpos when gripper fully open + + def _build_ctrl_from_action(self, action: np.ndarray, ctrl_dim: int) -> np.ndarray: + """Convert a 7D EEF action into the `ctrl_dim`-sized joint command vector. + + For the Franka default (ctrl_dim=9): 7 arm joint qposes (via IK) + + 2 gripper finger qposes (open/closed based on the gripper scalar). 
+ If the action is already joint-space (shape matches ctrl_dim), pass + through. + """ + if action.shape[0] == ctrl_dim: + return action.astype(np.float64, copy=False) + + if action.shape[0] != 7: + # Unknown layout — fall back to zero-pad so the sim doesn't crash. + padded = np.zeros(ctrl_dim, dtype=np.float64) + padded[: min(action.shape[0], ctrl_dim)] = action[:ctrl_dim] + return padded + + from dm_control.utils.inverse_kinematics import qpos_from_site_pose + + # Action position is in robot-base frame (see convert_to_lerobot.py); + # dm_control's IK expects a world-frame target. + base = self._robot_base_xyz if self._robot_base_xyz is not None else np.zeros(3, dtype=np.float64) + pos_world = np.asarray(action[:3], dtype=np.float64) + base + rx, ry, rz = float(action[3]), float(action[4]), float(action[5]) + gripper = float(np.clip(action[6], 0.0, 1.0)) + + # Dataset euler is scipy extrinsic 'xyz' (same as VLABench's + # `euler_to_quaternion`). scipy emits `[x, y, z, w]`; dm_control's IK + # and MuJoCo use `[w, x, y, z]`, so reorder. + qxyzw = Rotation.from_euler("xyz", [rx, ry, rz], degrees=False).as_quat() + quat = np.array([qxyzw[3], qxyzw[0], qxyzw[1], qxyzw[2]], dtype=np.float64) + + assert self._env is not None + robot = self._env.task.robot + site_name = robot.end_effector_site.full_identifier + + # inplace=False so IK doesn't mutate physics state mid-step — we only + # want the solved qpos. Fetch a fresh physics handle — caching it can + # yield a stale weakref after a reset. + ik_result = qpos_from_site_pose( + self._env.physics, + site_name=site_name, + target_pos=pos_world, + target_quat=quat, + inplace=False, + max_steps=100, + ) + n_dof = robot.n_dof # 7 for Franka + arm_qpos = ik_result.qpos[:n_dof] + + # Dataset gripper convention: 1 = open (finger qpos = 0.04), + # 0 = closed (finger qpos = 0.0). See VLABench/scripts/convert_to_lerobot.py + # where `trajectory[i][-1] > 0.03` is encoded as `1`. 
+ finger_qpos = gripper * self._FRANKA_FINGER_OPEN + + ctrl = np.zeros(ctrl_dim, dtype=np.float64) + ctrl[:n_dof] = arm_qpos + # Remaining entries are gripper fingers (usually 2 for Franka). + ctrl[n_dof:] = finger_qpos + return ctrl + + def reset(self, seed=None, **kwargs) -> tuple[RobotObservation, dict[str, Any]]: + self._ensure_env() + assert self._env is not None + super().reset(seed=seed) + + if seed is not None: + self._seed_inner_env(int(self.np_random.integers(0, 2**31 - 1))) + + self._env.reset() + + observation = self._get_obs() + info = {"is_success": False} + return observation, info + + def _seed_inner_env(self, seed: int) -> None: + """Propagate `seed` to the inner dm_control env. `Environment.reset()` + doesn't accept a seed, so we re-seed the task and environment + `RandomState`s directly. Best-effort: silently skipped when the + expected attributes are absent on a given VLABench version. + """ + for owner_attr, rng_attr in (("task", "random"), (None, "_random_state")): + owner = getattr(self._env, owner_attr) if owner_attr else self._env + rng = getattr(owner, rng_attr, None) + rng_seed = getattr(rng, "seed", None) + if callable(rng_seed): + rng_seed(seed) + + def step(self, action: np.ndarray) -> tuple[RobotObservation, float, bool, bool, dict[str, Any]]: + from dm_control.rl.control import PhysicsError # type: ignore[import-untyped] + + self._ensure_env() + assert self._env is not None + + if action.ndim != 1: + raise ValueError( + f"Expected action to be 1-D (shape (action_dim,)), " + f"but got shape {action.shape} with ndim={action.ndim}" + ) + + if self.action_mode not in ("eef", "joint", "delta_eef"): + raise ValueError(f"Unknown action_mode: {self.action_mode}") + + # Always refetch physics — dm_control returns a weakref proxy that can + # go stale across resets. 
+ physics = self._env.physics + ctrl_dim = int(physics.data.ctrl.shape[0]) + ctrl = self._build_ctrl_from_action(action, ctrl_dim) + try: + timestep = self._env.step(ctrl) + except PhysicsError as exc: + # Physics integrator diverged (e.g. mjWARN_BADQACC). Treat it as + # a graceful failed termination rather than a hard crash — the + # rest of the multi-task eval should still run. + logger.warning( + "PhysicsError during step on task '%s': %s. Terminating episode.", + self.task, + exc, + ) + observation = self._get_obs() + info = {"task": self.task, "is_success": False, "physics_error": True} + # Drop the stale env so the next reset() rebuilds it cleanly. + with contextlib.suppress(Exception): + self._env.close() + self._env = None + return observation, 0.0, True, False, info + + # Extract reward from dm_control timestep + reward = float(timestep.reward) if timestep.reward is not None else 0.0 + + # Check success via the task's termination condition + is_success = False + if hasattr(self._env, "task") and hasattr(self._env.task, "should_terminate_episode"): + is_success = bool(self._env.task.should_terminate_episode(self._env.physics)) + + terminated = is_success + truncated = False + info = { + "task": self.task, + "is_success": is_success, + } + + observation = self._get_obs() + + if terminated: + self.reset() + + return observation, reward, terminated, truncated, info + + def render(self) -> np.ndarray: + self._ensure_env() + obs = self._get_obs() + return obs["pixels"]["image"] + + def close(self): + if self._env is not None: + self._env.close() + self._env = None + + +# ---- Main API ---------------------------------------------------------------- + + +def create_vlabench_envs( + task: str, + n_envs: int, + gym_kwargs: dict[str, Any] | None = None, + env_cls: Callable[[Sequence[Callable[[], Any]]], Any] | None = None, +) -> dict[str, dict[int, Any]]: + """ + Create vectorized VLABench environments with a consistent return shape. 
+ + Returns: + dict[suite_name][task_id] -> vec_env (env_cls([...]) with exactly n_envs factories) + + Notes: + - n_envs is the number of rollouts *per task*. + - `task` can be a suite name ("primitive", "composite"), a comma-separated list of + suite names, or individual task names (e.g. "select_fruit,heat_food"). + """ + if env_cls is None or not callable(env_cls): + raise ValueError("env_cls must be a callable that wraps a list of environment factory callables.") + if not isinstance(n_envs, int) or n_envs <= 0: + raise ValueError(f"n_envs must be a positive int; got {n_envs}.") + + gym_kwargs = dict(gym_kwargs or {}) + task_groups = [t.strip() for t in task.split(",") if t.strip()] + if not task_groups: + raise ValueError("`task` must contain at least one VLABench task or suite name.") + + logger.info( + "Creating VLABench envs | task_groups=%s | n_envs(per task)=%d", + task_groups, + n_envs, + ) + + is_async = env_cls is gym.vector.AsyncVectorEnv + cached_obs_space = None + cached_act_space = None + cached_metadata = None + out: dict[str, dict[int, Any]] = defaultdict(dict) + + for group in task_groups: + # Check if it's a suite name, otherwise treat as individual task + tasks = SUITE_TASKS.get(group, [group]) + + for tid, task_name in enumerate(tasks): + logger.info( + "Building vec env | group=%s | task_id=%d | task=%s", + group, + tid, + task_name, + ) + + fns = [(lambda tn=task_name: VLABenchEnv(task=tn, **gym_kwargs)) for _ in range(n_envs)] + + if is_async: + lazy = _LazyAsyncVectorEnv(fns, cached_obs_space, cached_act_space, cached_metadata) + if cached_obs_space is None: + cached_obs_space = lazy.observation_space + cached_act_space = lazy.action_space + cached_metadata = lazy.metadata + out[group][tid] = lazy + else: + out[group][tid] = env_cls(fns) + + return {group: dict(task_map) for group, task_map in out.items()} diff --git a/src/lerobot/model/kinematics.py b/src/lerobot/model/kinematics.py index 95d3b235c..01705ded5 100644 --- 
a/src/lerobot/model/kinematics.py +++ b/src/lerobot/model/kinematics.py @@ -12,8 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + +from typing import TYPE_CHECKING + import numpy as np +from lerobot.utils.import_utils import _placo_available, require_package + +if TYPE_CHECKING or _placo_available: + import placo # type: ignore[import-not-found] +else: + placo = None + class RobotKinematics: """Robot kinematics using placo library for forward and inverse kinematics.""" @@ -32,13 +43,7 @@ class RobotKinematics: target_frame_name (str): Name of the end-effector frame in the URDF joint_names (list[str] | None): List of joint names to use for the kinematics solver """ - try: - import placo # type: ignore[import-not-found] # C++ library with Python bindings, no type stubs available. TODO: Create stub file or request upstream typing support. - except ImportError as e: - raise ImportError( - "placo is required for RobotKinematics. " - "Please install the optional dependencies of `kinematics` in the package." 
- ) from e + require_package("placo", extra="placo-dep") self.robot = placo.RobotWrapper(urdf_path) self.solver = placo.KinematicsSolver(self.robot) diff --git a/src/lerobot/motors/damiao/damiao.py b/src/lerobot/motors/damiao/damiao.py index ae619f159..572741cb4 100644 --- a/src/lerobot/motors/damiao/damiao.py +++ b/src/lerobot/motors/damiao/damiao.py @@ -24,7 +24,7 @@ from functools import cached_property from typing import TYPE_CHECKING, Any, TypedDict from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected -from lerobot.utils.import_utils import _can_available +from lerobot.utils.import_utils import _can_available, require_package if TYPE_CHECKING or _can_available: import can @@ -111,6 +111,7 @@ class DamiaoMotorsBus(MotorsBusBase): bitrate: Nominal bitrate in bps (default: 1000000 = 1 Mbps) data_bitrate: Data bitrate for CAN FD in bps (default: 5000000 = 5 Mbps), ignored if use_can_fd is False """ + require_package("python-can", extra="damiao", import_name="can") super().__init__(port, motors, calibration) self.port = port self.can_interface = can_interface diff --git a/src/lerobot/motors/motors_bus.py b/src/lerobot/motors/motors_bus.py index 209489bb9..4688eaa7f 100644 --- a/src/lerobot/motors/motors_bus.py +++ b/src/lerobot/motors/motors_bus.py @@ -356,8 +356,8 @@ class SerialMotorsBus(MotorsBusBase): motors: dict[str, Motor], calibration: dict[str, MotorCalibration] | None = None, ): - require_package("pyserial", extra="hardware", import_name="serial") - require_package("deepdiff", extra="hardware") + require_package("pyserial", extra="pyserial-dep", import_name="serial") + require_package("deepdiff", extra="deepdiff-dep") super().__init__(port, motors, calibration) self.port_handler: PortHandler diff --git a/src/lerobot/motors/robstride/robstride.py b/src/lerobot/motors/robstride/robstride.py index f47e41509..ecde01e9a 100644 --- a/src/lerobot/motors/robstride/robstride.py +++ b/src/lerobot/motors/robstride/robstride.py @@ 
-23,12 +23,12 @@ from types import SimpleNamespace from typing import TYPE_CHECKING, Any, TypedDict from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected -from lerobot.utils.import_utils import _can_available +from lerobot.utils.import_utils import _can_available, require_package if TYPE_CHECKING or _can_available: import can else: - can = SimpleNamespace(Message=object, interface=None) + can = SimpleNamespace(Message=object, interface=None, BusABC=object) import numpy as np from lerobot.utils.errors import DeviceNotConnectedError @@ -106,6 +106,7 @@ class RobstrideMotorsBus(MotorsBusBase): bitrate: Nominal bitrate in bps (default: 1000000 = 1 Mbps) data_bitrate: Data bitrate for CAN FD in bps (default: 5000000 = 5 Mbps), ignored if use_can_fd is False """ + require_package("python-can", extra="robstride", import_name="can") super().__init__(port, motors, calibration) self.port = port self.can_interface = can_interface diff --git a/src/lerobot/optim/schedulers.py b/src/lerobot/optim/schedulers.py index 914edd2db..250650089 100644 --- a/src/lerobot/optim/schedulers.py +++ b/src/lerobot/optim/schedulers.py @@ -18,14 +18,21 @@ import logging import math from dataclasses import asdict, dataclass from pathlib import Path +from typing import TYPE_CHECKING import draccus from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR, LRScheduler from lerobot.utils.constants import SCHEDULER_STATE +from lerobot.utils.import_utils import _diffusers_available, require_package from lerobot.utils.io_utils import deserialize_json_into_object, write_json +if TYPE_CHECKING or _diffusers_available: + from diffusers.optimization import get_scheduler +else: + get_scheduler = None + @dataclass class LRSchedulerConfig(draccus.ChoiceRegistry, abc.ABC): @@ -47,10 +54,7 @@ class DiffuserSchedulerConfig(LRSchedulerConfig): num_warmup_steps: int | None = None def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR: - 
from lerobot.utils.import_utils import require_package - require_package("diffusers", extra="diffusion") - from diffusers.optimization import get_scheduler kwargs = {**asdict(self), "num_training_steps": num_training_steps, "optimizer": optimizer} return get_scheduler(**kwargs) diff --git a/src/lerobot/policies/__init__.py b/src/lerobot/policies/__init__.py index ea14e22a0..ab25ed004 100644 --- a/src/lerobot/policies/__init__.py +++ b/src/lerobot/policies/__init__.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from lerobot.utils.action_interpolator import ActionInterpolator as ActionInterpolator + from .act.configuration_act import ACTConfig as ACTConfig from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig from .factory import get_policy_class, make_policy, make_policy_config, make_pre_post_processors @@ -25,7 +27,6 @@ from .pi0.configuration_pi0 import PI0Config as PI0Config from .pi0_fast.configuration_pi0_fast import PI0FastConfig as PI0FastConfig from .pi05.configuration_pi05 import PI05Config as PI05Config from .pretrained import PreTrainedPolicy as PreTrainedPolicy -from .rtc import ActionInterpolator as ActionInterpolator from .sarm.configuration_sarm import SARMConfig as SARMConfig from .smolvla.configuration_smolvla import SmolVLAConfig as SmolVLAConfig from .tdmpc.configuration_tdmpc import TDMPCConfig as TDMPCConfig diff --git a/src/lerobot/policies/act/modeling_act.py b/src/lerobot/policies/act/modeling_act.py index 0120258ee..5651fbfb1 100644 --- a/src/lerobot/policies/act/modeling_act.py +++ b/src/lerobot/policies/act/modeling_act.py @@ -142,9 +142,10 @@ class ACTPolicy(PreTrainedPolicy): actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch) - l1_loss = ( - F.l1_loss(batch[ACTION], actions_hat, reduction="none") * ~batch["action_is_pad"].unsqueeze(-1) - ).mean() + abs_err = F.l1_loss(batch[ACTION], actions_hat, reduction="none") + 
valid_mask = ~batch["action_is_pad"].unsqueeze(-1) + num_valid = valid_mask.sum() * abs_err.shape[-1] + l1_loss = (abs_err * valid_mask).sum() / num_valid.clamp_min(1) loss_dict = {"l1_loss": l1_loss.item()} if self.config.use_vae: diff --git a/src/lerobot/policies/diffusion/modeling_diffusion.py b/src/lerobot/policies/diffusion/modeling_diffusion.py index 5b3b97571..9fbe1f703 100644 --- a/src/lerobot/policies/diffusion/modeling_diffusion.py +++ b/src/lerobot/policies/diffusion/modeling_diffusion.py @@ -23,6 +23,7 @@ TODO(alexander-soare): import math from collections import deque from collections.abc import Callable +from typing import TYPE_CHECKING import einops import numpy as np @@ -32,6 +33,14 @@ import torchvision from torch import Tensor, nn from lerobot.utils.constants import ACTION, OBS_ENV_STATE, OBS_IMAGES, OBS_STATE +from lerobot.utils.import_utils import _diffusers_available, require_package + +if TYPE_CHECKING or _diffusers_available: + from diffusers.schedulers.scheduling_ddim import DDIMScheduler + from diffusers.schedulers.scheduling_ddpm import DDPMScheduler +else: + DDIMScheduler = None + DDPMScheduler = None from ..pretrained import PreTrainedPolicy from ..utils import ( @@ -64,6 +73,7 @@ class DiffusionPolicy(PreTrainedPolicy): dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected that they will be passed with a call to `load_state_dict` before the policy is used. """ + require_package("diffusers", extra="diffusion") super().__init__(config) config.validate_features() self.config = config @@ -155,11 +165,7 @@ def _make_noise_scheduler(name: str, **kwargs: dict): Factory for noise scheduler instances of the requested type. All kwargs are passed to the scheduler. 
""" - from lerobot.utils.import_utils import require_package - require_package("diffusers", extra="diffusion") - from diffusers.schedulers.scheduling_ddim import DDIMScheduler - from diffusers.schedulers.scheduling_ddpm import DDPMScheduler if name == "DDPM": return DDPMScheduler(**kwargs) @@ -374,7 +380,9 @@ class DiffusionModel(nn.Module): f"{self.config.do_mask_loss_for_padding=}." ) in_episode_bound = ~batch["action_is_pad"] - loss = loss * in_episode_bound.unsqueeze(-1) + mask = in_episode_bound.unsqueeze(-1) + num_valid = mask.sum() * loss.shape[-1] + return (loss * mask).sum() / num_valid.clamp_min(1) return loss.mean() diff --git a/src/lerobot/policies/groot/groot_n1.py b/src/lerobot/policies/groot/groot_n1.py index abcbb8a8c..381c5fbd6 100644 --- a/src/lerobot/policies/groot/groot_n1.py +++ b/src/lerobot/policies/groot/groot_n1.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from dataclasses import dataclass, field from pathlib import Path from typing import TYPE_CHECKING @@ -174,17 +173,14 @@ N_COLOR_CHANNELS = 3 # config -@dataclass class GR00TN15Config(PretrainedConfig): model_type = "gr00t_n1_5" - backbone_cfg: dict = field(init=False, metadata={"help": "Backbone configuration."}) - action_head_cfg: dict = field(init=False, metadata={"help": "Action head configuration."}) - - action_horizon: int = field(init=False, metadata={"help": "Action horizon."}) - - action_dim: int = field(init=False, metadata={"help": "Action dimension."}) - compute_dtype: str = field(default="float32", metadata={"help": "Compute dtype."}) + backbone_cfg: dict + action_head_cfg: dict + action_horizon: int + action_dim: int + compute_dtype: str = "float32" def __init__(self, **kwargs): super().__init__(**kwargs) diff --git a/src/lerobot/policies/groot/modeling_groot.py b/src/lerobot/policies/groot/modeling_groot.py index 4b612bca4..2e2e9ca89 100644 --- a/src/lerobot/policies/groot/modeling_groot.py +++ 
b/src/lerobot/policies/groot/modeling_groot.py @@ -43,6 +43,7 @@ from torch import Tensor from lerobot.configs import FeatureType, PolicyFeature from lerobot.utils.constants import ACTION, OBS_IMAGES +from lerobot.utils.import_utils import require_package from ..pretrained import PreTrainedPolicy from .configuration_groot import GrootConfig @@ -59,6 +60,7 @@ class GrootPolicy(PreTrainedPolicy): def __init__(self, config: GrootConfig, **kwargs): """Initialize Groot policy wrapper.""" + require_package("transformers", extra="groot") super().__init__(config) config.validate_features() self.config = config diff --git a/src/lerobot/policies/multi_task_dit/modeling_multi_task_dit.py b/src/lerobot/policies/multi_task_dit/modeling_multi_task_dit.py index 8e5d1e3cb..ceb4e211c 100644 --- a/src/lerobot/policies/multi_task_dit/modeling_multi_task_dit.py +++ b/src/lerobot/policies/multi_task_dit/modeling_multi_task_dit.py @@ -36,7 +36,7 @@ import torch.nn.functional as F # noqa: N812 import torchvision from torch import Tensor -from lerobot.utils.import_utils import _transformers_available +from lerobot.utils.import_utils import _diffusers_available, _transformers_available, require_package from .configuration_multi_task_dit import MultiTaskDiTConfig @@ -46,6 +46,13 @@ if TYPE_CHECKING or _transformers_available: else: CLIPTextModel = None CLIPVisionModel = None + +if TYPE_CHECKING or _diffusers_available: + from diffusers.schedulers.scheduling_ddim import DDIMScheduler + from diffusers.schedulers.scheduling_ddpm import DDPMScheduler +else: + DDIMScheduler = None + DDPMScheduler = None from lerobot.utils.constants import ( ACTION, OBS_IMAGES, @@ -65,6 +72,8 @@ class MultiTaskDiTPolicy(PreTrainedPolicy): name = "multi_task_dit" def __init__(self, config: MultiTaskDiTConfig, **kwargs): + require_package("transformers", extra="multi_task_dit") + require_package("diffusers", extra="multi_task_dit") super().__init__(config) config.validate_features() self.config = config @@ -643,12 
+652,6 @@ class DiffusionObjective(nn.Module): "prediction_type": config.prediction_type, } - from lerobot.utils.import_utils import require_package - - require_package("diffusers", extra="multi_task_dit") - from diffusers.schedulers.scheduling_ddim import DDIMScheduler - from diffusers.schedulers.scheduling_ddpm import DDPMScheduler - if config.noise_scheduler_type == "DDPM": self.noise_scheduler: DDPMScheduler | DDIMScheduler = DDPMScheduler(**scheduler_kwargs) elif config.noise_scheduler_type == "DDIM": @@ -685,8 +688,9 @@ class DiffusionObjective(nn.Module): loss = F.mse_loss(predicted, target, reduction="none") if self.do_mask_loss_for_padding and "action_is_pad" in batch: - valid_actions = ~batch["action_is_pad"] - loss = loss * valid_actions.unsqueeze(-1) + mask = ~batch["action_is_pad"].unsqueeze(-1) + num_valid = mask.sum() * loss.shape[-1] + return (loss * mask).sum() / num_valid.clamp_min(1) return loss.mean() @@ -749,8 +753,9 @@ class FlowMatchingObjective(nn.Module): loss = F.mse_loss(predicted_velocity, target_velocity, reduction="none") if self.do_mask_loss_for_padding and "action_is_pad" in batch: - valid_mask = ~batch["action_is_pad"] - loss = loss * valid_mask.unsqueeze(-1) + mask = ~batch["action_is_pad"].unsqueeze(-1) + num_valid = mask.sum() * loss.shape[-1] + return (loss * mask).sum() / num_valid.clamp_min(1) return loss.mean() diff --git a/src/lerobot/policies/pi0/modeling_pi0.py b/src/lerobot/policies/pi0/modeling_pi0.py index 22e4e6a26..3534c7ae8 100644 --- a/src/lerobot/policies/pi0/modeling_pi0.py +++ b/src/lerobot/policies/pi0/modeling_pi0.py @@ -26,7 +26,7 @@ import torch import torch.nn.functional as F # noqa: N812 from torch import Tensor, nn -from lerobot.utils.import_utils import _transformers_available +from lerobot.utils.import_utils import _transformers_available, require_package # Conditional import for type checking and lazy loading if TYPE_CHECKING or _transformers_available: @@ -947,6 +947,7 @@ class 
PI0Policy(PreTrainedPolicy): Args: config: Policy configuration class instance. """ + require_package("transformers", extra="pi") super().__init__(config) config.validate_features() self.config = config diff --git a/src/lerobot/policies/pi05/modeling_pi05.py b/src/lerobot/policies/pi05/modeling_pi05.py index a44817a74..56786fbcd 100644 --- a/src/lerobot/policies/pi05/modeling_pi05.py +++ b/src/lerobot/policies/pi05/modeling_pi05.py @@ -26,7 +26,7 @@ import torch import torch.nn.functional as F # noqa: N812 from torch import Tensor, nn -from lerobot.utils.import_utils import _transformers_available +from lerobot.utils.import_utils import _transformers_available, require_package # Conditional import for type checking and lazy loading if TYPE_CHECKING or _transformers_available: @@ -918,6 +918,7 @@ class PI05Policy(PreTrainedPolicy): Args: config: Policy configuration class instance. """ + require_package("transformers", extra="pi") super().__init__(config) config.validate_features() self.config = config diff --git a/src/lerobot/policies/pi0_fast/modeling_pi0_fast.py b/src/lerobot/policies/pi0_fast/modeling_pi0_fast.py index e86b8ad27..0bc301609 100644 --- a/src/lerobot/policies/pi0_fast/modeling_pi0_fast.py +++ b/src/lerobot/policies/pi0_fast/modeling_pi0_fast.py @@ -26,7 +26,7 @@ import torch import torch.nn.functional as F # noqa: N812 from torch import Tensor, nn -from lerobot.utils.import_utils import _scipy_available, _transformers_available +from lerobot.utils.import_utils import _scipy_available, _transformers_available, require_package # Conditional import for type checking and lazy loading if TYPE_CHECKING or _scipy_available: @@ -35,7 +35,7 @@ else: idct = None if TYPE_CHECKING or _transformers_available: - from transformers import AutoTokenizer + from transformers import AutoProcessor, AutoTokenizer from transformers.models.auto import CONFIG_MAPPING from ..pi_gemma import ( @@ -44,6 +44,7 @@ if TYPE_CHECKING or _transformers_available: ) else: 
CONFIG_MAPPING = None + AutoProcessor = None AutoTokenizer = None PiGemmaModel = None PaliGemmaForConditionalGenerationWithPiGemma = None @@ -226,6 +227,7 @@ class PI0FastPaliGemma(nn.Module): # forward(..., adarms_cond=...) is supported (same as pi0/pi05). if use_adarms[0]: text_config = self.paligemma.config.text_config + del self.paligemma.model.language_model self.paligemma.model.language_model = PiGemmaModel(text_config) self.to_bfloat16_for_selected_params(precision) @@ -826,14 +828,14 @@ class PI0FastPolicy(PreTrainedPolicy): Args: config: Policy configuration class instance. """ + require_package("transformers", extra="pi") + require_package("scipy", extra="pi") super().__init__(config) config.validate_features() self.config = config # Load tokenizers first try: - from transformers import AutoProcessor, AutoTokenizer - # Load FAST tokenizer self.action_tokenizer = AutoProcessor.from_pretrained( config.action_tokenizer_name, trust_remote_code=True diff --git a/src/lerobot/policies/pi_gemma.py b/src/lerobot/policies/pi_gemma.py index 05f031d08..9986f9b79 100644 --- a/src/lerobot/policies/pi_gemma.py +++ b/src/lerobot/policies/pi_gemma.py @@ -197,6 +197,9 @@ class PiGemmaModel(GemmaModel): # type: ignore[misc] def __init__(self, config: GemmaConfig, **kwargs): super().__init__(config, **kwargs) + # Free parent-allocated layers/norm before replacing to avoid ~2x peak memory. 
+ del self.layers + del self.norm # if not getattr(config, "use_adarms", False): # return cond_dim = getattr(config, "adarms_cond_dim", None) @@ -328,6 +331,7 @@ class PiGemmaForCausalLM(GemmaForCausalLM): # type: ignore[misc] def __init__(self, config: GemmaConfig, **kwargs): super().__init__(config, **kwargs) + del self.model self.model = PiGemmaModel(config) @@ -336,6 +340,7 @@ class PaliGemmaModelWithPiGemma(PaliGemmaModel): def __init__(self, config): super().__init__(config) + del self.language_model self.language_model = PiGemmaModel(config.text_config) @@ -344,6 +349,7 @@ class PaliGemmaForConditionalGenerationWithPiGemma(PaliGemmaForConditionalGenera def __init__(self, config): super().__init__(config) + del self.model self.model = PaliGemmaModelWithPiGemma(config) # Make modules available through conditional class for BC diff --git a/src/lerobot/policies/rtc/__init__.py b/src/lerobot/policies/rtc/__init__.py index 7a29dcac0..16417b3cd 100644 --- a/src/lerobot/policies/rtc/__init__.py +++ b/src/lerobot/policies/rtc/__init__.py @@ -19,6 +19,7 @@ from .action_queue import ActionQueue from .configuration_rtc import RTCConfig from .latency_tracker import LatencyTracker from .modeling_rtc import RTCProcessor +from .relative import reanchor_relative_rtc_prefix __all__ = [ "ActionInterpolator", @@ -26,4 +27,5 @@ __all__ = [ "LatencyTracker", "RTCConfig", "RTCProcessor", + "reanchor_relative_rtc_prefix", ] diff --git a/src/lerobot/policies/rtc/action_interpolator.py b/src/lerobot/policies/rtc/action_interpolator.py index 222dc33b5..c30481d3b 100644 --- a/src/lerobot/policies/rtc/action_interpolator.py +++ b/src/lerobot/policies/rtc/action_interpolator.py @@ -1,116 +1,4 @@ -# Copyright 2025 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Moved to lerobot.utils.action_interpolator — re-exported for backwards compatibility. +from lerobot.utils.action_interpolator import ActionInterpolator -"""Action interpolation for smoother robot control. - -Provides configurable Nx control rate by interpolating between consecutive actions. -Useful with RTC and action-chunking policies to reduce jerkiness. -""" - -from torch import Tensor - - -class ActionInterpolator: - """Interpolates between consecutive actions for smoother control. - - When enabled with multiplier N, produces N actions per policy action - by linearly interpolating between the previous and current action. - - Example with multiplier=3: - prev_action -> [1/3 interpolated, 2/3 interpolated, current_action] - - This effectively multiplies the control rate for smoother motion. - - Usage: - interpolator = ActionInterpolator(multiplier=2) # 2x control rate - - # In control loop: - if interpolator.needs_new_action(): - new_action = queue.get() - if new_action: - interpolator.add(new_action.cpu()) - - action = interpolator.get() - if action: - robot.send_action(action) - """ - - def __init__(self, multiplier: int = 1): - """Initialize the interpolator. - - Args: - multiplier: Control rate multiplier (1 = no interpolation, 2 = 2x, 3 = 3x, etc.) 
- """ - if multiplier < 1: - raise ValueError(f"multiplier must be >= 1, got {multiplier}") - self.multiplier = multiplier - self._prev: Tensor | None = None - self._buffer: list[Tensor] = [] - self._idx = 0 - - @property - def enabled(self) -> bool: - """Whether interpolation is active (multiplier > 1).""" - return self.multiplier > 1 - - def reset(self): - """Reset interpolation state (call between episodes).""" - self._prev = None - self._buffer = [] - self._idx = 0 - - def needs_new_action(self) -> bool: - """Check if a new action is needed from the queue.""" - return self._idx >= len(self._buffer) - - def add(self, action: Tensor) -> None: - """Add a new action and compute interpolated sequence. - - Args: - action: New action tensor from policy/queue (already on CPU). - """ - if self.multiplier > 1 and self._prev is not None: - self._buffer = [] - for i in range(1, self.multiplier + 1): - t = i / self.multiplier - interp = self._prev + t * (action - self._prev) - self._buffer.append(interp) - else: - # First step: no previous action yet, so run at base FPS without interpolation. - self._buffer = [action.clone()] - self._prev = action.clone() - self._idx = 0 - - def get(self) -> Tensor | None: - """Get the next interpolated action. - - Returns: - Next action tensor, or None if buffer is exhausted. - """ - if self._idx >= len(self._buffer): - return None - action = self._buffer[self._idx] - self._idx += 1 - return action - - def get_control_interval(self, fps: float) -> float: - """Get the control interval based on interpolation multiplier. - - Args: - fps: Base frames per second. - - Returns: - Control interval in seconds (divided by multiplier). 
- """ - return 1.0 / (fps * self.multiplier) +__all__ = ["ActionInterpolator"] diff --git a/src/lerobot/policies/rtc/action_queue.py b/src/lerobot/policies/rtc/action_queue.py index dbbdc41df..199257b12 100644 --- a/src/lerobot/policies/rtc/action_queue.py +++ b/src/lerobot/policies/rtc/action_queue.py @@ -92,10 +92,10 @@ class ActionQueue: Returns: int: Number of unconsumed actions. """ - if self.queue is None: - return 0 - length = len(self.queue) - return length - self.last_index + with self.lock: + if self.queue is None: + return 0 + return len(self.queue) - self.last_index def empty(self) -> bool: """Check if the queue is empty. @@ -103,11 +103,10 @@ class ActionQueue: Returns: bool: True if no actions remain, False otherwise. """ - if self.queue is None: - return True - - length = len(self.queue) - return length - self.last_index <= 0 + with self.lock: + if self.queue is None: + return True + return len(self.queue) - self.last_index <= 0 def get_action_index(self) -> int: """Get the current action consumption index. @@ -115,7 +114,8 @@ class ActionQueue: Returns: int: Index of the next action to be consumed. """ - return self.last_index + with self.lock: + return self.last_index def get_left_over(self) -> Tensor | None: """Get leftover original actions for RTC prev_chunk_left_over. diff --git a/src/lerobot/policies/rtc/configuration_rtc.py b/src/lerobot/policies/rtc/configuration_rtc.py index c70fe3de0..3d71edf26 100644 --- a/src/lerobot/policies/rtc/configuration_rtc.py +++ b/src/lerobot/policies/rtc/configuration_rtc.py @@ -35,7 +35,7 @@ class RTCConfig: """ # Infrastructure - enabled: bool = False + enabled: bool = True # Core RTC settings # Todo change to exp diff --git a/src/lerobot/policies/rtc/relative.py b/src/lerobot/policies/rtc/relative.py new file mode 100644 index 000000000..61063f3e2 --- /dev/null +++ b/src/lerobot/policies/rtc/relative.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Relative-action helpers for Real-Time Chunking (RTC).""" + +from __future__ import annotations + +import torch + +from lerobot.processor import ( + NormalizerProcessorStep, + RelativeActionsProcessorStep, + TransitionKey, + create_transition, + to_relative_actions, +) + + +def reanchor_relative_rtc_prefix( + prev_actions_absolute: torch.Tensor, + current_state: torch.Tensor, + relative_step: RelativeActionsProcessorStep, + normalizer_step: NormalizerProcessorStep | None, + policy_device: torch.device | str, +) -> torch.Tensor: + """Convert absolute leftover actions into model-space for relative-action RTC policies. + + When using relative actions, the RTC prefix (previous chunk's unexecuted tail) + is stored in absolute coordinates. Before feeding it back to the policy, this + helper re-expresses those actions relative to the robot's current joint state + and optionally normalizes them so the policy receives correctly scaled inputs. 
+ """ + state = current_state.detach().cpu() + if state.dim() == 1: + state = state.unsqueeze(0) + + action_cpu = prev_actions_absolute.detach().cpu() + mask = relative_step._build_mask(action_cpu.shape[-1]) + relative_actions = to_relative_actions(action_cpu, state, mask) + + transition = create_transition(action=relative_actions) + if normalizer_step is not None: + transition = normalizer_step(transition) + + return transition[TransitionKey.ACTION].to(policy_device) diff --git a/src/lerobot/policies/sarm/processor_sarm.py b/src/lerobot/policies/sarm/processor_sarm.py index e939b3485..b60271b49 100644 --- a/src/lerobot/policies/sarm/processor_sarm.py +++ b/src/lerobot/policies/sarm/processor_sarm.py @@ -455,7 +455,13 @@ class SARMEncodingProcessorStep(ProcessorStep): inputs = {k: v.to(self.device) for k, v in inputs.items()} # Get image embeddings - embeddings = self.clip_model.get_image_features(**inputs).detach().cpu() + # transformers 5.x returns BaseModelOutputWithPooling instead of a plain tensor + output = self.clip_model.get_image_features(**inputs) + if not isinstance(output, torch.Tensor): + output = output.pooler_output + if output is None: + raise ValueError("pooler_output should not be None for CLIP models.") + embeddings = output.detach().cpu() # Handle single frame case if embeddings.dim() == 1: @@ -482,7 +488,13 @@ class SARMEncodingProcessorStep(ProcessorStep): inputs = self.clip_processor.tokenizer([text], return_tensors="pt", padding=True, truncation=True) inputs = {k: v.to(self.device) for k, v in inputs.items()} - text_embedding = self.clip_model.get_text_features(**inputs).detach().cpu() + # transformers 5.x returns BaseModelOutputWithPooling instead of a plain tensor + output = self.clip_model.get_text_features(**inputs) + if not isinstance(output, torch.Tensor): + output = output.pooler_output + if output is None: + raise ValueError("pooler_output should not be None for CLIP models.") + text_embedding = output.detach().cpu() text_embedding = 
text_embedding.expand(batch_size, -1) return text_embedding diff --git a/src/lerobot/policies/smolvla/modeling_smolvla.py b/src/lerobot/policies/smolvla/modeling_smolvla.py index ee3ff4db9..8fd60c1fc 100644 --- a/src/lerobot/policies/smolvla/modeling_smolvla.py +++ b/src/lerobot/policies/smolvla/modeling_smolvla.py @@ -62,6 +62,7 @@ from torch import Tensor, nn from lerobot.utils.constants import ACTION, OBS_LANGUAGE_ATTENTION_MASK, OBS_LANGUAGE_TOKENS, OBS_STATE from lerobot.utils.device_utils import get_safe_dtype +from lerobot.utils.import_utils import require_package from ..pretrained import PreTrainedPolicy from ..rtc.modeling_rtc import RTCProcessor @@ -239,6 +240,7 @@ class SmolVLAPolicy(PreTrainedPolicy): the configuration class is used. """ + require_package("transformers", extra="smolvla") super().__init__(config) config.validate_features() self.config = config @@ -392,13 +394,21 @@ class SmolVLAPolicy(PreTrainedPolicy): loss_dict["losses_after_rm_padding"] = losses.clone().mean().item() if reduction == "none": - # Return per-sample losses (B,) by averaging over time and action dims - per_sample_loss = losses.mean(dim=(1, 2)) + # Return per-sample losses (B,) by averaging over valid (time, action) entries + if actions_is_pad is None: + per_sample_loss = losses.mean(dim=(1, 2)) + else: + num_valid = ((~actions_is_pad).sum(dim=1) * losses.shape[-1]).clamp_min(1) + per_sample_loss = losses.sum(dim=(1, 2)) / num_valid loss_dict["loss"] = per_sample_loss.mean().item() return per_sample_loss, loss_dict else: - # Default: return scalar mean loss - loss = losses.mean() + # Default: return scalar mean loss over valid (time, action) entries + if actions_is_pad is None: + loss = losses.mean() + else: + num_valid = ((~actions_is_pad).sum() * losses.shape[-1]).clamp_min(1) + loss = losses.sum() / num_valid loss_dict["loss"] = loss.item() return loss, loss_dict diff --git a/src/lerobot/policies/vqbet/vqbet_utils.py b/src/lerobot/policies/vqbet/vqbet_utils.py index 
f8bfcb06a..10180e0a4 100644 --- a/src/lerobot/policies/vqbet/vqbet_utils.py +++ b/src/lerobot/policies/vqbet/vqbet_utils.py @@ -27,7 +27,7 @@ import torch.distributed as distributed import torch.nn.functional as F # noqa: N812 from einops import pack, rearrange, reduce, repeat, unpack from torch import einsum, nn -from torch.cuda.amp import autocast +from torch.amp import autocast from torch.optim import Optimizer from .configuration_vqbet import VQBeTConfig @@ -1370,7 +1370,7 @@ class EuclideanCodebook(nn.Module): batch_samples = rearrange(batch_samples, "h ... d -> h (...) d") self.replace(batch_samples, batch_mask=expired_codes) - @autocast(enabled=False) + @autocast("cuda", enabled=False) def forward(self, x, sample_codebook_temp=None, mask=None, freeze_codebook=False): needs_codebook_dim = x.ndim < 4 sample_codebook_temp = ( diff --git a/src/lerobot/processor/relative_action_processor.py b/src/lerobot/processor/relative_action_processor.py index d9f97f2c6..e1e65acb1 100644 --- a/src/lerobot/processor/relative_action_processor.py +++ b/src/lerobot/processor/relative_action_processor.py @@ -142,6 +142,10 @@ class RelativeActionsProcessorStep(ProcessorStep): new_transition[TransitionKey.ACTION] = to_relative_actions(action, state, mask) return new_transition + def get_cached_state(self) -> torch.Tensor | None: + """Return the cached ``observation.state`` used as the reference point for relative/absolute action conversions.""" + return self._last_state + def get_config(self) -> dict[str, Any]: return { "enabled": self.enabled, @@ -182,7 +186,8 @@ class AbsoluteActionsProcessorStep(ProcessorStep): "but relative_step is None. Ensure relative_step is set when constructing the postprocessor." ) - if self.relative_step._last_state is None: + cached_state = self.relative_step.get_cached_state() + if cached_state is None: raise RuntimeError( "AbsoluteActionsProcessorStep requires state from RelativeActionsProcessorStep " "but no state has been cached. 
Ensure the preprocessor runs before the postprocessor." @@ -194,9 +199,7 @@ class AbsoluteActionsProcessorStep(ProcessorStep): return new_transition mask = self.relative_step._build_mask(action.shape[-1]) - new_transition[TransitionKey.ACTION] = to_absolute_actions( - action, self.relative_step._last_state, mask - ) + new_transition[TransitionKey.ACTION] = to_absolute_actions(action, cached_state, mask) return new_transition def get_config(self) -> dict[str, Any]: diff --git a/src/lerobot/rl/actor.py b/src/lerobot/rl/actor.py index 116970cc8..2da2fba6f 100644 --- a/src/lerobot/rl/actor.py +++ b/src/lerobot/rl/actor.py @@ -62,7 +62,6 @@ from lerobot.cameras import opencv # noqa: F401 from lerobot.configs import parser from lerobot.policies import PreTrainedPolicy, make_policy, make_pre_post_processors from lerobot.processor import TransitionKey -from lerobot.rl.process import ProcessSignalHandler from lerobot.rl.queue import get_last_item_from_queue from lerobot.rl.train_rl import TrainRLServerPipelineConfig from lerobot.robots import so_follower # noqa: F401 @@ -78,6 +77,7 @@ from lerobot.transport.utils import ( transitions_to_bytes, ) from lerobot.utils.device_utils import get_safe_torch_device +from lerobot.utils.process import ProcessSignalHandler from lerobot.utils.random_utils import set_seed from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.transition import ( diff --git a/src/lerobot/rl/learner.py b/src/lerobot/rl/learner.py index afeb44f22..7f0632237 100644 --- a/src/lerobot/rl/learner.py +++ b/src/lerobot/rl/learner.py @@ -75,7 +75,6 @@ from lerobot.rl.algorithms.base import RLAlgorithm from lerobot.rl.algorithms.factory import make_algorithm from lerobot.rl.buffer import ReplayBuffer from lerobot.rl.data_sources import OnlineOfflineMixer -from lerobot.rl.process import ProcessSignalHandler from lerobot.rl.train_rl import TrainRLServerPipelineConfig from lerobot.rl.trainer import RLTrainer from lerobot.robots import so_follower # 
noqa: F401 @@ -96,6 +95,7 @@ from lerobot.utils.constants import ( TRAINING_STATE_DIR, ) from lerobot.utils.device_utils import get_safe_torch_device +from lerobot.utils.process import ProcessSignalHandler from lerobot.utils.random_utils import set_seed from lerobot.utils.utils import ( format_big_number, diff --git a/src/lerobot/robots/reachy2/robot_reachy2.py b/src/lerobot/robots/reachy2/robot_reachy2.py index ef55f71b9..ac5c9ef2f 100644 --- a/src/lerobot/robots/reachy2/robot_reachy2.py +++ b/src/lerobot/robots/reachy2/robot_reachy2.py @@ -20,7 +20,7 @@ from typing import TYPE_CHECKING, Any from lerobot.cameras import make_cameras_from_configs from lerobot.types import RobotAction, RobotObservation -from lerobot.utils.import_utils import _reachy2_sdk_available +from lerobot.utils.import_utils import _reachy2_sdk_available, require_package from ..robot import Robot from ..utils import ensure_safe_goal_position @@ -81,6 +81,7 @@ class Reachy2Robot(Robot): name = "reachy2" def __init__(self, config: Reachy2RobotConfig): + require_package("reachy2_sdk", extra="reachy2") super().__init__(config) self.config = config diff --git a/src/lerobot/robots/unitree_g1/unitree_g1.py b/src/lerobot/robots/unitree_g1/unitree_g1.py index 785861a5a..25ec32716 100644 --- a/src/lerobot/robots/unitree_g1/unitree_g1.py +++ b/src/lerobot/robots/unitree_g1/unitree_g1.py @@ -27,7 +27,7 @@ import numpy as np from lerobot.cameras import make_cameras_from_configs from lerobot.types import RobotAction, RobotObservation -from lerobot.utils.import_utils import _unitree_sdk_available +from lerobot.utils.import_utils import _unitree_sdk_available, require_package from ..robot import Robot from .config_unitree_g1 import UnitreeG1Config @@ -111,6 +111,7 @@ class UnitreeG1(Robot): name = "unitree_g1" def __init__(self, config: UnitreeG1Config): + require_package("unitree-sdk2py", extra="unitree_g1", import_name="unitree_sdk2py") super().__init__(config) logger.info("Initialize UnitreeG1...") diff 
--git a/src/lerobot/rollout/__init__.py b/src/lerobot/rollout/__init__.py new file mode 100644 index 000000000..a4de8ee6c --- /dev/null +++ b/src/lerobot/rollout/__init__.py @@ -0,0 +1,87 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Policy deployment engine with pluggable rollout strategies.""" + +from lerobot.utils.import_utils import require_package + +require_package("datasets", extra="dataset") + +from .configs import ( + BaseStrategyConfig, + DAggerKeyboardConfig, + DAggerPedalConfig, + DAggerStrategyConfig, + HighlightStrategyConfig, + RolloutConfig, + RolloutStrategyConfig, + SentryStrategyConfig, +) +from .context import ( + DatasetContext, + HardwareContext, + PolicyContext, + ProcessorContext, + RolloutContext, + RuntimeContext, + build_rollout_context, +) +from .inference import ( + InferenceEngine, + InferenceEngineConfig, + RTCInferenceConfig, + RTCInferenceEngine, + SyncInferenceConfig, + SyncInferenceEngine, + create_inference_engine, +) +from .strategies import ( + BaseStrategy, + DAggerStrategy, + HighlightStrategy, + RolloutStrategy, + SentryStrategy, + create_strategy, +) + +__all__ = [ + "BaseStrategy", + "BaseStrategyConfig", + "DAggerKeyboardConfig", + "DAggerPedalConfig", + "DAggerStrategy", + "DAggerStrategyConfig", + "DatasetContext", + "HardwareContext", + "HighlightStrategy", + "HighlightStrategyConfig", + "InferenceEngine", + "InferenceEngineConfig", + 
"PolicyContext", + "ProcessorContext", + "RTCInferenceConfig", + "RTCInferenceEngine", + "RolloutConfig", + "RolloutContext", + "RolloutStrategy", + "RolloutStrategyConfig", + "RuntimeContext", + "SentryStrategy", + "SentryStrategyConfig", + "SyncInferenceConfig", + "SyncInferenceEngine", + "build_rollout_context", + "create_inference_engine", + "create_strategy", +] diff --git a/src/lerobot/rollout/configs.py b/src/lerobot/rollout/configs.py new file mode 100644 index 000000000..9d019c887 --- /dev/null +++ b/src/lerobot/rollout/configs.py @@ -0,0 +1,323 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Configuration dataclasses for the rollout deployment engine.""" + +from __future__ import annotations + +import abc +import logging +from dataclasses import dataclass, field + +import draccus + +from lerobot.configs import PreTrainedConfig, parser +from lerobot.configs.dataset import DatasetRecordConfig +from lerobot.robots.config import RobotConfig +from lerobot.teleoperators.config import TeleoperatorConfig +from lerobot.utils.device_utils import auto_select_torch_device, is_torch_device_available + +from .inference import InferenceEngineConfig, SyncInferenceConfig + +logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# Strategy configs (polymorphic dispatch via draccus ChoiceRegistry) +# --------------------------------------------------------------------------- + + +@dataclass +class RolloutStrategyConfig(draccus.ChoiceRegistry, abc.ABC): + """Abstract base for rollout strategy configurations. + + Use ``--strategy.type=`` on the CLI to select a strategy. + """ + + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + +@RolloutStrategyConfig.register_subclass("base") +@dataclass +class BaseStrategyConfig(RolloutStrategyConfig): + """Autonomous rollout with no data recording.""" + + pass + + +@RolloutStrategyConfig.register_subclass("sentry") +@dataclass +class SentryStrategyConfig(RolloutStrategyConfig): + """Continuous autonomous rollout with always-on recording. + + Episode duration is derived from camera resolution, FPS, and + ``target_video_file_size_mb`` so that each saved episode produces a + video file that has crossed the target size. This aligns episode + boundaries with the dataset's video file chunking, so each + ``push_to_hub`` call uploads complete video files rather than + re-uploading a growing file that hasn't crossed the chunk boundary. + """ + + upload_every_n_episodes: int = 5 + # Target video file size in MB for episode rotation. 
Episodes are + # saved once the estimated video duration would exceed this limit. + # Defaults to DEFAULT_VIDEO_FILE_SIZE_IN_MB when set to None. + target_video_file_size_mb: int | None = None + + +@RolloutStrategyConfig.register_subclass("highlight") +@dataclass +class HighlightStrategyConfig(RolloutStrategyConfig): + """Autonomous rollout with on-demand recording via ring buffer. + + A memory-bounded ring buffer continuously captures telemetry. When + the user presses the save key, the buffer contents are flushed to + the dataset and live recording continues until the key is pressed + again. + """ + + ring_buffer_seconds: float = 10.0 + ring_buffer_max_memory_mb: int = 1024 + save_key: str = "s" + push_key: str = "h" + + +@dataclass +class DAggerKeyboardConfig: + """Keyboard key bindings for DAgger controls. + + Keys are specified as single characters (e.g. ``"c"``, ``"h"``) or + special key names (``"space"``). + """ + + pause_resume: str = "space" + correction: str = "tab" + upload: str = "enter" + + +@dataclass +class DAggerPedalConfig: + """Foot pedal configuration for DAgger controls. + + Pedal codes are evdev key code strings (e.g. ``"KEY_A"``). + """ + + device_path: str = "/dev/input/by-id/usb-PCsensor_FootSwitch-event-kbd" + pause_resume: str = "KEY_A" + correction: str = "KEY_B" + upload: str = "KEY_C" + + +@RolloutStrategyConfig.register_subclass("dagger") +@dataclass +class DAggerStrategyConfig(RolloutStrategyConfig): + """Human-in-the-loop data collection (DAgger / RaC). + + Alternates between autonomous policy execution and human intervention. + Intervention frames are tagged with ``intervention=True``. + + Input is controlled via either a keyboard or foot pedal, selected by + ``input_device``. Each device exposes three actions: + + 1. **pause_resume** — toggle policy execution on/off. + 2. **correction** — toggle human correction recording. + 3. **upload** — push dataset to hub on demand (corrections-only mode). 
+ + When ``record_autonomous=False`` (default) only human-correction windows + are recorded — each correction becomes its own episode. Set to ``True`` + to record both autonomous and correction frames with size-based episode + rotation (same as Sentry) and background uploading. ``push_to_hub`` is + blocked while a correction is in progress. + """ + + # Number of correction episodes to collect (corrections-only mode). + # When None, falls back to ``--dataset.num_episodes``. + num_episodes: int | None = None + record_autonomous: bool = False + upload_every_n_episodes: int = 5 + # Target video file size in MB for episode rotation (record_autonomous + # mode only). Defaults to DEFAULT_VIDEO_FILE_SIZE_IN_MB when None. + target_video_file_size_mb: int | None = None + input_device: str = "keyboard" + keyboard: DAggerKeyboardConfig = field(default_factory=DAggerKeyboardConfig) + pedal: DAggerPedalConfig = field(default_factory=DAggerPedalConfig) + + def __post_init__(self): + if self.input_device not in ("keyboard", "pedal"): + raise ValueError(f"DAgger input_device must be 'keyboard' or 'pedal', got '{self.input_device}'") + + +# --------------------------------------------------------------------------- +# Top-level rollout config +# --------------------------------------------------------------------------- + + +@dataclass +class RolloutConfig: + """Top-level configuration for the ``lerobot-rollout`` CLI. + + Combines hardware, policy, strategy, and runtime settings. The + ``__post_init__`` method performs fail-fast validation to reject + invalid flag combinations early. 
+ """ + + # Hardware + robot: RobotConfig | None = None + teleop: TeleoperatorConfig | None = None + + # Policy (loaded from --policy.path via __post_init__) + policy: PreTrainedConfig | None = None + + # Strategy (polymorphic: --strategy.type=base|sentry|highlight|dagger) + strategy: RolloutStrategyConfig = field(default_factory=BaseStrategyConfig) + + # Inference backend (polymorphic: --inference.type=sync|rtc) + inference: InferenceEngineConfig = field(default_factory=SyncInferenceConfig) + + # Dataset (required for sentry, highlight, dagger; None for base) + dataset: DatasetRecordConfig | None = None + + # Runtime + fps: float = 30.0 + duration: float = 0.0 # 0 = infinite (24/7 mode) + interpolation_multiplier: int = 1 + device: str | None = None + task: str = "" + display_data: bool = False + # Display data on a remote Rerun server + display_ip: str | None = None + # Port of the remote Rerun server + display_port: int | None = None + # Whether to display compressed images in Rerun + display_compressed_images: bool = False + # Use vocal synthesis to read events + play_sounds: bool = True + resume: bool = False + # Rename map for mapping robot/dataset observation keys to policy keys + rename_map: dict[str, str] = field(default_factory=dict) + + # Hardware teardown + # When True (default), smoothly interpolate the robot back to the joint + # positions captured at startup before disconnecting. Set to False to + # leave the robot in its final achieved pose at shutdown. 
+ return_to_initial_position: bool = True + + # Torch compile + use_torch_compile: bool = False + torch_compile_backend: str = "inductor" + torch_compile_mode: str = "default" + compile_warmup_inferences: int = 2 + + def __post_init__(self): + """Validate config invariants and load the policy config from ``--policy.path``.""" + # --- Strategy-specific validation --- + if isinstance(self.strategy, DAggerStrategyConfig) and self.teleop is None: + raise ValueError("DAgger strategy requires --teleop.type to be set") + + # TODO(Steven): DAgger shouldn't require a dataset (user may want to just rollout+intervene without recording), but for now we require it to simplify the implementation. + needs_dataset = isinstance( + self.strategy, (SentryStrategyConfig, HighlightStrategyConfig, DAggerStrategyConfig) + ) + if needs_dataset and (self.dataset is None or not self.dataset.repo_id): + raise ValueError(f"{self.strategy.type} strategy requires --dataset.repo_id to be set") + + if isinstance(self.strategy, BaseStrategyConfig) and self.dataset is not None: + raise ValueError( + "Base strategy does not record data. Use sentry, highlight, or dagger for recording." + ) + + # Sentry MUST use streaming encoding to avoid disk I/O blocking the control loop + if ( + isinstance(self.strategy, SentryStrategyConfig) + and self.dataset is not None + and not self.dataset.streaming_encoding + ): + logger.warning("Sentry mode forces streaming_encoding=True") + self.dataset.streaming_encoding = True + + # Highlight writes frames while the policy is still running, so streaming is mandatory. + if ( + isinstance(self.strategy, HighlightStrategyConfig) + and self.dataset is not None + and not self.dataset.streaming_encoding + ): + logger.warning("Highlight mode forces streaming_encoding=True") + self.dataset.streaming_encoding = True + + # DAgger: streaming is mandatory only when the autonomous phase is also recorded. 
+ if isinstance(self.strategy, DAggerStrategyConfig) and self.dataset is not None: + if self.strategy.record_autonomous and not self.dataset.streaming_encoding: + logger.warning("DAgger with record_autonomous=True forces streaming_encoding=True") + self.dataset.streaming_encoding = True + elif not self.strategy.record_autonomous and not self.dataset.streaming_encoding: + logger.info( + "Streaming encoding is disabled for DAgger corrections-only mode. " + "Consider enabling it for faster episode saving: " + "--dataset.streaming_encoding=true --dataset.encoder_threads=2" + ) + + # DAgger: resolve num_episodes from dataset config when not explicitly set. + if isinstance(self.strategy, DAggerStrategyConfig) and self.strategy.num_episodes is None: + if self.dataset is not None: + self.strategy.num_episodes = self.dataset.num_episodes + logger.info( + "DAgger num_episodes not set — using --dataset.num_episodes=%d", + self.strategy.num_episodes, + ) + else: + raise ValueError( + "DAgger num_episodes must be set either via --strategy.num_episodes or --dataset.num_episodes" + ) + + # --- Policy loading --- + if self.robot is None: + raise ValueError("--robot.type is required for rollout") + + policy_path = parser.get_path_arg("policy") + if policy_path: + cli_overrides = parser.get_cli_overrides("policy") + self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) + self.policy.pretrained_path = policy_path + if self.policy is None: + raise ValueError("--policy.path is required for rollout") + + # --- Task resolution --- + # When any --dataset.* flag is passed, draccus creates a DatasetRecordConfig with single_task="". + # If the user set the task via the top-level --task flag, propagate it so that all + # downstream consumers (inference engine, dataset frame builders) see it. 
+ if self.dataset is not None and not self.dataset.single_task and self.task: + logger.info("Propagating top-level task '%s' to dataset config", self.task) + self.dataset.single_task = self.task + elif self.dataset is not None and self.dataset.single_task and not self.task: + logger.info("Propagating dataset single_task '%s' to top-level task", self.dataset.single_task) + self.task = self.dataset.single_task + + # --- Device resolution --- + # Resolve device from the policy config when not explicitly set so all + # components (policy.to, preprocessor, inference engine) use the same + # device string instead of inconsistent fallbacks. + if self.device is None or not is_torch_device_available(self.device): + resolved = self.policy.device + if resolved: + self.device = resolved + logger.info("Resolved device from policy config: %s", self.device) + else: + self.device = auto_select_torch_device().type + logger.info("No policy config to resolve device from; auto-selected device: %s", self.device) + + @classmethod + def __get_path_fields__(cls) -> list[str]: + return ["policy"] diff --git a/src/lerobot/rollout/context.py b/src/lerobot/rollout/context.py new file mode 100644 index 000000000..fe58554ab --- /dev/null +++ b/src/lerobot/rollout/context.py @@ -0,0 +1,459 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Rollout context: shared state created once before strategy dispatch. 
+ +Grouped into five topical sub-contexts — :class:`RuntimeContext`, +:class:`HardwareContext`, :class:`PolicyContext`, :class:`ProcessorContext`, +and :class:`DatasetContext` — assembled into :class:`RolloutContext`. +""" + +from __future__ import annotations + +import logging +from dataclasses import dataclass, field +from threading import Event + +import torch + +from lerobot.configs import FeatureType, PreTrainedConfig +from lerobot.datasets import ( + LeRobotDataset, + aggregate_pipeline_dataset_features, + create_initial_features, +) +from lerobot.policies import get_policy_class, make_pre_post_processors +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.processor import ( + PolicyProcessorPipeline, + RobotAction, + RobotObservation, + RobotProcessorPipeline, + make_default_processors, + rename_stats, +) +from lerobot.processor.relative_action_processor import RelativeActionsProcessorStep +from lerobot.robots import make_robot_from_config +from lerobot.teleoperators import Teleoperator, make_teleoperator_from_config +from lerobot.utils.feature_utils import combine_feature_dicts, hw_to_dataset_features + +from .configs import BaseStrategyConfig, DAggerStrategyConfig, RolloutConfig +from .inference import ( + InferenceEngine, + RTCInferenceConfig, + SyncInferenceConfig, + create_inference_engine, +) +from .robot_wrapper import ThreadSafeRobot + +logger = logging.getLogger(__name__) + + +def _resolve_action_key_order( + policy_action_names: list[str] | None, dataset_action_names: list[str] +) -> list[str]: + """Choose action name ordering for mapping policy tensor outputs to robot action dicts.""" + if not policy_action_names: + return dataset_action_names + policy_action_names = list(policy_action_names) + if len(policy_action_names) != len(dataset_action_names): + logger.warning( + "policy.action_feature_names length (%d) != dataset action dim (%d); using dataset order", + len(policy_action_names), + len(dataset_action_names), + ) + 
return dataset_action_names + if set(dataset_action_names) != set(policy_action_names): + logger.warning("policy.action_feature_names keys don't match dataset; using dataset order") + return dataset_action_names + return policy_action_names + + +# --------------------------------------------------------------------------- +# Sub-contexts +# --------------------------------------------------------------------------- + + +@dataclass +class RuntimeContext: + """Runtime knobs shared with every strategy.""" + + cfg: RolloutConfig + shutdown_event: Event + + +@dataclass +class HardwareContext: + """Connected hardware. + + The raw robot is available via ``robot_wrapper.inner`` when needed + (e.g. for disconnect); strategies should otherwise go through the + thread-safe wrapper. + + ``initial_position`` stores the robot's joint positions at connect + time. Strategies use it to return the robot to a safe pose before + shutting down. + """ + + robot_wrapper: ThreadSafeRobot + teleop: Teleoperator | None + initial_position: dict | None = None + + +@dataclass +class PolicyContext: + """Loaded policy and its inference engine.""" + + policy: PreTrainedPolicy + preprocessor: PolicyProcessorPipeline + postprocessor: PolicyProcessorPipeline + inference: InferenceEngine + + +@dataclass +class ProcessorContext: + """Robot-side pipelines (run outside the policy).""" + + teleop_action_processor: RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction] + robot_action_processor: RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction] + robot_observation_processor: RobotProcessorPipeline[RobotObservation, RobotObservation] + + +@dataclass +class DatasetContext: + """Dataset and feature bookkeeping.""" + + dataset: LeRobotDataset | None + dataset_features: dict = field(default_factory=dict) + hw_features: dict = field(default_factory=dict) + ordered_action_keys: list[str] = field(default_factory=list) + + +@dataclass +class RolloutContext: + """Bundle of 
sub-contexts passed to every rollout strategy. + + Built once by :func:`build_rollout_context` before strategy dispatch. + """ + + runtime: RuntimeContext + hardware: HardwareContext + policy: PolicyContext + processors: ProcessorContext + data: DatasetContext + + +# --------------------------------------------------------------------------- +# Build +# --------------------------------------------------------------------------- + + +def build_rollout_context( + cfg: RolloutConfig, + shutdown_event: Event, + teleop_action_processor: RobotProcessorPipeline | None = None, + robot_action_processor: RobotProcessorPipeline | None = None, + robot_observation_processor: RobotProcessorPipeline | None = None, +) -> RolloutContext: + """Wire up policy, processors, hardware, dataset, and inference engine. + + The order is policy-first / hardware-last so a bad ``--policy.path`` + fails fast without touching the robot. + """ + is_rtc = isinstance(cfg.inference, RTCInferenceConfig) + + # --- 1. Policy (heavy I/O, but no hardware yet) ------------------- + logger.info("Loading policy from '%s'...", cfg.policy.pretrained_path) + policy_config = cfg.policy + policy_class = get_policy_class(policy_config.type) + + full_config = PreTrainedConfig.from_pretrained(cfg.policy.pretrained_path) + for attr in ("device", "use_amp"): + if hasattr(cfg.policy, attr) and hasattr(full_config, attr): + cli_val = getattr(cfg.policy, attr) + if cli_val is not None: + setattr(full_config, attr, cli_val) + + if hasattr(full_config, "compile_model"): + full_config.compile_model = cfg.use_torch_compile + + if full_config.type == "vqbet" and cfg.device == "mps": + raise NotImplementedError( + "Current implementation of VQBeT does not support `mps` backend. " + "Please use `cpu` or `cuda` backend." 
+ ) + + if full_config.use_peft: + from peft import PeftConfig, PeftModel + + peft_path = cfg.policy.pretrained_path + peft_config = PeftConfig.from_pretrained(peft_path) + policy = policy_class.from_pretrained( + pretrained_name_or_path=peft_config.base_model_name_or_path, config=full_config + ) + policy = PeftModel.from_pretrained(policy, peft_path, config=peft_config) + else: + policy = policy_class.from_pretrained(cfg.policy.pretrained_path, config=full_config) + + if is_rtc: + policy.config.rtc_config = cfg.inference.rtc + if hasattr(policy, "init_rtc_processor"): + policy.init_rtc_processor() + + policy = policy.to(cfg.device) + policy.eval() + logger.info("Policy loaded: type=%s, device=%s", policy_config.type, cfg.device) + + if cfg.use_torch_compile and policy.type not in ("pi0", "pi05"): + try: + if hasattr(torch, "compile"): + compile_kwargs = { + "backend": cfg.torch_compile_backend, + "mode": cfg.torch_compile_mode, + "options": {"triton.cudagraphs": False}, + } + policy.predict_action_chunk = torch.compile(policy.predict_action_chunk, **compile_kwargs) + logger.info("torch.compile applied to predict_action_chunk") + except Exception as e: + logger.warning("Failed to apply torch.compile: %s", e) + + # --- 2. Robot-side processors (user-supplied or defaults) -------- + if ( + teleop_action_processor is None + or robot_action_processor is None + or robot_observation_processor is None + ): + _t, _r, _o = make_default_processors() + teleop_action_processor = teleop_action_processor or _t + robot_action_processor = robot_action_processor or _r + robot_observation_processor = robot_observation_processor or _o + + # --- 3. Hardware (heaviest side-effect, deferred) ----------------- + logger.info("Connecting robot (%s)...", cfg.robot.type if cfg.robot else "?") + robot = make_robot_from_config(cfg.robot) + robot.connect() + logger.info("Robot connected: %s", robot.name) + + # Store the initial joint positions so we can return to a safe pose on shutdown. 
+ initial_obs = robot.get_observation() + initial_position = {k: v for k, v in initial_obs.items() if k.endswith(".pos")} + logger.info("Captured initial robot position (%d keys)", len(initial_position)) + + robot_wrapper = ThreadSafeRobot(robot) + + teleop = None + if cfg.teleop is not None: + logger.info("Connecting teleoperator (%s)...", cfg.teleop.type if cfg.teleop else "?") + teleop = make_teleoperator_from_config(cfg.teleop) + teleop.connect() + logger.info("Teleoperator connected") + + # TODO(Steven): once Teleoperator motor-control methods are standardised + # (``enable_torque`` / ``disable_torque`` / ``write_goal_positions``), gate + # the DAgger strategy on their presence here and fail fast with a helpful + # message instead of relying on the operator to pre-align the leader by + # hand. See :func:`DAggerStrategy._apply_transition` for the matching + # disabled call sites. + # if isinstance(cfg.strategy, DAggerStrategyConfig) and teleop is not None: + # required_teleop_methods = ("enable_torque", "disable_torque", "write_goal_positions") + # missing = [m for m in required_teleop_methods if not callable(getattr(teleop, m, None))] + # if missing: + # teleop.disconnect() + # raise ValueError( + # f"DAgger strategy requires a teleoperator with motor control methods " + # f"{required_teleop_methods}. '{type(teleop).__name__}' is missing: {missing}" + # ) + + # --- 4. Features + action-key reconciliation --------------------- + # TODO(Steven):Only ``.pos`` joint features are routed to the policy as state and as the + # action target; velocity and torque channels (when present) are kept in + # the raw observation but excluded from the policy-facing tensors. + all_obs_features = robot.observation_features + # ``observation_features`` values are either a tuple (camera shape) or the + # ``float`` type itself used as a sentinel for scalar motor features — + # see ``dict[str, type | tuple]`` annotation on ``Robot.observation_features``. 
+ observation_features_hw = { + k: v + for k, v in all_obs_features.items() + if isinstance(v, tuple) or (v is float and k.endswith(".pos")) + } + action_features_hw = {k: v for k, v in robot.action_features.items() if k.endswith(".pos")} + + # The action side is always needed: sync inference reads action names from + # ``dataset_features[ACTION]`` to map policy tensors back to robot actions. + action_dataset_features = aggregate_pipeline_dataset_features( + pipeline=teleop_action_processor, + initial_features=create_initial_features(action=action_features_hw), + use_videos=cfg.dataset.video if cfg.dataset else True, + ) + # Observation-side aggregation is needed because of build_dataset_frame + observation_dataset_features = aggregate_pipeline_dataset_features( + pipeline=robot_observation_processor, + initial_features=create_initial_features(observation=observation_features_hw), + use_videos=cfg.dataset.video if cfg.dataset else True, + ) + dataset_features = combine_feature_dicts(action_dataset_features, observation_dataset_features) + hw_features = hw_to_dataset_features(observation_features_hw, "observation") + raw_action_keys = list(action_features_hw.keys()) + policy_action_names = getattr(policy_config, "action_feature_names", None) + ordered_action_keys = _resolve_action_key_order( + list(policy_action_names) if policy_action_names else None, + raw_action_keys, + ) + + # Validate visual features if no rename_map is active + rename_map = cfg.rename_map + if not rename_map: + expected_visuals = {k for k, v in full_config.input_features.items() if v.type == FeatureType.VISUAL} + provided_visuals = { + f"observation.images.{k}" for k, v in robot.observation_features.items() if isinstance(v, tuple) + } + policy_subset = expected_visuals.issubset(provided_visuals) + hw_subset = provided_visuals.issubset(expected_visuals) + if not (policy_subset or hw_subset): + raise ValueError( + f"Visual feature mismatch between policy and robot hardware.\n" + f"Policy 
expects: {expected_visuals}\n" + f"Robot provides: {provided_visuals}" + ) + + # --- 5. Dataset ------------- + dataset = None + if cfg.dataset is not None and not isinstance(cfg.strategy, BaseStrategyConfig): + logger.info("Setting up dataset (repo_id=%s)...", cfg.dataset.repo_id) + if cfg.resume: + dataset = LeRobotDataset.resume( + cfg.dataset.repo_id, + root=cfg.dataset.root, + batch_encoding_size=cfg.dataset.video_encoding_batch_size, + vcodec=cfg.dataset.vcodec, + streaming_encoding=cfg.dataset.streaming_encoding, + encoder_queue_maxsize=cfg.dataset.encoder_queue_maxsize, + encoder_threads=cfg.dataset.encoder_threads, + image_writer_processes=cfg.dataset.num_image_writer_processes, + image_writer_threads=cfg.dataset.num_image_writer_threads_per_camera + * len(robot.cameras if hasattr(robot, "cameras") else []), + ) + else: + if isinstance(cfg.strategy, DAggerStrategyConfig): + dataset_features["intervention"] = { + "dtype": "bool", + "shape": (1,), + "names": None, + } + + repo_name = cfg.dataset.repo_id.split("/", 1)[-1] + if not repo_name.startswith("rollout_"): + raise ValueError( + "Dataset names for rollout must start with 'rollout_'. " + "Use --dataset.repo_id=/rollout_ for policy deployment datasets." 
+ ) + cfg.dataset.stamp_repo_id() + target_video_mb = getattr(cfg.strategy, "target_video_file_size_mb", None) + dataset = LeRobotDataset.create( + cfg.dataset.repo_id, + cfg.dataset.fps, + root=cfg.dataset.root, + robot_type=robot.name, + features=dataset_features, + use_videos=cfg.dataset.video, + image_writer_processes=cfg.dataset.num_image_writer_processes, + image_writer_threads=cfg.dataset.num_image_writer_threads_per_camera + * len(robot.cameras if hasattr(robot, "cameras") else []), + batch_encoding_size=cfg.dataset.video_encoding_batch_size, + vcodec=cfg.dataset.vcodec, + streaming_encoding=cfg.dataset.streaming_encoding, + encoder_queue_maxsize=cfg.dataset.encoder_queue_maxsize, + encoder_threads=cfg.dataset.encoder_threads, + video_files_size_in_mb=target_video_mb, + ) + + if dataset is not None: + logger.info("Dataset ready: %s (%d existing episodes)", dataset.repo_id, dataset.num_episodes) + + # --- 6. Policy pre/post processors (needs dataset stats if any) --- + dataset_stats = None + if dataset is not None: + dataset_stats = rename_stats( + dataset.meta.stats, + cfg.rename_map, + ) + + preprocessor, postprocessor = make_pre_post_processors( + policy_cfg=policy_config, + pretrained_path=cfg.policy.pretrained_path, + dataset_stats=dataset_stats, + preprocessor_overrides={ + "device_processor": {"device": cfg.device}, + "rename_observations_processor": {"rename_map": cfg.rename_map}, + }, + ) + + if isinstance(cfg.inference, SyncInferenceConfig) and any( + isinstance(step, RelativeActionsProcessorStep) and step.enabled + for step in getattr(preprocessor, "steps", ()) + ): + raise NotImplementedError( + "SyncInferenceEngine does not support policies with relative actions for now." + "Use --inference.type=rtc or remove relative action processor steps from the policy pipeline." + ) + + # --- 7. 
Inference strategy (needs policy + pre/post + hardware) -- + logger.info( + "Creating inference engine (type=%s)...", + cfg.inference.type if hasattr(cfg.inference, "type") else "sync", + ) + task_str = cfg.dataset.single_task if cfg.dataset else cfg.task + inference_strategy = create_inference_engine( + cfg.inference, + policy=policy, + preprocessor=preprocessor, + postprocessor=postprocessor, + robot_wrapper=robot_wrapper, + hw_features=hw_features, + dataset_features=dataset_features, + ordered_action_keys=ordered_action_keys, + task=task_str, + fps=cfg.fps, + device=cfg.device, + use_torch_compile=cfg.use_torch_compile, + compile_warmup_inferences=cfg.compile_warmup_inferences, + shutdown_event=shutdown_event, + ) + + # --- 8. Assemble --------------------------------------------------- + logger.info("Rollout context assembled successfully") + return RolloutContext( + runtime=RuntimeContext(cfg=cfg, shutdown_event=shutdown_event), + hardware=HardwareContext( + robot_wrapper=robot_wrapper, teleop=teleop, initial_position=initial_position + ), + policy=PolicyContext( + policy=policy, + preprocessor=preprocessor, + postprocessor=postprocessor, + inference=inference_strategy, + ), + processors=ProcessorContext( + teleop_action_processor=teleop_action_processor, + robot_action_processor=robot_action_processor, + robot_observation_processor=robot_observation_processor, + ), + data=DatasetContext( + dataset=dataset, + dataset_features=dataset_features, + hw_features=hw_features, + ordered_action_keys=ordered_action_keys, + ), + ) diff --git a/src/lerobot/rollout/inference/__init__.py b/src/lerobot/rollout/inference/__init__.py new file mode 100644 index 000000000..b61cb342c --- /dev/null +++ b/src/lerobot/rollout/inference/__init__.py @@ -0,0 +1,39 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Inference engine package — backend-agnostic action production. + +Concrete backends (``sync``, ``rtc``, ...) expose the same small interface so +rollout strategies never branch on which backend is in use. +""" + +from .base import InferenceEngine +from .factory import ( + InferenceEngineConfig, + RTCInferenceConfig, + SyncInferenceConfig, + create_inference_engine, +) +from .rtc import RTCInferenceEngine +from .sync import SyncInferenceEngine + +__all__ = [ + "InferenceEngine", + "InferenceEngineConfig", + "RTCInferenceConfig", + "RTCInferenceEngine", + "SyncInferenceConfig", + "SyncInferenceEngine", + "create_inference_engine", +] diff --git a/src/lerobot/rollout/inference/base.py b/src/lerobot/rollout/inference/base.py new file mode 100644 index 000000000..f269aa5fe --- /dev/null +++ b/src/lerobot/rollout/inference/base.py @@ -0,0 +1,89 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Inference engine ABC. 
+ +Rollout strategies consume actions through this small interface so they +do not need to know whether inference happens inline on the control thread +or asynchronously in a background thread (RTC). +""" + +from __future__ import annotations + +import abc + +import torch + + +class InferenceEngine(abc.ABC): + """Abstract backend for producing actions during rollout. + + Subclasses decide whether inference happens inline on the control + thread or asynchronously in a background thread. The contract is + minimal so additional backends can be plugged in without touching + rollout strategies. + + Lifecycle + --------- + ``start`` — prepare the backend (e.g. launch a background thread). + ``stop`` — shut the backend down cleanly. + ``reset`` — clear episode-scoped state (policy hidden state, queues…). + + Action production + ----------------- + ``get_action(obs_frame)`` — return the next action tensor, or + ``None`` if none is available (e.g. async queue empty). Sync + backends always compute from ``obs_frame``; async backends ignore + it (they receive observations via ``notify_observation``). + + Optional hooks + -------------- + ``notify_observation`` / ``pause`` / ``resume`` have a no-op default + so rollout strategies can invoke them unconditionally. + """ + + @abc.abstractmethod + def start(self) -> None: + """Initialise the backend.""" + + @abc.abstractmethod + def stop(self) -> None: + """Tear the backend down.""" + + @abc.abstractmethod + def reset(self) -> None: + """Clear episode-scoped state.""" + + @abc.abstractmethod + def get_action(self, obs_frame: dict | None) -> torch.Tensor | None: + """Return the next action tensor, or ``None`` if unavailable.""" + + def notify_observation(self, obs: dict) -> None: # noqa: B027 + """Publish the latest processed observation. Default: no-op.""" + + def pause(self) -> None: # noqa: B027 + """Pause background inference. Default: no-op.""" + + def resume(self) -> None: # noqa: B027 + """Resume background inference. 
Default: no-op.""" + + @property + def ready(self) -> bool: + """True once the backend can produce actions (e.g. warmup done).""" + return True + + @property + def failed(self) -> bool: + """True if an unrecoverable error occurred in the backend.""" + return False diff --git a/src/lerobot/rollout/inference/factory.py b/src/lerobot/rollout/inference/factory.py new file mode 100644 index 000000000..e600bed63 --- /dev/null +++ b/src/lerobot/rollout/inference/factory.py @@ -0,0 +1,128 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Inference engine configs and factory. + +Selection is explicit via ``--inference.type=sync|rtc``. Adding a new +backend requires registering its config subclass and dispatching it in +:func:`create_inference_engine`. 
+""" + +from __future__ import annotations + +import abc +import logging +from dataclasses import dataclass, field +from threading import Event + +import draccus + +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.policies.rtc.configuration_rtc import RTCConfig +from lerobot.processor import PolicyProcessorPipeline + +from ..robot_wrapper import ThreadSafeRobot +from .base import InferenceEngine +from .rtc import RTCInferenceEngine +from .sync import SyncInferenceEngine + +logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# Configs +# --------------------------------------------------------------------------- + + +@dataclass +class InferenceEngineConfig(draccus.ChoiceRegistry, abc.ABC): + """Abstract base for inference backend configuration. + + Use ``--inference.type=`` on the CLI to select a backend. + """ + + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + +@InferenceEngineConfig.register_subclass("sync") +@dataclass +class SyncInferenceConfig(InferenceEngineConfig): + """Inline synchronous inference (one policy call per control tick).""" + + +@InferenceEngineConfig.register_subclass("rtc") +@dataclass +class RTCInferenceConfig(InferenceEngineConfig): + """Real-Time Chunking: async policy inference in a background thread.""" + + # Eagerly constructed so draccus exposes nested fields directly on the CLI + # (e.g. ``--inference.rtc.execution_horizon=...``). 
+ rtc: RTCConfig = field(default_factory=RTCConfig) + queue_threshold: int = 30 + + +# --------------------------------------------------------------------------- +# Factory +# --------------------------------------------------------------------------- + + +def create_inference_engine( + config: InferenceEngineConfig, + *, + policy: PreTrainedPolicy, + preprocessor: PolicyProcessorPipeline, + postprocessor: PolicyProcessorPipeline, + robot_wrapper: ThreadSafeRobot, + hw_features: dict, + dataset_features: dict, + ordered_action_keys: list[str], + task: str, + fps: float, + device: str | None, + use_torch_compile: bool = False, + compile_warmup_inferences: int = 2, + shutdown_event: Event | None = None, +) -> InferenceEngine: + """Instantiate the appropriate inference engine from a config object.""" + logger.info("Creating inference engine: %s", config.type) + if isinstance(config, SyncInferenceConfig): + return SyncInferenceEngine( + policy=policy, + preprocessor=preprocessor, + postprocessor=postprocessor, + dataset_features=dataset_features, + ordered_action_keys=ordered_action_keys, + task=task, + device=device, + robot_type=robot_wrapper.robot_type, + ) + if isinstance(config, RTCInferenceConfig): + return RTCInferenceEngine( + policy=policy, + preprocessor=preprocessor, + postprocessor=postprocessor, + robot_wrapper=robot_wrapper, + rtc_config=config.rtc, + hw_features=hw_features, + task=task, + fps=fps, + device=device, + use_torch_compile=use_torch_compile, + compile_warmup_inferences=compile_warmup_inferences, + rtc_queue_threshold=config.queue_threshold, + shutdown_event=shutdown_event, + ) + raise ValueError(f"Unknown inference engine type: {type(config).__name__}") diff --git a/src/lerobot/rollout/inference/rtc.py b/src/lerobot/rollout/inference/rtc.py new file mode 100644 index 000000000..0eef62cef --- /dev/null +++ b/src/lerobot/rollout/inference/rtc.py @@ -0,0 +1,360 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Real-Time Chunking inference engine. + +A background thread produces action chunks asynchronously via +:meth:`policy.predict_action_chunk`. The main control loop polls +``get_action`` for the next ready action; observations flow the other +way via ``notify_observation``. +""" + +from __future__ import annotations + +import logging +import math +import time +import traceback +from threading import Event, Lock, Thread +from typing import Any + +import torch + +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.policies.rtc import ActionQueue, LatencyTracker, reanchor_relative_rtc_prefix +from lerobot.policies.rtc.configuration_rtc import RTCConfig +from lerobot.policies.utils import prepare_observation_for_inference +from lerobot.processor import ( + NormalizerProcessorStep, + PolicyProcessorPipeline, + RelativeActionsProcessorStep, +) +from lerobot.utils.feature_utils import build_dataset_frame + +from ..robot_wrapper import ThreadSafeRobot +from .base import InferenceEngine + +logger = logging.getLogger(__name__) + +# How long the RTC loop sleeps when paused, idle, or backpressured by a full queue. +_RTC_IDLE_SLEEP_S: float = 0.01 +# Backoff between transient inference errors (per consecutive failure). +_RTC_ERROR_RETRY_DELAY_S: float = 0.5 +# Consecutive transient errors tolerated before giving up and propagating shutdown. 
+_RTC_MAX_CONSECUTIVE_ERRORS: int = 10 +# Hard timeout for joining the RTC thread on stop(). +_RTC_JOIN_TIMEOUT_S: float = 3.0 + + +# --------------------------------------------------------------------------- +# RTC helpers +# --------------------------------------------------------------------------- + + +def _normalize_prev_actions_length(prev_actions: torch.Tensor, target_steps: int) -> torch.Tensor: + """Pad or truncate RTC prefix actions to a fixed length for stable compiled inference.""" + if prev_actions.ndim != 2: + raise ValueError(f"Expected 2D [T, A] tensor, got shape={tuple(prev_actions.shape)}") + steps, action_dim = prev_actions.shape + if steps == target_steps: + return prev_actions + if steps > target_steps: + return prev_actions[:target_steps] + padded = torch.zeros((target_steps, action_dim), dtype=prev_actions.dtype, device=prev_actions.device) + padded[:steps] = prev_actions + return padded + + +# --------------------------------------------------------------------------- +# RTCInferenceEngine +# --------------------------------------------------------------------------- + + +class RTCInferenceEngine(InferenceEngine): + """Async RTC inference: a background thread produces action chunks. + + ``get_action`` pops the next action from the shared queue (or + returns ``None`` if the queue is empty). The main loop should call + ``notify_observation`` every tick and ``pause``/``resume`` around + human-intervention phases. 
+ """ + + def __init__( + self, + policy: PreTrainedPolicy, + preprocessor: PolicyProcessorPipeline, + postprocessor: PolicyProcessorPipeline, + robot_wrapper: ThreadSafeRobot, + rtc_config: RTCConfig, + hw_features: dict, + task: str, + fps: float, + device: str | None, + use_torch_compile: bool = False, + compile_warmup_inferences: int = 2, + rtc_queue_threshold: int = 30, + shutdown_event: Event | None = None, + ) -> None: + self._policy = policy + self._preprocessor = preprocessor + self._postprocessor = postprocessor + self._robot = robot_wrapper + self._rtc_config = rtc_config + self._hw_features = hw_features + self._task = task + self._fps = fps + self._device = device or "cpu" + self._use_torch_compile = use_torch_compile + self._compile_warmup_inferences = compile_warmup_inferences + self._rtc_queue_threshold = rtc_queue_threshold + + self._action_queue: ActionQueue | None = None + self._obs_holder: dict[str, Any] = {} + self._obs_lock = Lock() + self._policy_active = Event() + self._compile_warmup_done = Event() + self._shutdown_event = Event() + self._rtc_error = Event() + self._global_shutdown_event = shutdown_event + self._rtc_thread: Thread | None = None + + if not self._use_torch_compile: + self._compile_warmup_done.set() + logger.info("RTCInferenceEngine initialized (torch.compile disabled, no warmup needed)") + else: + logger.info( + "RTCInferenceEngine initialized (torch.compile enabled, %d warmup inferences)", + compile_warmup_inferences, + ) + + # Processor introspection for relative-action re-anchoring. 
+ self._relative_step = next( + (s for s in preprocessor.steps if isinstance(s, RelativeActionsProcessorStep) and s.enabled), + None, + ) + self._normalizer_step = next( + (s for s in preprocessor.steps if isinstance(s, NormalizerProcessorStep)), + None, + ) + if self._relative_step is not None: + if self._relative_step.action_names is None: + cfg_names = getattr(policy.config, "action_feature_names", None) + if cfg_names: + self._relative_step.action_names = list(cfg_names) + else: + self._relative_step.action_names = [ + k for k in robot_wrapper.action_features if k.endswith(".pos") + ] + logger.info("Relative actions enabled: RTC prefix will be re-anchored") + + # ------------------------------------------------------------------ + # Lifecycle + # ------------------------------------------------------------------ + + @property + def ready(self) -> bool: + """True once torch.compile warmup is complete (or immediately if compile is disabled).""" + return self._compile_warmup_done.is_set() + + @property + def failed(self) -> bool: + """True if the RTC background thread exited due to an unrecoverable error.""" + return self._rtc_error.is_set() + + @property + def action_queue(self) -> ActionQueue | None: + """The shared action queue between the RTC thread and the main loop.""" + return self._action_queue + + def start(self) -> None: + """Launch the RTC background thread.""" + self._action_queue = ActionQueue(self._rtc_config) + self._obs_holder = { + "obs": None, + "robot_type": self._robot.robot_type, + } + self._shutdown_event.clear() + self._rtc_thread = Thread( + target=self._rtc_loop, + daemon=True, + name="RTCInference", + ) + self._rtc_thread.start() + logger.info("RTC inference thread started") + + def stop(self) -> None: + """Signal the RTC thread to stop and wait for it.""" + logger.info("Stopping RTC inference thread...") + self._shutdown_event.set() + self._policy_active.clear() + if self._rtc_thread is not None and self._rtc_thread.is_alive(): + 
self._rtc_thread.join(timeout=_RTC_JOIN_TIMEOUT_S) + if self._rtc_thread.is_alive(): + logger.warning("RTC thread did not join within %.1fs", _RTC_JOIN_TIMEOUT_S) + else: + logger.info("RTC inference thread stopped") + self._rtc_thread = None + + def pause(self) -> None: + """Pause the RTC background thread.""" + logger.info("Pausing RTC inference thread") + self._policy_active.clear() + + def resume(self) -> None: + """Resume the RTC background thread.""" + logger.info("Resuming RTC inference thread") + self._policy_active.set() + + def reset(self) -> None: + """Reset the policy, processors, and action queue.""" + logger.info("Resetting RTC inference state (policy + processors + queue)") + self._policy.reset() + self._preprocessor.reset() + self._postprocessor.reset() + if self._action_queue is not None: + self._action_queue.clear() + + # ------------------------------------------------------------------ + # Action production (called from main thread) + # ------------------------------------------------------------------ + + def get_action(self, obs_frame: dict | None) -> torch.Tensor | None: + """Pop the next action from the RTC queue (ignores ``obs_frame``).""" + if self._action_queue is None: + return None + return self._action_queue.get() + + def notify_observation(self, obs: dict) -> None: + """Publish the latest observation for the RTC thread to consume.""" + with self._obs_lock: + self._obs_holder["obs"] = obs + + # ------------------------------------------------------------------ + # RTC: background inference thread + # ------------------------------------------------------------------ + + def _rtc_loop(self) -> None: + """Background thread that generates action chunks via RTC.""" + try: + latency_tracker = LatencyTracker() + time_per_chunk = 1.0 / self._fps + policy_device = torch.device(self._device) + + warmup_required = max(1, self._compile_warmup_inferences) if self._use_torch_compile else 0 + inference_count = 0 + consecutive_errors = 0 + + while 
not self._shutdown_event.is_set(): + if not self._policy_active.is_set(): + time.sleep(_RTC_IDLE_SLEEP_S) + continue + + queue = self._action_queue + with self._obs_lock: + obs = self._obs_holder.get("obs") + if queue is None or obs is None: + time.sleep(_RTC_IDLE_SLEEP_S) + continue + + if queue.qsize() <= self._rtc_queue_threshold: + try: + current_time = time.perf_counter() + idx_before = queue.get_action_index() + prev_actions = queue.get_left_over() + + latency = latency_tracker.max() + delay = math.ceil(latency / time_per_chunk) if latency else 0 + + obs_batch = build_dataset_frame(self._hw_features, obs, prefix="observation") + obs_batch = prepare_observation_for_inference( + obs_batch, policy_device, self._task, self._robot.robot_type + ) + obs_batch["task"] = [self._task] + + preprocessed = self._preprocessor(obs_batch) + + if prev_actions is not None and self._relative_step is not None: + # Rebase against the raw cached state so the leftover tail stays in + # the training-time coordinate frame. 
+ raw_state = self._relative_step.get_cached_state() + if raw_state is not None: + prev_abs = queue.get_processed_left_over() + if prev_abs is not None and prev_abs.numel() > 0: + prev_actions = reanchor_relative_rtc_prefix( + prev_actions_absolute=prev_abs, + current_state=raw_state, + relative_step=self._relative_step, + normalizer_step=self._normalizer_step, + policy_device=policy_device, + ) + + if prev_actions is not None: + prev_actions = _normalize_prev_actions_length( + prev_actions, target_steps=self._rtc_config.execution_horizon + ) + + actions = self._policy.predict_action_chunk( + preprocessed, inference_delay=delay, prev_chunk_left_over=prev_actions + ) + + original = actions.squeeze(0).clone() + processed = self._postprocessor(actions).squeeze(0) + new_latency = time.perf_counter() - current_time + new_delay = math.ceil(new_latency / time_per_chunk) + + inference_count += 1 + consecutive_errors = 0 + is_warmup = self._use_torch_compile and inference_count <= warmup_required + if is_warmup: + latency_tracker.reset() + else: + latency_tracker.add(new_latency) + + queue.merge(original, processed, new_delay, idx_before) + + if ( + is_warmup + and inference_count >= warmup_required + and not self._compile_warmup_done.is_set() + ): + self._compile_warmup_done.set() + logger.info("Compile warmup complete (%d inferences)", inference_count) + + logger.debug("RTC inference latency=%.2fs, queue=%d", new_latency, queue.qsize()) + + except Exception as e: + consecutive_errors += 1 + logger.error( + "RTC inference error (%d/%d): %s", + consecutive_errors, + _RTC_MAX_CONSECUTIVE_ERRORS, + e, + ) + logger.debug(traceback.format_exc()) + if consecutive_errors >= _RTC_MAX_CONSECUTIVE_ERRORS: + # Persistent failure: stop retrying and propagate shutdown. 
+ raise + time.sleep(_RTC_ERROR_RETRY_DELAY_S) + else: + time.sleep(_RTC_IDLE_SLEEP_S) + + except Exception as e: + logger.error("Fatal error in RTC thread: %s", e) + logger.error(traceback.format_exc()) + self._rtc_error.set() + # Unblock any warmup waiters so the main loop doesn't spin forever + self._compile_warmup_done.set() + # Signal the top-level shutdown so strategies exit their control loops + if self._global_shutdown_event is not None: + self._global_shutdown_event.set() diff --git a/src/lerobot/rollout/inference/sync.py b/src/lerobot/rollout/inference/sync.py new file mode 100644 index 000000000..2bb05b6ab --- /dev/null +++ b/src/lerobot/rollout/inference/sync.py @@ -0,0 +1,122 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Synchronous inference engine: inline policy call per control tick.""" + +from __future__ import annotations + +import logging +from contextlib import nullcontext +from copy import copy + +import torch + +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.policies.utils import make_robot_action, prepare_observation_for_inference +from lerobot.processor import PolicyProcessorPipeline + +from .base import InferenceEngine + +logger = logging.getLogger(__name__) + + +# TODO(Steven): support relative-action policies. 
The per-tick flow refreshes +# ``RelativeActionsProcessorStep._last_state`` every call, so cached chunk +# actions popped on later ticks get reanchored to the *current* robot state and +# absolute targets drift through the chunk. Relative-action policies are +# rejected at context-build time today; RTC postprocesses the whole chunk and +# is unaffected. +# +# Candidate fix: drive the policy via ``predict_action_chunk`` and serve a +# local FIFO of postprocessed actions. Eliminates drift by construction and +# saves per-tick pre/post work, but bypasses ``select_action`` — needs +# fallbacks for SAC (raises), ACT temporal ensembling (ensembler lives in +# ``select_action``), and Diffusion-family (obs-history queues populated as a +# side effect of ``select_action``). + + +class SyncInferenceEngine(InferenceEngine): + """Inline synchronous inference: compute one action per call. + + ``get_action`` runs the full policy pipeline (pre/post-processor + + ``select_action``) on the given observation frame and returns a + CPU action tensor reordered to match the dataset action keys. 
+ """ + + def __init__( + self, + policy: PreTrainedPolicy, + preprocessor: PolicyProcessorPipeline, + postprocessor: PolicyProcessorPipeline, + dataset_features: dict, + ordered_action_keys: list[str], + task: str, + device: str | None, + robot_type: str, + ) -> None: + self._policy = policy + self._preprocessor = preprocessor + self._postprocessor = postprocessor + self._dataset_features = dataset_features + self._ordered_action_keys = ordered_action_keys + self._task = task + self._device = torch.device(device or "cpu") + self._robot_type = robot_type + logger.info( + "SyncInferenceEngine initialized (device=%s, action_keys=%d)", + self._device, + len(ordered_action_keys), + ) + + def start(self) -> None: + """No background resources to start.""" + logger.info("SyncInferenceEngine started (inline mode — no background thread)") + + def stop(self) -> None: + """No background resources to stop.""" + logger.info("SyncInferenceEngine stopped") + + def reset(self) -> None: + """Reset the policy and pre/post-processors.""" + logger.info("Resetting sync inference state (policy + processors)") + self._policy.reset() + self._preprocessor.reset() + self._postprocessor.reset() + + def get_action(self, obs_frame: dict | None) -> torch.Tensor | None: + """Run the full inference pipeline on ``obs_frame`` and return an action tensor.""" + if obs_frame is None: + return None + # Shallow copy is intentional: the caller (`send_next_action`) builds + # ``obs_frame`` fresh per tick via ``build_dataset_frame``, so the + # tensor/array values are not shared with any other reader. 
+ observation = copy(obs_frame) + autocast_ctx = ( + torch.autocast(device_type=self._device.type) + if self._device.type == "cuda" and self._policy.config.use_amp + else nullcontext() + ) + with torch.inference_mode(), autocast_ctx: + observation = prepare_observation_for_inference( + observation, self._device, self._task, self._robot_type + ) + observation = self._preprocessor(observation) + action = self._policy.select_action(observation) + action = self._postprocessor(action) + action_tensor = action.squeeze(0).cpu() + + # Reorder to match dataset action ordering so the caller can treat + # the returned tensor uniformly across backends. + action_dict = make_robot_action(action_tensor, self._dataset_features) + return torch.tensor([action_dict[k] for k in self._ordered_action_keys]) diff --git a/src/lerobot/rollout/ring_buffer.py b/src/lerobot/rollout/ring_buffer.py new file mode 100644 index 000000000..2c0a06301 --- /dev/null +++ b/src/lerobot/rollout/ring_buffer.py @@ -0,0 +1,112 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Memory-bounded ring buffer for the Highlight Reel rollout strategy.""" + +from __future__ import annotations + +from collections import deque + +import numpy as np +import torch + + +class RolloutRingBuffer: + """Fixed-capacity circular buffer for observation/action frames. 
+
+    Stores the last *N* seconds of telemetry in memory, bounded by both
+    time (``max_seconds``) and memory (``max_memory_mb``). When either
+    limit is reached the oldest frames are evicted.
+
+    .. note::
+        This class is **single-threaded**. ``append``/``drain``/``clear``
+        must all be called from the same thread (the rollout main loop).
+        Concurrent access from a background thread will corrupt
+        ``_current_bytes`` accounting.
+
+    Parameters
+    ----------
+    max_seconds:
+        Maximum duration of buffered telemetry.
+    max_memory_mb:
+        Hard memory cap in MiB. Frames are evicted when the estimated
+        total size exceeds this.
+    fps:
+        Frames per second — used to convert ``max_seconds`` to a frame
+        count.
+    """
+
+    def __init__(self, max_seconds: float = 30.0, max_memory_mb: int = 2048, fps: float = 30.0) -> None:
+        self._max_frames = int(max_seconds * fps)
+        self._max_bytes = int(max_memory_mb * 1024 * 1024)
+        self._buffer: deque[dict] = deque(maxlen=self._max_frames)
+        self._current_bytes: int = 0
+
+    # ------------------------------------------------------------------
+    # Public API
+    # ------------------------------------------------------------------
+
+    def append(self, frame: dict) -> None:
+        """Add *frame* to the buffer, evicting the oldest if at capacity."""
+        frame_bytes = _estimate_frame_bytes(frame)
+
+        # Evict oldest frames until under the memory cap. NOTE(review): ``deque(maxlen=...)`` below can also evict silently once ``_max_frames`` is reached, without ``_current_bytes`` being decremented — confirm accounting when the frame-count cap is hit first.
+        while self._current_bytes + frame_bytes > self._max_bytes and self._buffer:
+            evicted = self._buffer.popleft()
+            self._current_bytes -= _estimate_frame_bytes(evicted)
+
+        self._buffer.append(frame)
+        self._current_bytes += frame_bytes
+
+    def drain(self) -> list[dict]:
+        """Return all buffered frames and clear the buffer."""
+        frames = list(self._buffer)
+        self._buffer.clear()
+        self._current_bytes = 0
+        return frames
+
+    def clear(self) -> None:
+        """Discard all buffered frames."""
+        self._buffer.clear()
+        self._current_bytes = 0
+
+    def __len__(self) -> int:
+        return len(self._buffer)
+
+    @property
+    def estimated_bytes(self) -> int:
+        """Estimated total byte size of all buffered frames."""
+        return self._current_bytes
+
+
+# ------------------------------------------------------------------
+# Helpers
+# ------------------------------------------------------------------
+
+
+def _estimate_frame_bytes(frame: dict) -> int:
+    """Rough byte estimate for a single frame dictionary."""
+    total = 0
+    for v in frame.values():
+        if isinstance(v, torch.Tensor):
+            # Computed as nelement * element_size rather than via ``Tensor.nbytes``
+            # (not available on older torch versions) so the cap stays honoured.
+            total += v.nelement() * v.element_size()
+        elif isinstance(v, np.ndarray) or hasattr(v, "nbytes"):
+            total += v.nbytes
+        elif isinstance(v, (int, float)):
+            total += 8
+        elif isinstance(v, (str, bytes)):
+            total += len(v)
+    return max(total, 1)  # avoid zero-size frames
diff --git a/src/lerobot/rollout/robot_wrapper.py b/src/lerobot/rollout/robot_wrapper.py
new file mode 100644
index 000000000..44f744812
--- /dev/null
+++ b/src/lerobot/rollout/robot_wrapper.py
@@ -0,0 +1,79 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Thread-safe robot wrapper for concurrent observation/action access.""" + +from __future__ import annotations + +from threading import Lock +from typing import Any + +from lerobot.robots import Robot + + +class ThreadSafeRobot: + """Lock-protected wrapper around a :class:`Robot` for use with background threads. + + When RTC inference runs in a background thread while the main loop + executes actions, both threads may access the robot concurrently. + This wrapper serialises ``get_observation`` and ``send_action`` calls. + + Read-only properties are proxied without the lock since they don't + mutate hardware state. + """ + + def __init__(self, robot: Robot) -> None: + self._robot = robot + self._lock = Lock() + + # -- Lock-protected I/O -------------------------------------------------- + + def get_observation(self) -> dict[str, Any]: + with self._lock: + return self._robot.get_observation() + + def send_action(self, action: dict[str, Any] | Any) -> Any: + with self._lock: + return self._robot.send_action(action) + + # -- Read-only proxies (no lock needed) ----------------------------------- + + @property + def observation_features(self) -> dict: + return self._robot.observation_features + + @property + def action_features(self) -> dict: + return self._robot.action_features + + @property + def name(self) -> str: + return self._robot.name + + @property + def robot_type(self) -> str: + return self._robot.robot_type + + @property + def cameras(self): + return getattr(self._robot, "cameras", {}) + + @property + def is_connected(self) -> bool: + return self._robot.is_connected + + @property + def inner(self) -> Robot: + """Access the underlying robot (e.g. for connect/disconnect).""" + return self._robot diff --git a/src/lerobot/rollout/strategies/__init__.py b/src/lerobot/rollout/strategies/__init__.py new file mode 100644 index 000000000..554327073 --- /dev/null +++ b/src/lerobot/rollout/strategies/__init__.py @@ -0,0 +1,36 @@ +# Copyright 2025 The HuggingFace Inc. 
team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Rollout strategies — public API re-exports.""" + +from .base import BaseStrategy +from .core import RolloutStrategy, estimate_max_episode_seconds, safe_push_to_hub, send_next_action +from .dagger import DAggerEvents, DAggerPhase, DAggerStrategy +from .factory import create_strategy +from .highlight import HighlightStrategy +from .sentry import SentryStrategy + +__all__ = [ + "BaseStrategy", + "DAggerEvents", + "DAggerPhase", + "DAggerStrategy", + "HighlightStrategy", + "RolloutStrategy", + "SentryStrategy", + "create_strategy", + "estimate_max_episode_seconds", + "safe_push_to_hub", + "send_next_action", +] diff --git a/src/lerobot/rollout/strategies/base.py b/src/lerobot/rollout/strategies/base.py new file mode 100644 index 000000000..e47b65209 --- /dev/null +++ b/src/lerobot/rollout/strategies/base.py @@ -0,0 +1,85 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base rollout strategy: autonomous policy execution with no data recording."""
+
+from __future__ import annotations
+
+import logging
+import time
+
+from lerobot.utils.robot_utils import precise_sleep
+
+from ..context import RolloutContext
+from .core import RolloutStrategy, send_next_action
+
+logger = logging.getLogger(__name__)
+
+
+class BaseStrategy(RolloutStrategy):
+    """Autonomous policy rollout with no data recording.
+
+    All actions flow through the ``robot_action_processor`` pipeline
+    before reaching the robot.
+    """
+
+    def setup(self, ctx: RolloutContext) -> None:
+        """Initialise the inference engine."""
+        self._init_engine(ctx)
+        logger.info("Base strategy ready")
+
+    def run(self, ctx: RolloutContext) -> None:
+        """Run the autonomous control loop until shutdown or duration expires."""
+        engine = self._engine
+        cfg = ctx.runtime.cfg
+        robot = ctx.hardware.robot_wrapper
+        interpolator = self._interpolator
+
+        control_interval = interpolator.get_control_interval(cfg.fps)
+
+        start_time = time.perf_counter()
+        engine.resume()
+        logger.info("Base strategy control loop started")
+
+        while not ctx.runtime.shutdown_event.is_set():
+            loop_start = time.perf_counter()
+
+            if cfg.duration > 0 and (time.perf_counter() - start_time) >= cfg.duration:
+                logger.info("Duration limit reached (%.0fs)", cfg.duration)
+                break
+
+            obs = robot.get_observation()
+            obs_processed = self._process_observation_and_notify(ctx.processors, obs)
+
+            if self._handle_warmup(cfg.use_torch_compile, loop_start, control_interval):
+                continue
+
+            action_dict = send_next_action(obs_processed, obs, ctx, interpolator)
+            self._log_telemetry(obs_processed, action_dict, ctx.runtime)
+
+            dt = time.perf_counter() - loop_start
+            if (sleep_t := control_interval - dt) > 0:
+                precise_sleep(sleep_t)
+            else:
+                # Message names this strategy's control loop; unlike the record
+                # script this strategy saves no dataset frames, so no drop warning.
+                logger.warning(
+                    f"Rollout control loop is running slower ({1 / dt:.1f} Hz) than the target FPS ({cfg.fps} Hz). Robot control might be unstable. Common causes are: 1) Camera FPS not keeping up 2) Policy inference taking too long 3) CPU starvation"
+                )
+
+    def teardown(self, ctx: RolloutContext) -> None:
+        """Disconnect hardware and stop inference."""
+        self._teardown_hardware(
+            ctx.hardware,
+            return_to_initial_position=ctx.runtime.cfg.return_to_initial_position,
+        )
+        logger.info("Base strategy teardown complete")
diff --git a/src/lerobot/rollout/strategies/core.py b/src/lerobot/rollout/strategies/core.py
new file mode 100644
index 000000000..9c897522f
--- /dev/null
+++ b/src/lerobot/rollout/strategies/core.py
@@ -0,0 +1,304 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Rollout strategy ABC and shared action-dispatch helper.""" + +from __future__ import annotations + +import abc +import logging +import time +from typing import TYPE_CHECKING + +from lerobot.datasets.utils import DEFAULT_VIDEO_FILE_SIZE_IN_MB +from lerobot.utils.action_interpolator import ActionInterpolator +from lerobot.utils.constants import OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame +from lerobot.utils.robot_utils import precise_sleep +from lerobot.utils.visualization_utils import log_rerun_data + +from ..inference import InferenceEngine + +if TYPE_CHECKING: + from ..configs import RolloutStrategyConfig + from ..context import HardwareContext, ProcessorContext, RolloutContext, RuntimeContext + +logger = logging.getLogger(__name__) + + +class RolloutStrategy(abc.ABC): + """Abstract base for rollout execution strategies. + + Each concrete strategy implements a self-contained control loop with + its own recording/interaction semantics. Strategies are mutually + exclusive — only one runs per session. + """ + + def __init__(self, config: RolloutStrategyConfig) -> None: + self.config = config + self._engine: InferenceEngine | None = None + self._interpolator: ActionInterpolator | None = None + self._warmup_flushed: bool = False + self._cached_obs_processed: dict | None = None + + def _init_engine(self, ctx: RolloutContext) -> None: + """Attach the inference engine and action interpolator, then start the backend. + + Creates an :class:`ActionInterpolator` from the config's + ``interpolation_multiplier`` and starts the inference engine. + Call this from ``setup()`` so strategies share identical + initialisation without duplicating code. 
+ """ + self._interpolator = ActionInterpolator(multiplier=ctx.runtime.cfg.interpolation_multiplier) + self._engine = ctx.policy.inference + logger.info("Starting inference engine...") + self._engine.reset() + self._engine.start() + self._warmup_flushed = False + self._cached_obs_processed = None + logger.info("Inference engine started") + + def _process_observation_and_notify(self, processors: ProcessorContext, obs_raw: dict) -> dict: + """Run the observation processor and notify the engine — throttled to policy ticks. + + Callers are responsible for calling ``robot.get_observation()`` every loop + iteration so ``obs_raw`` stays fresh for the action post-processor. This + helper gates only the comparatively expensive bits — the processor pipeline + and ``engine.notify_observation`` — to fire when the interpolator signals + it needs a new action (once per ``interpolation_multiplier`` ticks). On + interpolated ticks the cached ``obs_processed`` is reused. + + With ``interpolation_multiplier == 1`` this is equivalent to the unthrottled + path: ``needs_new_action()`` is True every tick. + + The cache is implicitly invalidated whenever ``interpolator.reset()`` is + called (warmup completion, DAgger phase transitions back to AUTONOMOUS), + because reset makes ``needs_new_action()`` return True on the next call. + """ + if self._cached_obs_processed is None or self._interpolator.needs_new_action(): + obs_processed = processors.robot_observation_processor(obs_raw) + self._engine.notify_observation(obs_processed) + self._cached_obs_processed = obs_processed + return self._cached_obs_processed + + def _handle_warmup(self, use_torch_compile: bool, loop_start: float, control_interval: float) -> bool: + """Handle torch.compile warmup phase. + + Returns ``True`` if the caller should ``continue`` (still warming + up). On the first post-warmup iteration the engine and + interpolator are reset so stale warmup state is discarded. 
+ """ + engine = self._engine + interpolator = self._interpolator + if not use_torch_compile: + return False + if not engine.ready: + dt = time.perf_counter() - loop_start + if (sleep_t := control_interval - dt) > 0: + precise_sleep(sleep_t) + return True + if not self._warmup_flushed: + logger.info("Warmup complete — flushing stale state and resuming engine") + engine.reset() + interpolator.reset() + self._warmup_flushed = True + engine.resume() + return False + + def _teardown_hardware(self, hw: HardwareContext, return_to_initial_position: bool = True) -> None: + """Stop the inference engine, optionally return robot to initial position, and disconnect hardware.""" + if self._engine is not None: + logger.info("Stopping inference engine...") + self._engine.stop() + robot = hw.robot_wrapper.inner + if robot.is_connected: + if return_to_initial_position and hw.initial_position: + logger.info("Returning robot to initial position before shutdown...") + self._return_to_initial_position(hw) + elif not return_to_initial_position: + logger.info( + "Skipping return-to-initial-position (disabled by config); leaving robot in final pose." 
+ ) + logger.info("Disconnecting robot...") + robot.disconnect() + teleop = hw.teleop + if teleop is not None and teleop.is_connected: + logger.info("Disconnecting teleoperator...") + teleop.disconnect() + + @staticmethod + def _return_to_initial_position(hw: HardwareContext, duration_s: float = 3.0, fps: int = 50) -> None: + """Smoothly interpolate the robot back to its initial position.""" + robot = hw.robot_wrapper + target = hw.initial_position + try: + current_obs = robot.get_observation() + current_pos = {k: v for k, v in current_obs.items() if k in target} + steps = max(int(duration_s * fps), 1) + for step in range(1, steps + 1): + t = step / steps + interp = {} + for k in current_pos: + interp[k] = current_pos[k] * (1 - t) + target[k] * t + robot.send_action(interp) + precise_sleep(1 / fps) + except Exception as e: + logger.warning("Could not return to initial position: %s", e) + + @staticmethod + def _log_telemetry( + obs_processed: dict | None, + action_dict: dict | None, + runtime_ctx: RuntimeContext, + ) -> None: + """Log observation/action telemetry to Rerun if display_data is enabled.""" + cfg = runtime_ctx.cfg + if not cfg.display_data: + return + log_rerun_data( + observation=obs_processed, + action=action_dict, + compress_images=cfg.display_compressed_images, + ) + + @abc.abstractmethod + def setup(self, ctx: RolloutContext) -> None: + """Strategy-specific initialisation (keyboard listeners, buffers, etc.).""" + + @abc.abstractmethod + def run(self, ctx: RolloutContext) -> None: + """Main rollout loop. 
Returns when shutdown is requested or duration expires.""" + + @abc.abstractmethod + def teardown(self, ctx: RolloutContext) -> None: + """Cleanup: save dataset, stop threads, disconnect hardware.""" + + +# --------------------------------------------------------------------------- +# Shared helpers +# --------------------------------------------------------------------------- + + +def safe_push_to_hub(dataset, tags=None, private=False) -> bool: + """Push dataset to hub, skipping if no episodes have been saved. + + Returns ``True`` if the push was attempted, ``False`` if skipped. + """ + if dataset.num_episodes == 0: + logger.warning("No episodes saved — skipping push to hub") + return False + dataset.push_to_hub(tags=tags, private=private) + return True + + +def estimate_max_episode_seconds( + dataset_features: dict, + fps: float, + target_size_mb: float = DEFAULT_VIDEO_FILE_SIZE_IN_MB, +) -> float: + """Conservatively estimate how many seconds of video will exceed *target_size_mb*. + + Each camera produces its own video file, so the episode duration is + driven by the **slowest** camera to fill ``target_size_mb`` — i.e. + the one with the fewest pixels per frame (lowest bitrate). + + Uses a deliberately **low** bits-per-pixel estimate so the computed + duration is *longer* than reality. By the time the timer fires the + actual video file is guaranteed to have crossed the target size, + which aligns episode boundaries with the dataset's video-file + chunking — each ``push_to_hub`` uploads complete files rather than + re-uploading a still-growing one. + + The estimate ignores codec-specific settings (CRF, preset) on purpose: + we only need a rough lower bound on bitrate, not a precise prediction. + + Falls back to 300 s (5 min) when no video features are present. + """ + # 0.1 bits-per-pixel is a *low* estimate for CRF-30 streaming video of + # robot footage (real-world is typically 0.1 – 0.3 bpp). 
Under- + # estimating the bitrate over-estimates the time → the episode will be + # *larger* than target_size_mb when we save, which is what we want. + conservative_bpp = 0.1 + + # Collect per-camera pixel counts — each camera has its own video file. + camera_pixels = [] + for feat in dataset_features.values(): + if feat.get("dtype") == "video": + shape = feat.get("shape", ()) + + # (H, W, C) — bits-per-pixel is a per-spatial-pixel metric, + # so we exclude the channel dimension from the count. + if len(shape) == 3: + pixels = shape[0] * shape[1] + camera_pixels.append(pixels) + else: + raise ValueError(f"Unexpected video feature shape: {shape}") + + if not camera_pixels: + return 300.0 + + # Use the smallest camera: it produces the lowest bitrate and therefore + # takes the longest to reach the target — the conservative choice. + min_pixels = min(camera_pixels) + bits_per_frame = min_pixels * conservative_bpp + bytes_per_second = (bits_per_frame * fps) / 8 + + # Guard against division by zero just in case + if bytes_per_second <= 0: + return 300.0 + + return (target_size_mb * 1024 * 1024) / bytes_per_second + + +# --------------------------------------------------------------------------- +# Shared action-dispatch helper +# --------------------------------------------------------------------------- + + +def send_next_action( + obs_processed: dict, + obs_raw: dict, + ctx: RolloutContext, + interpolator: ActionInterpolator, +) -> dict | None: + """Dispatch the next action to the robot. + + Pulls the next action tensor from the inference engine, feeds the + interpolator, and sends the interpolated action through the + ``robot_action_processor`` to the robot. Works identically for + sync and async backends — the rollout strategy never needs to branch. + + Returns the action dict that was sent, or ``None`` if no action was + ready (e.g. empty async queue, interpolator not yet primed). 
+ """ + engine = ctx.policy.inference + features = ctx.data.dataset_features + ordered_keys = ctx.data.ordered_action_keys + + if interpolator.needs_new_action(): + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_tensor = engine.get_action(obs_frame) + if action_tensor is not None: + interpolator.add(action_tensor.cpu()) + + interp = interpolator.get() + if interp is None: + return None + + if len(interp) != len(ordered_keys): + raise ValueError(f"Interpolated tensor length ({len(interp)}) != action keys ({len(ordered_keys)})") + action_dict = {k: interp[i].item() for i, k in enumerate(ordered_keys)} + processed = ctx.processors.robot_action_processor((action_dict, obs_raw)) + ctx.hardware.robot_wrapper.send_action(processed) + return action_dict diff --git a/src/lerobot/rollout/strategies/dagger.py b/src/lerobot/rollout/strategies/dagger.py new file mode 100644 index 000000000..da4b463fc --- /dev/null +++ b/src/lerobot/rollout/strategies/dagger.py @@ -0,0 +1,767 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""DAgger rollout strategy: Human-in-the-Loop data collection. + +Implements the RaC paradigm (Recovery and Correction) for interactive +imitation learning. Alternates between autonomous policy execution and +human intervention via teleoperator. + +Input is controlled via either a keyboard or foot pedal, selected by +the ``input_device`` config field. 
Each device exposes three actions: + + 1. **pause_resume** — Toggle policy execution (AUTONOMOUS <-> PAUSED). + 2. **correction** — Toggle correction recording (PAUSED <-> CORRECTING). + 3. **upload** — Push dataset to hub on demand (corrections-only mode). + ESC (keyboard only) — Stop session. + +Recording modes: + ``record_autonomous=True``: Sentry-like continuous recording with + time-based episode rotation. Both autonomous and correction + frames are recorded; corrections tagged ``intervention=True``. + ``record_autonomous=False``: Only correction windows are recorded. + Each correction (start to stop) becomes one episode. + +Teleoperator expectations: + The user is responsible for keeping the leader arm aligned with the + follower arm at the moment a correction begins. Programmatic motor + handover (``enable_torque`` / ``disable_torque`` / ``write_goal_positions``) + is intentionally not invoked here — see the TODO in + :func:`DAggerStrategy._apply_transition` for the open design decision. 
+""" + +from __future__ import annotations + +import contextlib +import enum +import logging +import os +import sys +import time +from concurrent.futures import Future, ThreadPoolExecutor +from threading import Event, Lock +from typing import Any + +import numpy as np + +from lerobot.common.control_utils import is_headless +from lerobot.datasets import VideoEncodingManager +from lerobot.datasets.utils import DEFAULT_VIDEO_FILE_SIZE_IN_MB +from lerobot.teleoperators import Teleoperator +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame +from lerobot.utils.import_utils import _pynput_available +from lerobot.utils.pedal import start_pedal_listener +from lerobot.utils.robot_utils import precise_sleep +from lerobot.utils.utils import log_say + +from ..configs import DAggerKeyboardConfig, DAggerPedalConfig, DAggerStrategyConfig +from ..context import RolloutContext +from ..robot_wrapper import ThreadSafeRobot +from .core import RolloutStrategy, estimate_max_episode_seconds, safe_push_to_hub, send_next_action + +PYNPUT_AVAILABLE = _pynput_available +keyboard = None +if PYNPUT_AVAILABLE: + try: + if ("DISPLAY" not in os.environ) and ("linux" in sys.platform): + logging.info("No DISPLAY set. 
Skipping pynput import.") + PYNPUT_AVAILABLE = False + else: + from pynput import keyboard + except Exception as e: + PYNPUT_AVAILABLE = False + logging.info(f"Could not import pynput: {e}") + +logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# DAgger state machine +# --------------------------------------------------------------------------- + + +class DAggerPhase(enum.Enum): + """Observable phases of a DAgger episode.""" + + AUTONOMOUS = "autonomous" # Policy driving + PAUSED = "paused" # Engine paused, teleop aligned, awaiting input + CORRECTING = "correcting" # Human driving via teleop, recording interventions + + +# Valid (current_phase, event) -> next_phase +_DAGGER_TRANSITIONS: dict[tuple[DAggerPhase, str], DAggerPhase] = { + (DAggerPhase.AUTONOMOUS, "pause_resume"): DAggerPhase.PAUSED, + (DAggerPhase.PAUSED, "pause_resume"): DAggerPhase.AUTONOMOUS, + (DAggerPhase.PAUSED, "correction"): DAggerPhase.CORRECTING, + (DAggerPhase.CORRECTING, "correction"): DAggerPhase.PAUSED, +} + + +class DAggerEvents: + """Thread-safe container for DAgger input device events. + + The keyboard/pedal threads write transition requests; the main loop + consumes them. + """ + + def __init__(self) -> None: + self._lock = Lock() + self._phase = DAggerPhase.AUTONOMOUS + self._pending_transition: str | None = None + + # Session-level flags + self.stop_recording = Event() + self.upload_requested = Event() + + # -- Thread-safe phase access ------------------------------------------ + + @property + def phase(self) -> DAggerPhase: + """Current phase of the DAgger state machine.""" + with self._lock: + return self._phase + + @phase.setter + def phase(self, value: DAggerPhase) -> None: + with self._lock: + self._phase = value + + def request_transition(self, event: str) -> None: + """Request a phase transition (called from keyboard/pedal threads). 
+ + Only enqueues the request if it corresponds to a valid transition + from the current phase, preventing impossible state changes. + """ + with self._lock: + if (self._phase, event) in _DAGGER_TRANSITIONS: + self._pending_transition = event + + def consume_transition(self) -> tuple[DAggerPhase, DAggerPhase] | None: + """Consume a pending transition (called from main loop).""" + with self._lock: + if self._pending_transition is None: + return None + key = (self._phase, self._pending_transition) + self._pending_transition = None + new_phase = _DAGGER_TRANSITIONS.get(key) + if new_phase is None: + return None + old_phase = self._phase + self._phase = new_phase + return old_phase, new_phase + + def reset(self) -> None: + """Reset all transient state for a fresh session.""" + with self._lock: + self._phase = DAggerPhase.AUTONOMOUS + self._pending_transition = None + self.upload_requested.clear() + + +# --------------------------------------------------------------------------- +# Teleoperator helpers +# --------------------------------------------------------------------------- + + +# TODO(Steven): re-enable programmatic teleop alignment once we decide whether +# to enforce motor-control methods on every Teleoperator. Until then the user +# is responsible for moving the leader arm to the follower's pose at the moment +# a correction begins. +def _teleop_smooth_move_to( + teleop: Teleoperator, target_pos: dict, duration_s: float = 2.0, fps: int = 50 +) -> None: + """Smoothly move teleop to target position via linear interpolation. + + Requires the teleoperator to support motor control methods + (``enable_torque``, ``write_goal_positions``, ``get_action``). 
+ """ + teleop.enable_torque() + current = teleop.get_action() + steps = max(int(duration_s * fps), 1) + + for step in range(steps + 1): + t = step / steps + interp = {} + for k in current: + if k in target_pos: + interp[k] = current[k] * (1 - t) + target_pos[k] * t + else: + interp[k] = current[k] + teleop.write_goal_positions(interp) + time.sleep(1 / fps) + + +# --------------------------------------------------------------------------- +# Input device handlers +# --------------------------------------------------------------------------- + + +def _init_dagger_keyboard(events: DAggerEvents, cfg: DAggerKeyboardConfig): + """Initialise keyboard listener with DAgger 3-key controls. + + Returns the pynput Listener (or ``None`` in headless mode or when + pynput is unavailable). + """ + if not PYNPUT_AVAILABLE or is_headless(): + logger.warning("Headless environment or pynput unavailable — keyboard controls disabled") + return None + + # Map config key names to pynput Key objects for special keys + special_keys = { + "space": keyboard.Key.space, + "tab": keyboard.Key.tab, + "enter": keyboard.Key.enter, + } + + def _resolve_key(key) -> str | None: + """Resolve a pynput key event to a config-comparable string.""" + if key == keyboard.Key.esc: + return "esc" + for name, pynput_key in special_keys.items(): + if key == pynput_key: + return name + if hasattr(key, "char") and key.char: + return key.char + return None + + # Build mapping: resolved key string -> DAgger event name + key_to_event = { + cfg.pause_resume: "pause_resume", + cfg.correction: "correction", + } + + def on_press(key): + try: + resolved = _resolve_key(key) + if resolved is None: + return + if resolved == "esc": + logger.info("Stop recording...") + events.stop_recording.set() + return + if resolved in key_to_event: + events.request_transition(key_to_event[resolved]) + if resolved == cfg.upload: + events.upload_requested.set() + except Exception as e: + logger.debug("Key error: %s", e) + + listener = 
keyboard.Listener(on_press=on_press) + listener.start() + logger.info( + "DAgger keyboard listener started (pause_resume='%s', correction='%s', upload='%s', ESC=stop)", + cfg.pause_resume, + cfg.correction, + cfg.upload, + ) + return listener + + +def _init_dagger_pedal(events: DAggerEvents, cfg: DAggerPedalConfig): + """Initialise foot pedal listener with DAgger 3-pedal controls. + + Returns the pedal listener thread (or ``None`` if evdev is unavailable). + """ + code_to_event = { + cfg.pause_resume: "pause_resume", + cfg.correction: "correction", + } + + def on_press(code: str) -> None: + if code in code_to_event: + events.request_transition(code_to_event[code]) + if code == cfg.upload: + events.upload_requested.set() + + logger.info("Initializing DAgger foot pedal listener (device=%s)", cfg.device_path) + return start_pedal_listener(on_press, device_path=cfg.device_path) + + +# --------------------------------------------------------------------------- +# DAgger Strategy +# --------------------------------------------------------------------------- + + +class DAggerStrategy(RolloutStrategy): + """Human-in-the-Loop data collection with intervention tagging. + + State machine:: + + AUTONOMOUS --(key1)--> PAUSED --(key2)--> CORRECTING --(key2)--> PAUSED + --(key1)--> AUTONOMOUS + + Recording modes: + ``record_autonomous=True``: Sentry-like continuous recording with + time-based episode rotation. Intervention frames tagged True. + ``record_autonomous=False``: Only correction windows recorded. + Each correction = one episode. Upload on demand via key3. 
+ """ + + config: DAggerStrategyConfig + + def __init__(self, config: DAggerStrategyConfig): + super().__init__(config) + self._listener = None + self._pedal_thread = None + self._events = DAggerEvents() + self._push_executor: ThreadPoolExecutor | None = None + self._pending_push: Future | None = None + self._needs_push = Event() + self._episode_lock = Lock() + + def setup(self, ctx: RolloutContext) -> None: + """Initialise the inference engine and input device listener.""" + self._init_engine(ctx) + self._push_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="dagger-push") + target_mb = self.config.target_video_file_size_mb or DEFAULT_VIDEO_FILE_SIZE_IN_MB + self._episode_duration_s = estimate_max_episode_seconds( + ctx.data.dataset_features, ctx.runtime.cfg.fps, target_size_mb=target_mb + ) + + if self.config.input_device == "keyboard": + self._listener = _init_dagger_keyboard(self._events, self.config.keyboard) + else: + self._pedal_thread = _init_dagger_pedal(self._events, self.config.pedal) + + record_mode = "all frames (sentry-like)" if self.config.record_autonomous else "corrections only" + logger.info( + "DAgger strategy ready (input=%s, episodes=%d, record=%s, episode_duration=%.0fs)", + self.config.input_device, + self.config.num_episodes, + record_mode, + self._episode_duration_s, + ) + + def run(self, ctx: RolloutContext) -> None: + """Run DAgger episodes with human-in-the-loop intervention.""" + if self.config.record_autonomous: + self._run_continuous(ctx) + else: + self._run_corrections_only(ctx) + + def teardown(self, ctx: RolloutContext) -> None: + """Stop listeners, finalise the dataset, and disconnect hardware.""" + play_sounds = ctx.runtime.cfg.play_sounds + logger.info("Stopping DAgger recording") + log_say("Stopping DAgger recording", play_sounds) + + if self._listener is not None and not is_headless(): + logger.info("Stopping keyboard listener") + self._listener.stop() + + # Flush any queued/running push cleanly + if 
self._push_executor is not None: + logger.info("Shutting down push executor (waiting for pending pushes)...") + self._push_executor.shutdown(wait=True) + self._push_executor = None + + if ctx.data.dataset is not None: + logger.info("Finalizing dataset...") + ctx.data.dataset.finalize() + if self._needs_push.is_set() and ctx.runtime.cfg.dataset and ctx.runtime.cfg.dataset.push_to_hub: + logger.info("Pushing final dataset to hub...") + if safe_push_to_hub( + ctx.data.dataset, + tags=ctx.runtime.cfg.dataset.tags, + private=ctx.runtime.cfg.dataset.private, + ): + logger.info("Dataset uploaded to hub") + log_say("Dataset uploaded to hub", play_sounds) + + self._teardown_hardware( + ctx.hardware, + return_to_initial_position=ctx.runtime.cfg.return_to_initial_position, + ) + logger.info("DAgger strategy teardown complete") + + # ------------------------------------------------------------------ + # Continuous recording mode (record_autonomous=True) + # ------------------------------------------------------------------ + + def _run_continuous(self, ctx: RolloutContext) -> None: + """Sentry-like continuous recording with intervention tagging. + + Episodes are auto-rotated every ``episode_time_s`` seconds and + uploaded in the background every ``upload_every_n_episodes`` episodes. + Both autonomous and correction frames are recorded; corrections are + tagged with ``intervention=True``. 
+ """ + engine = self._engine + cfg = ctx.runtime.cfg + robot = ctx.hardware.robot_wrapper + teleop = ctx.hardware.teleop + dataset = ctx.data.dataset + events = self._events + interpolator = self._interpolator + features = ctx.data.dataset_features + + control_interval = interpolator.get_control_interval(cfg.fps) + record_stride = max(1, cfg.interpolation_multiplier) + task_str = cfg.dataset.single_task if cfg.dataset else cfg.task + play_sounds = cfg.play_sounds + + engine.reset() + interpolator.reset() + events.reset() + # TODO(Steven): re-enable once Teleoperator motor-control methods are + # standardised; until then the user pre-aligns the leader by hand. + # teleop.disable_torque() + engine.resume() + + last_action: dict[str, Any] | None = None + record_tick = 0 + start_time = time.perf_counter() + episode_start = time.perf_counter() + episodes_since_push = 0 + episode_duration_s = self._episode_duration_s + logger.info("DAgger continuous recording started (episode_duration=%.0fs)", episode_duration_s) + + with VideoEncodingManager(dataset): + try: + while not events.stop_recording.is_set() and not ctx.runtime.shutdown_event.is_set(): + loop_start = time.perf_counter() + + if cfg.duration > 0 and (time.perf_counter() - start_time) >= cfg.duration: + logger.info("Duration limit reached (%.0fs)", cfg.duration) + break + + # Process transitions + transition = events.consume_transition() + if transition is not None: + old_phase, new_phase = transition + self._apply_transition(old_phase, new_phase, engine, interpolator, robot, teleop) + last_action = None + + phase = events.phase + obs = robot.get_observation() + + # --- CORRECTING: human teleop control --- + # TODO(Steven): teleop runs at the same FPS as the policy. To + # decouple the two, sample teleop at its native rate and + # interpolate to the control loop's tick rate. 
+ if phase == DAggerPhase.CORRECTING: + obs_processed = ctx.processors.robot_observation_processor(obs) + teleop_action = teleop.get_action() + processed_teleop = ctx.processors.teleop_action_processor((teleop_action, obs)) + robot_action_to_send = ctx.processors.robot_action_processor((processed_teleop, obs)) + robot.send_action(robot_action_to_send) + last_action = robot_action_to_send + self._log_telemetry(obs_processed, processed_teleop, ctx.runtime) + if record_tick % record_stride == 0: + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_frame = build_dataset_frame(features, processed_teleop, prefix=ACTION) + frame = { + **obs_frame, + **action_frame, + "task": task_str, + "intervention": np.array([True], dtype=bool), + } + dataset.add_frame(frame) + record_tick += 1 + + # --- PAUSED: hold position --- + elif phase == DAggerPhase.PAUSED: + if last_action: + robot.send_action(last_action) + + # --- AUTONOMOUS: policy control --- + else: + obs_processed = self._process_observation_and_notify(ctx.processors, obs) + + if self._handle_warmup(cfg.use_torch_compile, loop_start, control_interval): + continue + + action_dict = send_next_action(obs_processed, obs, ctx, interpolator) + if action_dict is not None: + self._log_telemetry(obs_processed, action_dict, ctx.runtime) + last_action = ctx.processors.robot_action_processor((action_dict, obs)) + if record_tick % record_stride == 0: + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_frame = build_dataset_frame(features, action_dict, prefix=ACTION) + frame = { + **obs_frame, + **action_frame, + "task": task_str, + "intervention": np.array([False], dtype=bool), + } + dataset.add_frame(frame) + record_tick += 1 + + # Episode rotation derived from the video file-size target. + # Saving is deferred while a correction is ongoing so the + # episode boundary lands on a clean autonomous frame. 
+ elapsed = time.perf_counter() - episode_start + if elapsed >= episode_duration_s and phase != DAggerPhase.CORRECTING: + with self._episode_lock: + dataset.save_episode() + episodes_since_push += 1 + self._needs_push.set() + logger.info( + "Episode saved (total: %d, elapsed: %.1fs)", + dataset.num_episodes, + elapsed, + ) + log_say(f"Episode {dataset.num_episodes} saved", play_sounds) + + if episodes_since_push >= self.config.upload_every_n_episodes: + self._background_push(dataset, cfg) + episodes_since_push = 0 + + episode_start = time.perf_counter() + + dt = time.perf_counter() - loop_start + if (sleep_t := control_interval - dt) > 0: + precise_sleep(sleep_t) + else: + logger.warning( + f"Record loop is running slower ({1 / dt:.1f} Hz) than the target FPS ({cfg.fps} Hz). Dataset frames might be dropped and robot control might be unstable. Common causes are: 1) Camera FPS not keeping up 2) Policy inference taking too long 3) CPU starvation" + ) + + finally: + logger.info("DAgger continuous control loop ended — pausing engine") + engine.pause() + # TODO(Steven): re-enable once Teleoperator motor-control methods + # are standardised across all teleop implementations. + # teleop.disable_torque() + with contextlib.suppress(Exception): + with self._episode_lock: + dataset.save_episode() + self._needs_push.set() + logger.info("Final in-progress episode saved") + + # ------------------------------------------------------------------ + # Corrections-only mode (record_autonomous=False) + # ------------------------------------------------------------------ + + def _run_corrections_only(self, ctx: RolloutContext) -> None: + """Record only human correction windows. Each correction = one episode. + + The policy runs autonomously without recording. When the user + pauses and starts a correction, frames are recorded with + ``intervention=True``. Stopping the correction saves the episode. + The dataset can be uploaded on demand via the upload key/pedal. 
+ """ + engine = self._engine + cfg = ctx.runtime.cfg + robot = ctx.hardware.robot_wrapper + teleop = ctx.hardware.teleop + dataset = ctx.data.dataset + events = self._events + interpolator = self._interpolator + features = ctx.data.dataset_features + + control_interval = interpolator.get_control_interval(cfg.fps) + record_stride = max(1, cfg.interpolation_multiplier) + task_str = cfg.dataset.single_task if cfg.dataset else cfg.task + play_sounds = cfg.play_sounds + + engine.reset() + interpolator.reset() + events.reset() + # TODO(Steven): re-enable once Teleoperator motor-control methods are + # standardised; until then the user pre-aligns the leader by hand. + # teleop.disable_torque() + engine.resume() + + last_action: dict[str, Any] | None = None + start_time = time.perf_counter() + record_tick = 0 + recorded = 0 + logger.info( + "DAgger corrections-only recording started (target: %d episodes)", self.config.num_episodes + ) + + with VideoEncodingManager(dataset): + try: + while ( + recorded < self.config.num_episodes + and not events.stop_recording.is_set() + and not ctx.runtime.shutdown_event.is_set() + ): + loop_start = time.perf_counter() + + if cfg.duration > 0 and (time.perf_counter() - start_time) >= cfg.duration: + logger.info("Duration limit reached (%.0fs)", cfg.duration) + break + + # Process transitions + transition = events.consume_transition() + if transition is not None: + old_phase, new_phase = transition + self._apply_transition(old_phase, new_phase, engine, interpolator, robot, teleop) + last_action = None + + # Correction ended -> save episode (blocking if not streaming) + if old_phase == DAggerPhase.CORRECTING and new_phase == DAggerPhase.PAUSED: + with self._episode_lock: + dataset.save_episode() + recorded += 1 + self._needs_push.set() + logger.info( + "Correction %d/%d saved", + recorded, + self.config.num_episodes, + ) + log_say(f"Correction {recorded} saved", play_sounds) + + # On-demand upload + if events.upload_requested.is_set(): + 
events.upload_requested.clear() + logger.info("Upload requested by user") + self._background_push(dataset, cfg) + + phase = events.phase + obs = robot.get_observation() + + # --- CORRECTING: human teleop control + recording --- + # TODO(Steven): teleop runs at the same FPS as the policy. To + # decouple the two, sample teleop at its native rate and + # interpolate to the control loop's tick rate. + if phase == DAggerPhase.CORRECTING: + obs_processed = ctx.processors.robot_observation_processor(obs) + teleop_action = teleop.get_action() + processed_teleop = ctx.processors.teleop_action_processor((teleop_action, obs)) + robot_action_to_send = ctx.processors.robot_action_processor((processed_teleop, obs)) + robot.send_action(robot_action_to_send) + last_action = robot_action_to_send + self._log_telemetry(obs_processed, processed_teleop, ctx.runtime) + + if record_tick % record_stride == 0: + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_frame = build_dataset_frame(features, processed_teleop, prefix=ACTION) + dataset.add_frame( + { + **obs_frame, + **action_frame, + "task": task_str, + "intervention": np.array([True], dtype=bool), + } + ) + record_tick += 1 + + # --- PAUSED: hold position --- + elif phase == DAggerPhase.PAUSED: + if last_action: + robot.send_action(last_action) + + # --- AUTONOMOUS: policy control (no recording) --- + else: + obs_processed = self._process_observation_and_notify(ctx.processors, obs) + + if self._handle_warmup(cfg.use_torch_compile, loop_start, control_interval): + continue + + action_dict = send_next_action(obs_processed, obs, ctx, interpolator) + if action_dict is not None: + self._log_telemetry(obs_processed, action_dict, ctx.runtime) + last_action = ctx.processors.robot_action_processor((action_dict, obs)) + + dt = time.perf_counter() - loop_start + if (sleep_t := control_interval - dt) > 0: + precise_sleep(sleep_t) + else: + logger.warning( + f"Record loop is running slower ({1 / dt:.1f} Hz) than 
the target FPS ({cfg.fps} Hz). Dataset frames might be dropped and robot control might be unstable. Common causes are: 1) Camera FPS not keeping up 2) Policy inference taking too long 3) CPU starvation" + ) + + finally: + logger.info("DAgger corrections-only loop ended — pausing engine") + engine.pause() + # TODO(Steven): re-enable once Teleoperator motor-control methods + # are standardised across all teleop implementations. + # teleop.disable_torque() + with contextlib.suppress(Exception): + with self._episode_lock: + dataset.save_episode() + self._needs_push.set() + logger.info("Final in-progress episode saved") + + # ------------------------------------------------------------------ + # State-machine transition side-effects + # ------------------------------------------------------------------ + + @staticmethod + def _apply_transition( + old_phase: DAggerPhase, + new_phase: DAggerPhase, + engine, + interpolator, + robot: ThreadSafeRobot, + teleop: Teleoperator, + ) -> None: + """Execute side-effects for a validated phase transition.""" + logger.info("Phase transition: %s -> %s", old_phase.value, new_phase.value) + if old_phase == DAggerPhase.AUTONOMOUS and new_phase == DAggerPhase.PAUSED: + logger.info("Pausing engine — robot holds position") + engine.pause() + obs = robot.get_observation() + _robot_pos = { + k: v for k, v in obs.items() if k.endswith(".pos") and k in robot.observation_features + } + # TODO(Steven): once Teleoperator motor-control methods are + # standardised, drive the leader to the follower's pose here so the + # operator does not need to pre-align the arm by hand. Until then + # the user is responsible for the alignment. + # _teleop_smooth_move_to(teleop, _robot_pos, duration_s=2.0, fps=50) + + elif new_phase == DAggerPhase.CORRECTING: + logger.info("Entering correction mode — human teleop control") + # TODO(Steven): re-enable once Teleoperator motor-control methods + # are standardised across all teleop implementations. 
+ # teleop.disable_torque() + + elif new_phase == DAggerPhase.AUTONOMOUS: + logger.info("Resuming autonomous mode — resetting engine and interpolator") + interpolator.reset() + engine.reset() + engine.resume() + + # ------------------------------------------------------------------ + # Background push (shared by both modes) + # ------------------------------------------------------------------ + + def _background_push(self, dataset, cfg) -> None: + """Queue a Hub push on the single-worker executor. + + The executor's max_workers=1 guarantees at most one push runs at + a time; submitted tasks are queued rather than dropped. Pushes + are blocked while the operator is mid-correction to avoid + uploading a partially-recorded episode. + """ + if self._push_executor is None: + return + + if self._events.phase == DAggerPhase.CORRECTING: + logger.info("Skipping push — correction in progress") + return + + if self._pending_push is not None and not self._pending_push.done(): + logger.info("Previous push still in progress; queueing next") + + def _push(): + try: + with self._episode_lock: + if safe_push_to_hub( + dataset, + tags=cfg.dataset.tags if cfg.dataset else None, + private=cfg.dataset.private if cfg.dataset else False, + ): + self._needs_push.clear() + logger.info("Background push to hub complete") + except Exception as e: + logger.error("Background push failed: %s", e) + + self._pending_push = self._push_executor.submit(_push) + logger.info("Background push task submitted") diff --git a/src/lerobot/rollout/strategies/factory.py b/src/lerobot/rollout/strategies/factory.py new file mode 100644 index 000000000..8a9727769 --- /dev/null +++ b/src/lerobot/rollout/strategies/factory.py @@ -0,0 +1,45 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Strategy factory: config type-name → strategy class dispatch.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from .base import BaseStrategy +from .core import RolloutStrategy +from .dagger import DAggerStrategy +from .highlight import HighlightStrategy +from .sentry import SentryStrategy + +if TYPE_CHECKING: + from ..configs import RolloutStrategyConfig + + +def create_strategy(config: RolloutStrategyConfig) -> RolloutStrategy: + """Instantiate the appropriate strategy from a config object. + + Dispatches on ``config.type`` (the name registered via + ``draccus.ChoiceRegistry``). + """ + if config.type == "base": + return BaseStrategy(config) + if config.type == "sentry": + return SentryStrategy(config) + if config.type == "highlight": + return HighlightStrategy(config) + if config.type == "dagger": + return DAggerStrategy(config) + raise ValueError(f"Unknown strategy type '{config.type}'. Available: base, sentry, highlight, dagger") diff --git a/src/lerobot/rollout/strategies/highlight.py b/src/lerobot/rollout/strategies/highlight.py new file mode 100644 index 000000000..baff70da7 --- /dev/null +++ b/src/lerobot/rollout/strategies/highlight.py @@ -0,0 +1,283 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Highlight Reel strategy: on-demand recording via ring buffer.""" + +from __future__ import annotations + +import contextlib +import logging +import os +import sys +import time +from concurrent.futures import Future, ThreadPoolExecutor +from threading import Event as ThreadingEvent, Lock + +from lerobot.common.control_utils import is_headless +from lerobot.datasets import VideoEncodingManager +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame +from lerobot.utils.import_utils import _pynput_available, require_package +from lerobot.utils.robot_utils import precise_sleep +from lerobot.utils.utils import log_say + +from ..configs import HighlightStrategyConfig +from ..context import RolloutContext +from ..ring_buffer import RolloutRingBuffer +from .core import RolloutStrategy, safe_push_to_hub, send_next_action + +PYNPUT_AVAILABLE = _pynput_available +keyboard = None +if PYNPUT_AVAILABLE: + try: + if ("DISPLAY" not in os.environ) and ("linux" in sys.platform): + logging.info("No DISPLAY set. Skipping pynput import.") + PYNPUT_AVAILABLE = False + else: + from pynput import keyboard + except Exception as e: + PYNPUT_AVAILABLE = False + logging.info(f"Could not import pynput: {e}") + +logger = logging.getLogger(__name__) + + +class HighlightStrategy(RolloutStrategy): + """Autonomous rollout with on-demand recording via ring buffer. + + The robot runs autonomously while a memory-bounded ring buffer + captures continuous telemetry. When the user presses the save key: + + 1. 
The ring buffer is flushed to the dataset (last *Z* seconds). + 2. Live recording continues until the save key is pressed again. + 3. The episode is saved and the ring buffer resumes capturing. + + Requires ``streaming_encoding=True`` (enforced in config validation) + so that ``dataset.add_frame`` is a non-blocking queue put — flushing + the entire ring buffer in one tick must not stall the control loop. + """ + + config: HighlightStrategyConfig + + def __init__(self, config: HighlightStrategyConfig): + super().__init__(config) + require_package("pynput", extra="pynput-dep") + self._ring: RolloutRingBuffer | None = None + self._listener = None + self._save_requested = ThreadingEvent() + self._recording_live = ThreadingEvent() + self._push_requested = ThreadingEvent() + self._push_executor: ThreadPoolExecutor | None = None + self._pending_push: Future | None = None + self._episode_lock = Lock() + + def setup(self, ctx: RolloutContext) -> None: + """Initialise the inference engine, ring buffer, and keyboard listener.""" + self._init_engine(ctx) + + self._ring = RolloutRingBuffer( + max_seconds=self.config.ring_buffer_seconds, + max_memory_mb=self.config.ring_buffer_max_memory_mb, + fps=ctx.runtime.cfg.fps, + ) + + self._push_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="highlight-push") + logger.info( + "Ring buffer initialized (max_seconds=%.0f, max_memory=%.0fMB)", + self.config.ring_buffer_seconds, + self.config.ring_buffer_max_memory_mb, + ) + self._setup_keyboard(ctx.runtime.shutdown_event) + logger.info( + "Highlight strategy ready (buffer=%.0fs, save='%s', push='%s')", + self.config.ring_buffer_seconds, + self.config.save_key, + self.config.push_key, + ) + + def run(self, ctx: RolloutContext) -> None: + """Run the autonomous loop, buffering frames and recording on demand.""" + engine = self._engine + cfg = ctx.runtime.cfg + robot = ctx.hardware.robot_wrapper + dataset = ctx.data.dataset + ring = self._ring + interpolator = self._interpolator 
+ features = ctx.data.dataset_features + + control_interval = interpolator.get_control_interval(cfg.fps) + + engine.resume() + play_sounds = cfg.play_sounds + + start_time = time.perf_counter() + task_str = cfg.dataset.single_task if cfg.dataset else cfg.task + logger.info("Highlight strategy recording started (press '%s' to save)", self.config.save_key) + + with VideoEncodingManager(dataset): + try: + while not ctx.runtime.shutdown_event.is_set(): + loop_start = time.perf_counter() + + if cfg.duration > 0 and (time.perf_counter() - start_time) >= cfg.duration: + logger.info("Duration limit reached (%.0fs)", cfg.duration) + break + + obs = robot.get_observation() + obs_processed = self._process_observation_and_notify(ctx.processors, obs) + + if self._handle_warmup(cfg.use_torch_compile, loop_start, control_interval): + continue + + action_dict = send_next_action(obs_processed, obs, ctx, interpolator) + + if action_dict is not None: + self._log_telemetry(obs_processed, action_dict, ctx.runtime) + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_frame = build_dataset_frame(features, action_dict, prefix=ACTION) + frame = {**obs_frame, **action_frame, "task": task_str} + + # NOTE: ``is_set()`` then ``clear()`` is not atomic + # against the keyboard thread setting the flag again + # in between — but that is benign: we lose at most one + # toggle, processed on the next iteration. 
+ if self._save_requested.is_set(): + self._save_requested.clear() + if not self._recording_live.is_set(): + logger.info( + "Flushing ring buffer (%d frames) + starting live recording", + len(ring), + ) + for buffered_frame in ring.drain(): + dataset.add_frame(buffered_frame) + self._recording_live.set() + else: + dataset.add_frame(frame) + with self._episode_lock: + dataset.save_episode() + logger.info("Episode saved (total: %d)", dataset.num_episodes) + log_say( + f"Episode {dataset.num_episodes} saved", + play_sounds, + ) + self._recording_live.clear() + continue # frame already consumed — skip ring.append + + if self._push_requested.is_set(): + self._push_requested.clear() + logger.info("Push requested by user") + self._background_push(dataset, cfg) + + if self._recording_live.is_set(): + dataset.add_frame(frame) + else: + ring.append(frame) + + dt = time.perf_counter() - loop_start + if (sleep_t := control_interval - dt) > 0: + precise_sleep(sleep_t) + else: + logger.warning( + f"Record loop is running slower ({1 / dt:.1f} Hz) than the target FPS ({cfg.fps} Hz). Dataset frames might be dropped and robot control might be unstable. 
Common causes are: 1) Camera FPS not keeping up 2) Policy inference taking too long 3) CPU starvation" + ) + + finally: + logger.info("Highlight control loop ended") + if self._recording_live.is_set(): + logger.info("Saving in-progress live episode") + with contextlib.suppress(Exception), self._episode_lock: + dataset.save_episode() + + def teardown(self, ctx: RolloutContext) -> None: + """Stop listeners, finalise the dataset, and disconnect hardware.""" + play_sounds = ctx.runtime.cfg.play_sounds + logger.info("Stopping highlight recording") + log_say("Stopping highlight recording", play_sounds) + + if self._listener is not None: + logger.info("Stopping keyboard listener") + self._listener.stop() + + if self._push_executor is not None: + logger.info("Shutting down push executor (waiting for pending pushes)...") + self._push_executor.shutdown(wait=True) + self._push_executor = None + + if ctx.data.dataset is not None: + logger.info("Finalizing dataset...") + ctx.data.dataset.finalize() + if ctx.runtime.cfg.dataset and ctx.runtime.cfg.dataset.push_to_hub: + logger.info("Pushing final dataset to hub...") + if safe_push_to_hub( + ctx.data.dataset, + tags=ctx.runtime.cfg.dataset.tags, + private=ctx.runtime.cfg.dataset.private, + ): + logger.info("Dataset uploaded to hub") + log_say("Dataset uploaded to hub", play_sounds) + + self._teardown_hardware( + ctx.hardware, + return_to_initial_position=ctx.runtime.cfg.return_to_initial_position, + ) + logger.info("Highlight strategy teardown complete") + + def _setup_keyboard(self, shutdown_event: ThreadingEvent) -> None: + """Set up keyboard listener for save and push keys.""" + if is_headless(): + logger.warning("Headless environment — highlight keys unavailable") + return + + try: + save_key = self.config.save_key + push_key = self.config.push_key + + def on_press(key): + with contextlib.suppress(Exception): + if hasattr(key, "char") and key.char == save_key: + self._save_requested.set() + elif hasattr(key, "char") and 
key.char == push_key: + self._push_requested.set() + elif key == keyboard.Key.esc: + self._save_requested.clear() + shutdown_event.set() + + self._listener = keyboard.Listener(on_press=on_press) + self._listener.start() + logger.info("Keyboard listener started (save='%s', push='%s', ESC=stop)", save_key, push_key) + except ImportError: + logger.warning("pynput not available — keyboard listener disabled") + + def _background_push(self, dataset, cfg) -> None: + """Queue a Hub push on the single-worker executor.""" + if self._push_executor is None: + return + + if self._pending_push is not None and not self._pending_push.done(): + logger.info("Previous push still in progress; queueing next") + + def _push(): + try: + with self._episode_lock: + if safe_push_to_hub( + dataset, + tags=cfg.dataset.tags if cfg.dataset else None, + private=cfg.dataset.private if cfg.dataset else False, + ): + logger.info("Background push to hub complete") + except Exception as e: + logger.error("Background push failed: %s", e) + + self._pending_push = self._push_executor.submit(_push) + logger.info("Background push task submitted") diff --git a/src/lerobot/rollout/strategies/sentry.py b/src/lerobot/rollout/strategies/sentry.py new file mode 100644 index 000000000..61e38aa68 --- /dev/null +++ b/src/lerobot/rollout/strategies/sentry.py @@ -0,0 +1,231 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Sentry rollout strategy: continuous autonomous recording with auto-upload.""" + +from __future__ import annotations + +import contextlib +import logging +import time +from concurrent.futures import Future, ThreadPoolExecutor +from threading import Event, Lock + +from lerobot.datasets import VideoEncodingManager +from lerobot.datasets.utils import DEFAULT_VIDEO_FILE_SIZE_IN_MB +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame +from lerobot.utils.robot_utils import precise_sleep +from lerobot.utils.utils import log_say + +from ..configs import SentryStrategyConfig +from ..context import RolloutContext +from .core import RolloutStrategy, estimate_max_episode_seconds, safe_push_to_hub, send_next_action + +logger = logging.getLogger(__name__) + + +class SentryStrategy(RolloutStrategy): + """Continuous autonomous rollout with always-on recording. + + Episode duration is derived from camera resolution, FPS, and + ``DEFAULT_VIDEO_FILE_SIZE_IN_MB`` so that each saved episode + produces a video file that has crossed the chunk-size boundary. + This keeps ``push_to_hub`` efficient — it uploads complete video + files rather than re-uploading a still-growing one. + + The dataset is pushed to the Hub via a bounded single-worker executor + so no push is ever silently dropped and exactly one push runs at a + time. + + Policy state (hidden state, RTC queue) intentionally persists across + episode boundaries — Sentry slices one continuous rollout, the robot + does not reset between slices. + + Requires ``streaming_encoding=True`` (enforced in config validation) + to prevent disk I/O from blocking the control loop. 
+ """ + + config: SentryStrategyConfig + + def __init__(self, config: SentryStrategyConfig): + super().__init__(config) + self._push_executor: ThreadPoolExecutor | None = None + self._pending_push: Future | None = None + self._needs_push = Event() + self._episode_lock = Lock() + + def setup(self, ctx: RolloutContext) -> None: + """Initialise the inference engine and background push executor.""" + self._init_engine(ctx) + self._push_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="sentry-push") + target_mb = self.config.target_video_file_size_mb or DEFAULT_VIDEO_FILE_SIZE_IN_MB + self._episode_duration_s = estimate_max_episode_seconds( + ctx.data.dataset_features, ctx.runtime.cfg.fps, target_size_mb=target_mb + ) + logger.info( + "Sentry strategy ready (episode_duration=%.0fs, upload_every=%d eps)", + self._episode_duration_s, + self.config.upload_every_n_episodes, + ) + + def run(self, ctx: RolloutContext) -> None: + """Run the continuous recording loop with automatic episode rotation.""" + engine = self._engine + cfg = ctx.runtime.cfg + robot = ctx.hardware.robot_wrapper + dataset = ctx.data.dataset + interpolator = self._interpolator + features = ctx.data.dataset_features + + control_interval = interpolator.get_control_interval(cfg.fps) + + engine.resume() + play_sounds = cfg.play_sounds + episode_duration_s = self._episode_duration_s + + start_time = time.perf_counter() + episode_start = time.perf_counter() + episodes_since_push = 0 + task_str = cfg.dataset.single_task if cfg.dataset else cfg.task + logger.info("Sentry recording started (episode_duration=%.0fs)", episode_duration_s) + + with VideoEncodingManager(dataset): + try: + while not ctx.runtime.shutdown_event.is_set(): + loop_start = time.perf_counter() + + if cfg.duration > 0 and (time.perf_counter() - start_time) >= cfg.duration: + logger.info("Duration limit reached (%.0fs)", cfg.duration) + break + + obs = robot.get_observation() + obs_processed = 
self._process_observation_and_notify(ctx.processors, obs) + + if self._handle_warmup(cfg.use_torch_compile, loop_start, control_interval): + continue + + action_dict = send_next_action(obs_processed, obs, ctx, interpolator) + + if action_dict is not None: + self._log_telemetry(obs_processed, action_dict, ctx.runtime) + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_frame = build_dataset_frame(features, action_dict, prefix=ACTION) + frame = {**obs_frame, **action_frame, "task": task_str} + # ``add_frame`` writes to the in-progress episode buffer; the + # background pusher only ever touches *finalised* episode + # artifacts on disk. The two operate on disjoint state, so + # ``add_frame`` does not need ``_episode_lock``. + dataset.add_frame(frame) + + # Episode rotation derived from video file-size target. + # The duration is a conservative estimate so the actual + # video has crossed DEFAULT_VIDEO_FILE_SIZE_IN_MB by now, + # keeping push_to_hub efficient (uploads complete files). + elapsed = time.perf_counter() - episode_start + if elapsed >= episode_duration_s: + # ``save_episode`` finalises the in-progress episode and + # flushes it to disk; ``_episode_lock`` serialises this with + # ``push_to_hub`` (run in the background executor) so the + # pusher never reads a half-written episode. 
+ with self._episode_lock: + dataset.save_episode() + episodes_since_push += 1 + self._needs_push.set() + logger.info( + "Episode saved (total: %d, elapsed: %.1fs)", + dataset.num_episodes, + elapsed, + ) + log_say(f"Episode {dataset.num_episodes} saved", play_sounds) + + if episodes_since_push >= self.config.upload_every_n_episodes: + self._background_push(dataset, cfg) + episodes_since_push = 0 + + episode_start = time.perf_counter() + + dt = time.perf_counter() - loop_start + if (sleep_t := control_interval - dt) > 0: + precise_sleep(sleep_t) + else: + logger.warning( + f"Record loop is running slower ({1 / dt:.1f} Hz) than the target FPS ({cfg.fps} Hz). Dataset frames might be dropped and robot control might be unstable. Common causes are: 1) Camera FPS not keeping up 2) Policy inference taking too long 3) CPU starvation" + ) + + finally: + logger.info("Sentry control loop ended — saving final episode") + with contextlib.suppress(Exception): + with self._episode_lock: + dataset.save_episode() + self._needs_push.set() + + def teardown(self, ctx: RolloutContext) -> None: + """Flush pending pushes, finalise the dataset, and disconnect hardware.""" + play_sounds = ctx.runtime.cfg.play_sounds + logger.info("Stopping sentry recording") + log_say("Stopping sentry recording", play_sounds) + + # Flush any queued/running push cleanly. 
+ if self._push_executor is not None: + logger.info("Shutting down push executor (waiting for pending pushes)...") + self._push_executor.shutdown(wait=True) + self._push_executor = None + + if ctx.data.dataset is not None: + logger.info("Finalizing dataset...") + ctx.data.dataset.finalize() + if self._needs_push.is_set() and ctx.runtime.cfg.dataset and ctx.runtime.cfg.dataset.push_to_hub: + logger.info("Pushing final dataset to hub...") + if safe_push_to_hub( + ctx.data.dataset, + tags=ctx.runtime.cfg.dataset.tags, + private=ctx.runtime.cfg.dataset.private, + ): + logger.info("Dataset uploaded to hub") + log_say("Dataset uploaded to hub", play_sounds) + + self._teardown_hardware( + ctx.hardware, + return_to_initial_position=ctx.runtime.cfg.return_to_initial_position, + ) + logger.info("Sentry strategy teardown complete") + + def _background_push(self, dataset, cfg) -> None: + """Queue a Hub push on the single-worker executor. + + The executor's max_workers=1 guarantees at most one push runs at + a time; submitted tasks are queued rather than dropped. 
+ """ + if self._push_executor is None: + return + + if self._pending_push is not None and not self._pending_push.done(): + logger.info("Previous push still in progress; queueing next") + + def _push(): + try: + with self._episode_lock: + if safe_push_to_hub( + dataset, + tags=cfg.dataset.tags if cfg.dataset else None, + private=cfg.dataset.private if cfg.dataset else False, + ): + self._needs_push.clear() + logger.info("Background push to hub complete") + except Exception as e: + logger.error("Background push failed: %s", e) + + self._pending_push = self._push_executor.submit(_push) + logger.info("Background push task submitted") diff --git a/src/lerobot/scripts/convert_dataset_v21_to_v30.py b/src/lerobot/scripts/convert_dataset_v21_to_v30.py index 59e635712..1f220dc20 100644 --- a/src/lerobot/scripts/convert_dataset_v21_to_v30.py +++ b/src/lerobot/scripts/convert_dataset_v21_to_v30.py @@ -286,7 +286,7 @@ def convert_videos(root: Path, new_root: Path, video_file_size_in_mb: int): if len(set(num_eps_per_cam)) != 1: raise ValueError(f"All cams dont have same number of episodes ({num_eps_per_cam}).") - episods_metadata = [] + episodes_metadata = [] num_cameras = len(video_keys) num_episodes = num_eps_per_cam[0] for ep_idx in tqdm.tqdm(range(num_episodes), desc="convert videos"): @@ -299,9 +299,9 @@ def convert_videos(root: Path, new_root: Path, video_file_size_in_mb: int): ep_dict = {} for cam_idx in range(num_cameras): ep_dict.update(eps_metadata_per_cam[cam_idx][ep_idx]) - episods_metadata.append(ep_dict) + episodes_metadata.append(ep_dict) - return episods_metadata + return episodes_metadata def convert_videos_of_camera(root: Path, new_root: Path, video_key: str, video_file_size_in_mb: int): diff --git a/src/lerobot/scripts/lerobot_edit_dataset.py b/src/lerobot/scripts/lerobot_edit_dataset.py index 0cfb34325..a708d37a3 100644 --- a/src/lerobot/scripts/lerobot_edit_dataset.py +++ b/src/lerobot/scripts/lerobot_edit_dataset.py @@ -150,11 +150,24 @@ Show dataset 
information without feature details: --operation.type info \ --operation.show_features false -Recompute dataset statistics: +Recompute dataset statistics (saves to lerobot/pusht_recomputed_stats by default): lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type recompute_stats +Recompute stats and save to a specific new repo_id: + lerobot-edit-dataset \ + --repo_id lerobot/pusht \ + --new_repo_id lerobot/pusht_new_stats \ + --operation.type recompute_stats + +Recompute stats in-place (overwrites original dataset stats): + lerobot-edit-dataset \ + --repo_id lerobot/pusht \ + --new_repo_id lerobot/pusht \ + --operation.type recompute_stats \ + --operation.overwrite true + Recompute stats for relative actions and push to hub: lerobot-edit-dataset \ --repo_id lerobot/pusht \ @@ -256,6 +269,7 @@ class RecomputeStatsConfig(OperationConfig): relative_exclude_joints: list[str] | None = None chunk_size: int = 50 num_workers: int = 0 + overwrite: bool = False @OperationConfig.register_subclass("info") @@ -280,16 +294,30 @@ class EditDatasetConfig: push_to_hub: bool = False +def _resolve_io_paths( + repo_id: str, + new_repo_id: str | None, + root: Path | str | None, + new_root: Path | str | None, + default_new_repo_id: str | None = None, +) -> tuple[str, Path, Path]: + """Resolve input/output paths and repo_id for dataset operations. + + Returns (output_repo_id, input_path, output_path) with resolved (symlink-safe) paths. 
+ """ + input_path = (Path(root) if root else HF_LEROBOT_HOME / repo_id).resolve() + output_repo_id = new_repo_id or default_new_repo_id or repo_id + output_path = (Path(new_root) if new_root else HF_LEROBOT_HOME / output_repo_id).resolve() + return output_repo_id, input_path, output_path + + def get_output_path( repo_id: str, new_repo_id: str | None, root: Path | str | None, new_root: Path | str | None, ) -> tuple[str, Path]: - input_path = Path(root) if root else HF_LEROBOT_HOME / repo_id - - output_repo_id = new_repo_id if new_repo_id else repo_id - output_path = Path(new_root) if new_root else HF_LEROBOT_HOME / output_repo_id + output_repo_id, input_path, output_path = _resolve_io_paths(repo_id, new_repo_id, root, new_root) # In case of in-place modification, create a backup of the original dataset (if it exists) if output_path == input_path: @@ -557,7 +585,39 @@ def handle_recompute_stats(cfg: EditDatasetConfig) -> None: if not isinstance(cfg.operation, RecomputeStatsConfig): raise ValueError("Operation config must be RecomputeStatsConfig") - dataset = LeRobotDataset(cfg.repo_id, root=cfg.root) + # Determine whether this is an in-place operation + output_repo_id, input_root, output_root = _resolve_io_paths( + cfg.repo_id, + cfg.new_repo_id, + cfg.root, + cfg.new_root, + default_new_repo_id=f"{cfg.repo_id}_recomputed_stats", + ) + in_place = output_root == input_root + + if in_place and not cfg.operation.overwrite: + raise ValueError( + f"recompute_stats would overwrite the dataset in-place at {input_root}. " + "Pass --operation.overwrite true to allow in-place modification, " + "or use --new_repo_id / --new_root to write to a different location. " + f"Default output repo_id when neither is set: '{cfg.repo_id}_recomputed_stats'." + ) + + if in_place: + logging.warning( + f"Overwriting dataset stats in-place at {input_root}. The original stats will be lost." 
+ ) + dataset = LeRobotDataset(cfg.repo_id, root=input_root) + else: + logging.info(f"Copying dataset from {input_root} to {output_root}") + if output_root.exists(): + backup_path = output_root.with_name(output_root.name + "_old") + logging.warning(f"Output directory {output_root} already exists. Moving to {backup_path}") + if backup_path.exists(): + shutil.rmtree(backup_path) + shutil.move(output_root, backup_path) + shutil.copytree(input_root, output_root) + dataset = LeRobotDataset(output_repo_id, root=output_root) logging.info(f"Recomputing stats for {cfg.repo_id}") if cfg.operation.relative_action: @@ -578,7 +638,7 @@ def handle_recompute_stats(cfg: EditDatasetConfig) -> None: logging.info(f"Stats written to {dataset.root}") if cfg.push_to_hub: - logging.info(f"Pushing to hub as {dataset.meta.repo_id}...") + logging.info(f"Pushing to hub as {dataset.repo_id}...") dataset.push_to_hub() diff --git a/src/lerobot/scripts/lerobot_record.py b/src/lerobot/scripts/lerobot_record.py index fa92a296d..129696bd3 100644 --- a/src/lerobot/scripts/lerobot_record.py +++ b/src/lerobot/scripts/lerobot_record.py @@ -13,70 +13,62 @@ # limitations under the License. """ -Records a dataset. Actions for the robot can be either generated by teleoperation or by a policy. +Records a dataset via teleoperation. This is a pure data-collection +tool — no policy inference. For deploying trained policies, use +``lerobot-rollout`` instead. 
Requires: pip install 'lerobot[core_scripts]' (includes dataset + hardware + viz extras) Example: ```shell -lerobot-record \ - --robot.type=so100_follower \ - --robot.port=/dev/tty.usbmodem58760431541 \ - --robot.cameras="{laptop: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ - --robot.id=black \ - --dataset.repo_id=/ \ - --dataset.num_episodes=2 \ - --dataset.single_task="Grab the cube" \ - --dataset.streaming_encoding=true \ - --dataset.encoder_threads=2 \ +lerobot-record \\ + --robot.type=so100_follower \\ + --robot.port=/dev/tty.usbmodem58760431541 \\ + --robot.cameras="{laptop: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \\ + --robot.id=black \\ + --teleop.type=so100_leader \\ + --teleop.port=/dev/tty.usbmodem58760431551 \\ + --teleop.id=blue \\ + --dataset.repo_id=/ \\ + --dataset.num_episodes=2 \\ + --dataset.single_task="Grab the cube" \\ + --dataset.streaming_encoding=true \\ + --dataset.encoder_threads=2 \\ --display_data=true - # <- Optional: specify video codec (auto, h264, hevc, libsvtav1). Default is libsvtav1. 
\ - # --dataset.vcodec=h264 \ - # <- Teleop optional if you want to teleoperate to record or in between episodes with a policy \ - # --teleop.type=so100_leader \ - # --teleop.port=/dev/tty.usbmodem58760431551 \ - # --teleop.id=blue \ - # <- Policy optional if you want to record with a policy \ - # --policy.path=${HF_USER}/my_policy \ ``` Example recording with bimanual so100: ```shell -lerobot-record \ - --robot.type=bi_so_follower \ - --robot.left_arm_config.port=/dev/tty.usbmodem5A460822851 \ - --robot.right_arm_config.port=/dev/tty.usbmodem5A460814411 \ - --robot.id=bimanual_follower \ +lerobot-record \\ + --robot.type=bi_so_follower \\ + --robot.left_arm_config.port=/dev/tty.usbmodem5A460822851 \\ + --robot.right_arm_config.port=/dev/tty.usbmodem5A460814411 \\ + --robot.id=bimanual_follower \\ --robot.left_arm_config.cameras='{ wrist: {"type": "opencv", "index_or_path": 1, "width": 640, "height": 480, "fps": 30}, top: {"type": "opencv", "index_or_path": 3, "width": 640, "height": 480, "fps": 30}, }' --robot.right_arm_config.cameras='{ wrist: {"type": "opencv", "index_or_path": 2, "width": 640, "height": 480, "fps": 30}, front: {"type": "opencv", "index_or_path": 4, "width": 640, "height": 480, "fps": 30}, - }' \ - --teleop.type=bi_so_leader \ - --teleop.left_arm_config.port=/dev/tty.usbmodem5A460852721 \ - --teleop.right_arm_config.port=/dev/tty.usbmodem5A460819811 \ - --teleop.id=bimanual_leader \ - --display_data=true \ - --dataset.repo_id=${HF_USER}/bimanual-so-handover-cube \ - --dataset.num_episodes=25 \ - --dataset.single_task="Grab and handover the red cube to the other arm" \ - --dataset.streaming_encoding=true \ - # --dataset.vcodec=auto \ + }' \\ + --teleop.type=bi_so_leader \\ + --teleop.left_arm_config.port=/dev/tty.usbmodem5A460852721 \\ + --teleop.right_arm_config.port=/dev/tty.usbmodem5A460819811 \\ + --teleop.id=bimanual_leader \\ + --display_data=true \\ + --dataset.repo_id=${HF_USER}/bimanual-so-handover-cube \\ + --dataset.num_episodes=25 \\ 
+ --dataset.single_task="Grab and handover the red cube to the other arm" \\ + --dataset.streaming_encoding=true \\ --dataset.encoder_threads=2 ``` """ import logging import time -from dataclasses import asdict, dataclass, field -from pathlib import Path +from dataclasses import asdict, dataclass from pprint import pformat -from typing import Any - -import torch from lerobot.cameras import CameraConfig # noqa: F401 from lerobot.cameras.opencv import OpenCVCameraConfig # noqa: F401 @@ -86,11 +78,10 @@ from lerobot.cameras.zmq import ZMQCameraConfig # noqa: F401 from lerobot.common.control_utils import ( init_keyboard_listener, is_headless, - predict_action, - sanity_check_dataset_name, sanity_check_dataset_robot_compatibility, ) -from lerobot.configs import PreTrainedConfig, parser +from lerobot.configs import parser +from lerobot.configs.dataset import DatasetRecordConfig from lerobot.datasets import ( LeRobotDataset, VideoEncodingManager, @@ -98,21 +89,11 @@ from lerobot.datasets import ( create_initial_features, safe_stop_image_writer, ) -from lerobot.policies import ( - ActionInterpolator, - PreTrainedPolicy, - make_policy, - make_pre_post_processors, - make_robot_action, -) from lerobot.processor import ( - PolicyAction, - PolicyProcessorPipeline, RobotAction, RobotObservation, RobotProcessorPipeline, make_default_processors, - rename_stats, ) from lerobot.robots import ( # noqa: F401 Robot, @@ -146,7 +127,6 @@ from lerobot.teleoperators import ( # noqa: F401 ) from lerobot.teleoperators.keyboard import KeyboardTeleop from lerobot.utils.constants import ACTION, OBS_STR -from lerobot.utils.device_utils import get_safe_torch_device from lerobot.utils.feature_utils import build_dataset_frame, combine_feature_dicts from lerobot.utils.import_utils import register_third_party_plugins from lerobot.utils.robot_utils import precise_sleep @@ -157,71 +137,12 @@ from lerobot.utils.utils import ( from lerobot.utils.visualization_utils import init_rerun, log_rerun_data 
-@dataclass -class DatasetRecordConfig: - # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`). - repo_id: str - # A short but accurate description of the task performed during the recording (e.g. "Pick the Lego block and drop it in the box on the right.") - single_task: str - # Root directory where the dataset will be stored (e.g. 'dataset/path'). If None, defaults to $HF_LEROBOT_HOME/repo_id. - root: str | Path | None = None - # Limit the frames per second. - fps: int = 30 - # Number of seconds for data recording for each episode. - episode_time_s: int | float = 60 - # Number of seconds for resetting the environment after each episode. - reset_time_s: int | float = 60 - # Number of episodes to record. - num_episodes: int = 50 - # Encode frames in the dataset into video - video: bool = True - # Upload dataset to Hugging Face hub. - push_to_hub: bool = True - # Upload on private repository on the Hugging Face hub. - private: bool = False - # Add tags to your dataset on the hub. - tags: list[str] | None = None - # Number of subprocesses handling the saving of frames as PNG. Set to 0 to use threads only; - # set to ≥1 to use subprocesses, each using threads to write images. The best number of processes - # and threads depends on your system. We recommend 4 threads per camera with 0 processes. - # If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses. - num_image_writer_processes: int = 0 - # Number of threads writing the frames as png images on disk, per camera. - # Too many threads might cause unstable teleoperation fps due to main thread being blocked. - # Not enough threads might cause low camera fps. - num_image_writer_threads_per_camera: int = 4 - # Number of episodes to record before batch encoding videos - # Set to 1 for immediate encoding (default behavior), or higher for batched encoding - video_encoding_batch_size: int = 1 - # Video codec for encoding videos. 
Options: 'h264', 'hevc', 'libsvtav1', 'auto', - # or hardware-specific: 'h264_videotoolbox', 'h264_nvenc', 'h264_vaapi', 'h264_qsv'. - # Use 'auto' to auto-detect the best available hardware encoder. - vcodec: str = "libsvtav1" - # Enable streaming video encoding: encode frames in real-time during capture instead - # of writing PNG images first. Makes save_episode() near-instant. More info in the documentation: https://huggingface.co/docs/lerobot/streaming_video_encoding - streaming_encoding: bool = False - # Maximum number of frames to buffer per camera when using streaming encoding. - # ~1s buffer at 30fps. Provides backpressure if the encoder can't keep up. - encoder_queue_maxsize: int = 30 - # Number of threads per encoder instance. None = auto (codec default). - # Lower values reduce CPU usage, maps to 'lp' (via svtav1-params) for libsvtav1 and 'threads' for h264/hevc.. - encoder_threads: int | None = None - # Rename map for the observation to override the image and state keys - rename_map: dict[str, str] = field(default_factory=dict) - - def __post_init__(self): - if self.single_task is None: - raise ValueError("You need to provide a task as argument in `single_task`.") - - @dataclass class RecordConfig: robot: RobotConfig dataset: DatasetRecordConfig - # Whether to control the robot with a teleoperator + # Teleoperator to control the robot (required) teleop: TeleoperatorConfig | None = None - # Whether to control the robot with a policy - policy: PreTrainedConfig | None = None # Display all cameras on screen display_data: bool = False # Display data on a remote Rerun server @@ -234,27 +155,14 @@ class RecordConfig: play_sounds: bool = True # Resume recording on an existing dataset. 
resume: bool = False - # Action interpolation multiplier for smoother policy control (1=off, 2=2x, 3=3x) - # Only applies when using a policy (not teleop) - interpolation_multiplier: int = 1 def __post_init__(self): - # HACK: We parse again the cli args here to get the pretrained path if there was one. - policy_path = parser.get_path_arg("policy") - - if policy_path: - cli_overrides = parser.get_cli_overrides("policy") - - self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) - self.policy.pretrained_path = policy_path - - if self.teleop is None and self.policy is None: - raise ValueError("Choose a policy, a teleoperator or both to control the robot") - - @classmethod - def __get_path_fields__(cls) -> list[str]: - """This enables the parser to load config from the policy using `--policy.path=local/dir`""" - return ["policy"] + if self.teleop is None: + raise ValueError( + "A teleoperator is required for recording. " + "Use --teleop.type=... to specify one. " + "For policy-based deployment, use lerobot-rollout instead." + ) """ --------------- record_loop() data flow -------------------------- @@ -264,18 +172,14 @@ class RecordConfig: V [ robot_observation_processor ] ---> processed_obs V - .-----( ACTION LOGIC )------------------. 
- V V - [ From Teleoperator ] [ From Policy ] - | | - | [teleop.get_action] -> raw_action | [predict_action] - | | | | - | V | V - | [teleop_action_processor] | | - | | | | - '---> processed_teleop_action '---> processed_policy_action - | | - '-------------------------.-------------' + [ Teleoperator ] + | + | [teleop.get_action] -> raw_action + | | + | V + | [teleop_action_processor] + | | + '---> processed_teleop_action V [ robot_action_processor ] --> robot_action_to_send V @@ -303,13 +207,9 @@ def record_loop( ], # runs after robot dataset: LeRobotDataset | None = None, teleop: Teleoperator | list[Teleoperator] | None = None, - policy: PreTrainedPolicy | None = None, - preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]] | None = None, - postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction] | None = None, control_time_s: int | None = None, single_task: str | None = None, display_data: bool = False, - interpolator: ActionInterpolator | None = None, display_compressed_images: bool = False, ): if dataset is not None and dataset.fps != fps: @@ -340,21 +240,7 @@ def record_loop( "For multi-teleop, the list must contain exactly one KeyboardTeleop and one arm teleoperator. Currently only supported for LeKiwi robot." ) - # Reset policy and processor if they are provided - if policy is not None and preprocessor is not None and postprocessor is not None: - policy.reset() - preprocessor.reset() - postprocessor.reset() - - # Reset interpolator if provided - if interpolator is not None: - interpolator.reset() - - # Calculate control interval based on interpolation - use_interpolation = interpolator is not None and interpolator.enabled and policy is not None - control_interval = interpolator.get_control_interval(fps) if interpolator else 1 / fps - # Pre-compute action key order outside the hot loop — it won't change mid-episode. 
- action_keys = sorted(robot.action_features) if use_interpolation else [] + control_interval = 1 / fps no_action_count = 0 timestamp = 0 @@ -372,63 +258,11 @@ def record_loop( # Applies a pipeline to the raw robot observation, default is IdentityProcessor obs_processed = robot_observation_processor(obs) - if policy is not None or dataset is not None: + if dataset is not None: observation_frame = build_dataset_frame(dataset.features, obs_processed, prefix=OBS_STR) - # Track whether this iteration should be recorded to the dataset. - # Interpolated-only iterations send actions to the robot but don't record frames, - # keeping the dataset at the original fps while the robot moves at the higher rate. - is_record_frame = True - - # Get action from either policy or teleop - if policy is not None and preprocessor is not None and postprocessor is not None: - # With interpolation: only call policy when interpolator needs new action - if use_interpolation: - ran_inference = False - - if interpolator.needs_new_action(): - action_values = predict_action( - observation=observation_frame, - policy=policy, - device=get_safe_torch_device(policy.config.device), - preprocessor=preprocessor, - postprocessor=postprocessor, - use_amp=policy.config.use_amp, - task=single_task, - robot_type=robot.robot_type, - ) - act_processed_policy = make_robot_action(action_values, dataset.features) - robot_action_to_send = robot_action_processor((act_processed_policy, obs)) - - action_tensor = torch.tensor([robot_action_to_send[k] for k in action_keys]) - interpolator.add(action_tensor) - ran_inference = True - - interp_action = interpolator.get() - if interp_action is not None: - robot_action_to_send = {k: interp_action[i].item() for i, k in enumerate(action_keys)} - action_values = robot_action_to_send - else: - continue - - is_record_frame = ran_inference - else: - action_values = predict_action( - observation=observation_frame, - policy=policy, - 
device=get_safe_torch_device(policy.config.device), - preprocessor=preprocessor, - postprocessor=postprocessor, - use_amp=policy.config.use_amp, - task=single_task, - robot_type=robot.robot_type, - ) - act_processed_policy: RobotAction = make_robot_action(action_values, dataset.features) - # Applies a pipeline to the action, default is IdentityProcessor - robot_action_to_send = robot_action_processor((act_processed_policy, obs)) - action_values = robot_action_to_send - - elif policy is None and isinstance(teleop, Teleoperator): + # Get action from teleop + if isinstance(teleop, Teleoperator): act = teleop.get_action() if robot.name == "unitree_g1": teleop.send_feedback(obs) @@ -438,7 +272,7 @@ def record_loop( action_values = act_processed_teleop robot_action_to_send = robot_action_processor((act_processed_teleop, obs)) - elif policy is None and isinstance(teleop, list): + elif isinstance(teleop, list): arm_action = teleop_arm.get_action() arm_action = {f"arm_{k}": v for k, v in arm_action.items()} keyboard_action = teleop_keyboard.get_action() @@ -451,7 +285,7 @@ def record_loop( no_action_count += 1 if no_action_count == 1 or no_action_count % 10 == 0: logging.warning( - "No policy or teleoperator provided, skipping action generation. " + "No teleoperator provided, skipping action generation. " "This is likely to happen when resetting the environment without a teleop device. " "The robot won't be at its rest position at the start of the next episode." ) @@ -463,8 +297,8 @@ def record_loop( # TODO(steven, pepijn, adil): we should use a pipeline step to clip the action, so the sent action is the action that we input to the robot. 
_sent_action = robot.send_action(robot_action_to_send) - # Write to dataset (only on real policy frames, not interpolated-only iterations) - if dataset is not None and is_record_frame: + # Write to dataset + if dataset is not None: action_frame = build_dataset_frame(dataset.features, action_values, prefix=ACTION) frame = {**observation_frame, **action_frame, "task": single_task} dataset.add_frame(frame) @@ -488,7 +322,12 @@ def record_loop( @parser.wrap() -def record(cfg: RecordConfig) -> LeRobotDataset: +def record( + cfg: RecordConfig, + teleop_action_processor: RobotProcessorPipeline | None = None, + robot_action_processor: RobotProcessorPipeline | None = None, + robot_observation_processor: RobotProcessorPipeline | None = None, +) -> LeRobotDataset: init_logging() logging.info(pformat(asdict(cfg))) if cfg.display_data: @@ -502,7 +341,16 @@ def record(cfg: RecordConfig) -> LeRobotDataset: robot = make_robot_from_config(cfg.robot) teleop = make_teleoperator_from_config(cfg.teleop) if cfg.teleop is not None else None - teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors() + # Fall back to identity pipelines when the caller doesn't supply processors. 
+ if ( + teleop_action_processor is None + or robot_action_processor is None + or robot_observation_processor is None + ): + _t, _r, _o = make_default_processors() + teleop_action_processor = teleop_action_processor or _t + robot_action_processor = robot_action_processor or _r + robot_observation_processor = robot_observation_processor or _o dataset_features = combine_feature_dicts( aggregate_pipeline_dataset_features( @@ -540,8 +388,14 @@ def record(cfg: RecordConfig) -> LeRobotDataset: ) sanity_check_dataset_robot_compatibility(dataset, robot, cfg.dataset.fps, dataset_features) else: - # Create empty dataset or load existing saved episodes - sanity_check_dataset_name(cfg.dataset.repo_id, cfg.policy) + # Reject eval_ prefix — for policy evaluation use lerobot-rollout + repo_name = cfg.dataset.repo_id.split("/", 1)[-1] + if repo_name.startswith("eval_"): + raise ValueError( + "Dataset names starting with 'eval_' are reserved for policy evaluation. " + "lerobot-record is for data collection only. Use lerobot-rollout for policy deployment." 
+ ) + cfg.dataset.stamp_repo_id() dataset = LeRobotDataset.create( cfg.dataset.repo_id, cfg.dataset.fps, @@ -558,26 +412,6 @@ def record(cfg: RecordConfig) -> LeRobotDataset: encoder_threads=cfg.dataset.encoder_threads, ) - # Load pretrained policy - policy = None if cfg.policy is None else make_policy(cfg.policy, ds_meta=dataset.meta) - preprocessor = None - postprocessor = None - interpolator = None - if cfg.policy is not None: - preprocessor, postprocessor = make_pre_post_processors( - policy_cfg=cfg.policy, - pretrained_path=cfg.policy.pretrained_path, - dataset_stats=rename_stats(dataset.meta.stats, cfg.dataset.rename_map), - preprocessor_overrides={ - "device_processor": {"device": cfg.policy.device}, - "rename_observations_processor": {"rename_map": cfg.dataset.rename_map}, - }, - ) - # Create interpolator for smoother policy control - if cfg.interpolation_multiplier > 1: - interpolator = ActionInterpolator(multiplier=cfg.interpolation_multiplier) - logging.info(f"Action interpolation enabled: {cfg.interpolation_multiplier}x control rate") - robot.connect() if teleop is not None: teleop.connect() @@ -601,14 +435,10 @@ def record(cfg: RecordConfig) -> LeRobotDataset: robot_action_processor=robot_action_processor, robot_observation_processor=robot_observation_processor, teleop=teleop, - policy=policy, - preprocessor=preprocessor, - postprocessor=postprocessor, dataset=dataset, control_time_s=cfg.dataset.episode_time_s, single_task=cfg.dataset.single_task, display_data=cfg.display_data, - interpolator=interpolator, display_compressed_images=display_compressed_images, ) @@ -656,7 +486,10 @@ def record(cfg: RecordConfig) -> LeRobotDataset: listener.stop() if cfg.dataset.push_to_hub: - dataset.push_to_hub(tags=cfg.dataset.tags, private=cfg.dataset.private) + if dataset and dataset.num_episodes > 0: + dataset.push_to_hub(tags=cfg.dataset.tags, private=cfg.dataset.private) + else: + logging.warning("No episodes saved — skipping push to hub") log_say("Exiting", 
cfg.play_sounds) return dataset diff --git a/src/lerobot/scripts/lerobot_rollout.py b/src/lerobot/scripts/lerobot_rollout.py new file mode 100644 index 000000000..6a81563ee --- /dev/null +++ b/src/lerobot/scripts/lerobot_rollout.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Policy deployment engine with pluggable rollout strategies. + +``lerobot-rollout`` is the single CLI for running trained policies on +real robots. 
+ +Strategies +---------- + --strategy.type=base Autonomous rollout, no recording + --strategy.type=sentry Continuous recording with auto-upload + --strategy.type=highlight Ring buffer + keystroke save + --strategy.type=dagger Human-in-the-loop (DAgger / RaC) + +Inference backends +------------------ + --inference.type=sync One policy call per control tick (default) + --inference.type=rtc Real-Time Chunking for slow VLA models + +Usage examples +-------------- +:: + + # Base mode — quick evaluation with sync inference + lerobot-rollout \\ + --strategy.type=base \\ + --policy.path=lerobot/act_koch_real \\ + --robot.type=koch_follower \\ + --robot.port=/dev/ttyACM0 \\ + --task="pick up cube" --duration=30 + + # Base mode — RTC inference for slow VLAs (Pi0, Pi0.5, SmolVLA) + lerobot-rollout \\ + --strategy.type=base \\ + --policy.path=lerobot/pi0_base \\ + --inference.type=rtc \\ + --inference.rtc.execution_horizon=10 \\ + --inference.rtc.max_guidance_weight=10.0 \\ + --robot.type=so100_follower \\ + --robot.port=/dev/ttyACM0 \\ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \\ + --task="pick up cube" --duration=60 + + # Sentry mode — continuous recording with periodic upload + lerobot-rollout \\ + --strategy.type=sentry \\ + --strategy.upload_every_n_episodes=5 \\ + --policy.path=lerobot/pi0_base \\ + --inference.type=rtc \\ + --robot.type=so100_follower \\ + --robot.port=/dev/ttyACM0 \\ + --dataset.repo_id=user/rollout_sentry_data \\ + --dataset.single_task="patrol" --duration=3600 + + # Highlight mode — ring buffer, press 's' to save, 'h' to push + lerobot-rollout \\ + --strategy.type=highlight \\ + --strategy.ring_buffer_seconds=30 \\ + --policy.path=lerobot/act_koch_real \\ + --robot.type=koch_follower \\ + --robot.port=/dev/ttyACM0 \\ + --dataset.repo_id=user/rollout_highlight_data \\ + --dataset.single_task="pick up cube" + + # DAgger mode — human-in-the-loop corrections only + lerobot-rollout \\ + 
--strategy.type=dagger \\ + --strategy.num_episodes=20 \\ + --policy.path=outputs/pretrain/checkpoints/last/pretrained_model \\ + --robot.type=bi_openarm_follower \\ + --teleop.type=openarm_mini \\ + --dataset.repo_id=user/rollout_hil_data \\ + --dataset.single_task="Fold the T-shirt" + + # DAgger mode — continuous recording with RTC inference + lerobot-rollout \\ + --strategy.type=dagger \\ + --strategy.record_autonomous=true \\ + --strategy.num_episodes=50 \\ + --inference.type=rtc \\ + --inference.rtc.execution_horizon=10 \\ + --policy.path=user/my_pi0_policy \\ + --robot.type=so100_follower \\ + --robot.port=/dev/ttyACM0 \\ + --teleop.type=so101_leader \\ + --teleop.port=/dev/ttyACM1 \\ + --dataset.repo_id=user/rollout_dagger_rtc_data \\ + --dataset.single_task="Grasp the block" + + # With Rerun visualization and torch.compile + lerobot-rollout \\ + --strategy.type=base \\ + --policy.path=lerobot/act_koch_real \\ + --robot.type=koch_follower \\ + --robot.port=/dev/ttyACM0 \\ + --task="pick up cube" --duration=60 \\ + --display_data=true \\ + --use_torch_compile=true + + # Resume a previous sentry recording session + lerobot-rollout \\ + --strategy.type=sentry \\ + --policy.path=user/my_policy \\ + --robot.type=so100_follower \\ + --robot.port=/dev/ttyACM0 \\ + --dataset.repo_id=user/rollout_sentry_data \\ + --dataset.single_task="patrol" \\ + --resume=true +""" + +import logging + +from lerobot.cameras.opencv import OpenCVCameraConfig # noqa: F401 +from lerobot.cameras.realsense import RealSenseCameraConfig # noqa: F401 +from lerobot.cameras.zmq import ZMQCameraConfig # noqa: F401 +from lerobot.configs import parser +from lerobot.robots import ( # noqa: F401 + Robot, + RobotConfig, + bi_openarm_follower, + bi_so_follower, + earthrover_mini_plus, + hope_jr, + koch_follower, + omx_follower, + openarm_follower, + reachy2, + so_follower, + unitree_g1 as unitree_g1_robot, +) +from lerobot.rollout import RolloutConfig, build_rollout_context, create_strategy +from 
lerobot.teleoperators import ( # noqa: F401 + Teleoperator, + TeleoperatorConfig, + bi_openarm_leader, + bi_so_leader, + homunculus, + koch_leader, + omx_leader, + openarm_leader, + openarm_mini, + reachy2_teleoperator, + so_leader, + unitree_g1, +) +from lerobot.utils.import_utils import register_third_party_plugins +from lerobot.utils.process import ProcessSignalHandler +from lerobot.utils.utils import init_logging +from lerobot.utils.visualization_utils import init_rerun + +logger = logging.getLogger(__name__) + + +@parser.wrap() +def rollout(cfg: RolloutConfig): + """Main entry point for policy deployment.""" + init_logging() + + if cfg.display_data: + logger.info("Initializing Rerun visualization (ip=%s, port=%s)", cfg.display_ip, cfg.display_port) + init_rerun(session_name="rollout", ip=cfg.display_ip, port=cfg.display_port) + + signal_handler = ProcessSignalHandler(use_threads=True, display_pid=False) + shutdown_event = signal_handler.shutdown_event + + logger.info("Building rollout context...") + ctx = build_rollout_context(cfg, shutdown_event) + + strategy = create_strategy(cfg.strategy) + logger.info("Rollout strategy: %s", cfg.strategy.type) + logger.info( + "Robot: %s | FPS: %.0f | Duration: %s", + cfg.robot.type if cfg.robot else "?", + cfg.fps, + f"{cfg.duration}s" if cfg.duration > 0 else "infinite", + ) + + try: + strategy.setup(ctx) + logger.info("Rollout setup complete, starting rollout...") + strategy.run(ctx) + except KeyboardInterrupt: + logger.info("Interrupted by user") + finally: + strategy.teardown(ctx) + + logger.info("Rollout finished") + + +def main(): + """CLI entry point for ``lerobot-rollout``.""" + register_third_party_plugins() + rollout() + + +if __name__ == "__main__": + main() diff --git a/src/lerobot/scripts/lerobot_train.py b/src/lerobot/scripts/lerobot_train.py index a862c640d..856006507 100644 --- a/src/lerobot/scripts/lerobot_train.py +++ b/src/lerobot/scripts/lerobot_train.py @@ -386,7 +386,8 @@ def train(cfg: 
TrainPipelineConfig, accelerator: "Accelerator | None" = None): sampler=sampler, pin_memory=device.type == "cuda", drop_last=False, - prefetch_factor=2 if cfg.num_workers > 0 else None, + prefetch_factor=cfg.prefetch_factor if cfg.num_workers > 0 else None, + persistent_workers=cfg.persistent_workers and cfg.num_workers > 0, ) # Prepare everything with accelerator @@ -433,6 +434,9 @@ def train(cfg: TrainPipelineConfig, accelerator: "Accelerator | None" = None): for _ in range(step, cfg.steps): start_time = time.perf_counter() batch = next(dl_iter) + for cam_key in dataset.meta.camera_keys: + if cam_key in batch and batch[cam_key].dtype == torch.uint8: + batch[cam_key] = batch[cam_key].to(dtype=torch.float32) / 255.0 batch = preprocessor(batch) train_tracker.dataloading_s = time.perf_counter() - start_time diff --git a/src/lerobot/teleoperators/gamepad/gamepad_utils.py b/src/lerobot/teleoperators/gamepad/gamepad_utils.py index 9f94b6746..c1531ca84 100644 --- a/src/lerobot/teleoperators/gamepad/gamepad_utils.py +++ b/src/lerobot/teleoperators/gamepad/gamepad_utils.py @@ -15,9 +15,22 @@ # limitations under the License. 
import logging +from typing import TYPE_CHECKING + +from lerobot.utils.import_utils import _hidapi_available, _pygame_available, require_package from ..utils import TeleopEvents +if TYPE_CHECKING or _pygame_available: + import pygame +else: + pygame = None # type: ignore[assignment] + +if TYPE_CHECKING or _hidapi_available: + import hid +else: + hid = None # type: ignore[assignment] + class InputController: """Base class for input controllers that generate motion deltas.""" @@ -199,6 +212,7 @@ class GamepadController(InputController): """Generate motion deltas from gamepad input.""" def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0, deadzone=0.1): + require_package("pygame", extra="gamepad") super().__init__(x_step_size, y_step_size, z_step_size) self.deadzone = deadzone self.joystick = None @@ -206,8 +220,6 @@ class GamepadController(InputController): def start(self): """Initialize pygame and the gamepad.""" - import pygame - pygame.init() pygame.joystick.init() @@ -230,8 +242,6 @@ class GamepadController(InputController): def stop(self): """Clean up pygame resources.""" - import pygame - if pygame.joystick.get_init(): if self.joystick: self.joystick.quit() @@ -240,8 +250,6 @@ class GamepadController(InputController): def update(self): """Process pygame events to get fresh gamepad readings.""" - import pygame - for event in pygame.event.get(): if event.type == pygame.JOYBUTTONDOWN: if event.button == 3: @@ -280,8 +288,6 @@ class GamepadController(InputController): def get_deltas(self): """Get the current movement deltas from gamepad state.""" - import pygame - try: # Read joystick axes # Left stick X and Y (typically axes 0 and 1) @@ -326,6 +332,7 @@ class GamepadControllerHID(InputController): z_scale: Scaling factor for Z-axis movement deadzone: Joystick deadzone to prevent drift """ + require_package("hidapi", extra="gamepad", import_name="hid") super().__init__(x_step_size, y_step_size, z_step_size) self.deadzone = deadzone self.device = 
None @@ -342,8 +349,6 @@ class GamepadControllerHID(InputController): def find_device(self): """Look for the gamepad device by vendor and product ID.""" - import hid - devices = hid.enumerate() for device in devices: device_name = device["product_string"] @@ -357,8 +362,6 @@ class GamepadControllerHID(InputController): def start(self): """Connect to the gamepad using HIDAPI.""" - import hid - self.device_info = self.find_device() if not self.device_info: self.running = False diff --git a/src/lerobot/teleoperators/homunculus/homunculus_arm.py b/src/lerobot/teleoperators/homunculus/homunculus_arm.py index 225235b59..4ceade847 100644 --- a/src/lerobot/teleoperators/homunculus/homunculus_arm.py +++ b/src/lerobot/teleoperators/homunculus/homunculus_arm.py @@ -45,7 +45,7 @@ class HomunculusArm(Teleoperator): name = "homunculus_arm" def __init__(self, config: HomunculusArmConfig): - require_package("pyserial", extra="hardware", import_name="serial") + require_package("pyserial", extra="pyserial-dep", import_name="serial") super().__init__(config) self.config = config self.serial = serial.Serial(config.port, config.baud_rate, timeout=1) diff --git a/src/lerobot/teleoperators/homunculus/homunculus_glove.py b/src/lerobot/teleoperators/homunculus/homunculus_glove.py index 655bae726..cd503c20a 100644 --- a/src/lerobot/teleoperators/homunculus/homunculus_glove.py +++ b/src/lerobot/teleoperators/homunculus/homunculus_glove.py @@ -71,7 +71,7 @@ class HomunculusGlove(Teleoperator): name = "homunculus_glove" def __init__(self, config: HomunculusGloveConfig): - require_package("pyserial", extra="hardware", import_name="serial") + require_package("pyserial", extra="pyserial-dep", import_name="serial") super().__init__(config) self.config = config self.serial = serial.Serial(config.port, config.baud_rate, timeout=1) diff --git a/src/lerobot/teleoperators/keyboard/teleop_keyboard.py b/src/lerobot/teleoperators/keyboard/teleop_keyboard.py index ad4402809..801789bcb 100644 --- 
a/src/lerobot/teleoperators/keyboard/teleop_keyboard.py +++ b/src/lerobot/teleoperators/keyboard/teleop_keyboard.py @@ -23,7 +23,7 @@ from typing import Any from lerobot.types import RobotAction from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected -from lerobot.utils.import_utils import _pynput_available +from lerobot.utils.import_utils import _pynput_available, require_package from ..teleoperator import Teleoperator from ..utils import TeleopEvents @@ -56,6 +56,7 @@ class KeyboardTeleop(Teleoperator): name = "keyboard" def __init__(self, config: KeyboardTeleopConfig): + require_package("pynput", extra="pynput-dep") super().__init__(config) self.config = config self.robot_type = config.type diff --git a/src/lerobot/teleoperators/phone/teleop_phone.py b/src/lerobot/teleoperators/phone/teleop_phone.py index f68843194..f1af248e4 100644 --- a/src/lerobot/teleoperators/phone/teleop_phone.py +++ b/src/lerobot/teleoperators/phone/teleop_phone.py @@ -21,14 +21,24 @@ import logging import threading import time +from typing import TYPE_CHECKING -import hebi import numpy as np -from teleop import Teleop from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected +from lerobot.utils.import_utils import _hebi_available, _teleop_available, require_package from lerobot.utils.rotation import Rotation +if TYPE_CHECKING or _hebi_available: + import hebi +else: + hebi = None + +if TYPE_CHECKING or _teleop_available: + from teleop import Teleop +else: + Teleop = None + from ..teleoperator import Teleoperator from .config_phone import PhoneConfig, PhoneOS @@ -74,6 +84,8 @@ class IOSPhone(BasePhone, Teleoperator): name = "ios_phone" def __init__(self, config: PhoneConfig): + require_package("hebi-py", extra="phone", import_name="hebi") + require_package("teleop", extra="phone") super().__init__(config) self.config = config self._group = None @@ -213,6 +225,8 @@ class AndroidPhone(BasePhone, Teleoperator): name = 
"android_phone" def __init__(self, config: PhoneConfig): + require_package("hebi-py", extra="phone", import_name="hebi") + require_package("teleop", extra="phone") super().__init__(config) self.config = config self._teleop = None diff --git a/src/lerobot/teleoperators/reachy2_teleoperator/reachy2_teleoperator.py b/src/lerobot/teleoperators/reachy2_teleoperator/reachy2_teleoperator.py index db076b20f..9afb34fd7 100644 --- a/src/lerobot/teleoperators/reachy2_teleoperator/reachy2_teleoperator.py +++ b/src/lerobot/teleoperators/reachy2_teleoperator/reachy2_teleoperator.py @@ -19,7 +19,7 @@ import logging import time from typing import TYPE_CHECKING -from lerobot.utils.import_utils import _reachy2_sdk_available +from lerobot.utils.import_utils import _reachy2_sdk_available, require_package if TYPE_CHECKING or _reachy2_sdk_available: from reachy2_sdk import ReachySDK @@ -84,6 +84,7 @@ class Reachy2Teleoperator(Teleoperator): name = "reachy2_specific" def __init__(self, config: Reachy2TeleoperatorConfig): + require_package("reachy2_sdk", extra="reachy2") super().__init__(config) self.config = config diff --git a/src/lerobot/teleoperators/unitree_g1/exo_calib.py b/src/lerobot/teleoperators/unitree_g1/exo_calib.py index 05f5180ff..e977cd8b7 100644 --- a/src/lerobot/teleoperators/unitree_g1/exo_calib.py +++ b/src/lerobot/teleoperators/unitree_g1/exo_calib.py @@ -34,7 +34,7 @@ from typing import TYPE_CHECKING import numpy as np -from lerobot.utils.import_utils import _serial_available +from lerobot.utils.import_utils import _serial_available, require_package if TYPE_CHECKING or _serial_available: import serial @@ -156,6 +156,7 @@ def run_exo_calibration( """ Run interactive calibration for an exoskeleton arm. 
""" + require_package("pyserial", extra="unitree_g1", import_name="serial") try: import cv2 import matplotlib.pyplot as plt diff --git a/src/lerobot/teleoperators/unitree_g1/exo_serial.py b/src/lerobot/teleoperators/unitree_g1/exo_serial.py index 9b1c71891..ce5492537 100644 --- a/src/lerobot/teleoperators/unitree_g1/exo_serial.py +++ b/src/lerobot/teleoperators/unitree_g1/exo_serial.py @@ -76,7 +76,7 @@ class ExoskeletonArm: calibration: ExoskeletonCalibration | None = None def __post_init__(self): - require_package("pyserial", extra="hardware", import_name="serial") + require_package("pyserial", extra="unitree_g1", import_name="serial") if self.calibration_fpath.is_file(): self._load_calibration() diff --git a/src/lerobot/utils/action_interpolator.py b/src/lerobot/utils/action_interpolator.py new file mode 100644 index 000000000..222dc33b5 --- /dev/null +++ b/src/lerobot/utils/action_interpolator.py @@ -0,0 +1,116 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Action interpolation for smoother robot control. + +Provides configurable Nx control rate by interpolating between consecutive actions. +Useful with RTC and action-chunking policies to reduce jerkiness. +""" + +from torch import Tensor + + +class ActionInterpolator: + """Interpolates between consecutive actions for smoother control. 
+
+
+    When enabled with multiplier N, produces N actions per policy action
+    by linearly interpolating between the previous and current action.
+
+    Example with multiplier=3:
+        prev_action -> [1/3 interpolated, 2/3 interpolated, current_action]
+
+    This effectively multiplies the control rate for smoother motion.
+
+    Usage:
+        interpolator = ActionInterpolator(multiplier=2)  # 2x control rate
+
+        # In control loop:
+        if interpolator.needs_new_action():
+            new_action = queue.get()
+            if new_action is not None:
+                interpolator.add(new_action.cpu())
+
+        action = interpolator.get()
+        if action is not None:
+            robot.send_action(action)
+    """
+
+    def __init__(self, multiplier: int = 1):
+        """Initialize the interpolator.
+
+        Args:
+            multiplier: Control rate multiplier (1 = no interpolation, 2 = 2x, 3 = 3x, etc.)
+        """
+        if multiplier < 1:
+            raise ValueError(f"multiplier must be >= 1, got {multiplier}")
+        self.multiplier = multiplier
+        self._prev: Tensor | None = None
+        self._buffer: list[Tensor] = []
+        self._idx = 0
+
+    @property
+    def enabled(self) -> bool:
+        """Whether interpolation is active (multiplier > 1)."""
+        return self.multiplier > 1
+
+    def reset(self):
+        """Reset interpolation state (call between episodes)."""
+        self._prev = None
+        self._buffer = []
+        self._idx = 0
+
+    def needs_new_action(self) -> bool:
+        """Check if a new action is needed from the queue."""
+        return self._idx >= len(self._buffer)
+
+    def add(self, action: Tensor) -> None:
+        """Add a new action and compute interpolated sequence.
+
+        Args:
+            action: New action tensor from policy/queue (already on CPU).
+        """
+        if self.multiplier > 1 and self._prev is not None:
+            self._buffer = []
+            for i in range(1, self.multiplier + 1):
+                t = i / self.multiplier
+                interp = self._prev + t * (action - self._prev)
+                self._buffer.append(interp)
+        else:
+            # First step: no previous action yet, so run at base FPS without interpolation.
+ self._buffer = [action.clone()] + self._prev = action.clone() + self._idx = 0 + + def get(self) -> Tensor | None: + """Get the next interpolated action. + + Returns: + Next action tensor, or None if buffer is exhausted. + """ + if self._idx >= len(self._buffer): + return None + action = self._buffer[self._idx] + self._idx += 1 + return action + + def get_control_interval(self, fps: float) -> float: + """Get the control interval based on interpolation multiplier. + + Args: + fps: Base frames per second. + + Returns: + Control interval in seconds (divided by multiplier). + """ + return 1.0 / (fps * self.multiplier) diff --git a/src/lerobot/utils/import_utils.py b/src/lerobot/utils/import_utils.py index 8cd24b0fa..bfa87fb86 100644 --- a/src/lerobot/utils/import_utils.py +++ b/src/lerobot/utils/import_utils.py @@ -115,6 +115,14 @@ _feetech_sdk_available = is_package_available("feetech-servo-sdk", import_name=" _reachy2_sdk_available = is_package_available("reachy2_sdk") _can_available = is_package_available("python-can", "can") _unitree_sdk_available = is_package_available("unitree-sdk2py", "unitree_sdk2py") +_pyrealsense2_available = is_package_available("pyrealsense2") or is_package_available( + "pyrealsense2-macosx", import_name="pyrealsense2" +) +_zmq_available = is_package_available("pyzmq", import_name="zmq") +_hebi_available = is_package_available("hebi-py", import_name="hebi") +_teleop_available = is_package_available("teleop") +_placo_available = is_package_available("placo") +_hidapi_available = is_package_available("hidapi", import_name="hid") # Data / serialization _pandas_available = is_package_available("pandas") diff --git a/src/lerobot/utils/pedal.py b/src/lerobot/utils/pedal.py new file mode 100644 index 000000000..88f3db1f9 --- /dev/null +++ b/src/lerobot/utils/pedal.py @@ -0,0 +1,83 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generic foot pedal listener using evdev. + +Callers supply a callback receiving the pressed key code (e.g. ``"KEY_A"``) +and an optional device path. The listener runs in a daemon thread and +silently no-ops when :mod:`evdev` is not installed or the device is +unavailable. Strategy-specific key mapping logic lives in the caller. +""" + +from __future__ import annotations + +import logging +import threading +from collections.abc import Callable + +logger = logging.getLogger(__name__) + +DEFAULT_PEDAL_DEVICE = "/dev/input/by-id/usb-PCsensor_FootSwitch-event-kbd" + + +def start_pedal_listener( + on_press: Callable[[str], None], + device_path: str = DEFAULT_PEDAL_DEVICE, +) -> threading.Thread | None: + """Spawn a daemon thread that forwards pedal key-press codes to ``on_press``. + + Parameters + ---------- + on_press: + Callback invoked with the pressed key code string (e.g. ``"KEY_A"``) + on each pedal press event. The callback runs in the listener thread + and must be thread-safe. + device_path: + Linux input device path (e.g. ``/dev/input/by-id/...``). + + Returns + ------- + The started daemon :class:`threading.Thread`, or ``None`` when + :mod:`evdev` is not installed (optional dependency; silent no-op). 
+ """ + try: + from evdev import InputDevice, categorize, ecodes + except ImportError: + return None + + def pedal_reader() -> None: + try: + dev = InputDevice(device_path) + logger.info("Pedal connected: %s", dev.name) + for ev in dev.read_loop(): + if ev.type != ecodes.EV_KEY: + continue + key = categorize(ev) + code = key.keycode + if isinstance(code, (list, tuple)): + code = code[0] + if key.keystate != 1: # only key-down events + continue + try: + on_press(code) + except Exception as cb_err: # pragma: no cover - defensive + logger.warning("Pedal callback error: %s", cb_err) + except (FileNotFoundError, PermissionError): + pass + except Exception as e: + logger.warning("Pedal error: %s", e) + + thread = threading.Thread(target=pedal_reader, daemon=True, name="PedalListener") + thread.start() + return thread diff --git a/src/lerobot/rl/process.py b/src/lerobot/utils/process.py similarity index 100% rename from src/lerobot/rl/process.py rename to src/lerobot/utils/process.py diff --git a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/actions.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/actions.safetensors index dd7d4d0e7..5584b8643 100644 --- a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/actions.safetensors +++ b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/actions.safetensors @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c2b8f8532c7a0b776de5e536b8b54e30b1a0c2e3d5cc25a2d86fe43e40ae5e8c +oid sha256:8a31653c11eccdd4d80fd3f6a351cd54c49b8a48db1f7e9faf38fddd7900a09f size 515400 diff --git a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors index c58bb44bc..3baa80ba7 100644 --- a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors +++ 
b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:224b5fa4828aa88171b68c036e8919c1eae563e2113f03b6461eadf5bf8525a6 +oid sha256:75bf051698b37dcd7517ec8025a896ab5a0551a6dde5f89d0a3d5d50966e83e6 size 31672 diff --git a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors index 9b6ef7f5d..f3d1ff59a 100644 --- a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors +++ b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:016d2fa8fe5f58017dfd46f4632fdc19dfd751e32a2c7cde2077c6f95546d6bd +oid sha256:88e10930a10041d50f2cf369e6813ac14618d13dad1c21bdde1ac7798611c6ba size 68 diff --git a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors index 5da67a1af..bdc26816f 100644 --- a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors +++ b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eca0d87a699620e4fec7e68539b0be91e4cc933f6bf12032da52c182ab6f38cf +oid sha256:89833a5ccdb7d85c83f717ff8ec68b8e822005cb8803899acaae88c578e2e3ae size 31672 diff --git a/tests/artifacts/policies/save_policy_to_safetensors.py b/tests/artifacts/policies/save_policy_to_safetensors.py index ffb3efd03..158e3e0ef 100644 --- a/tests/artifacts/policies/save_policy_to_safetensors.py +++ b/tests/artifacts/policies/save_policy_to_safetensors.py @@ -52,6 +52,9 @@ def get_policy_stats(ds_repo_id: str, policy_name: str, policy_kwargs: dict): ) batch = 
next(iter(dataloader)) + for key in batch: + if isinstance(batch[key], torch.Tensor) and batch[key].dtype == torch.uint8: + batch[key] = batch[key].to(dtype=torch.float32) / 255.0 batch = preprocessor(batch) loss, output_dict = policy.forward(batch) @@ -82,6 +85,9 @@ def get_policy_stats(ds_repo_id: str, policy_name: str, policy_kwargs: dict): # indicating padding (those ending with "_is_pad") dataset.reader.delta_indices = None batch = next(iter(dataloader)) + for key in batch: + if isinstance(batch[key], torch.Tensor) and batch[key].dtype == torch.uint8: + batch[key] = batch[key].to(dtype=torch.float32) / 255.0 obs = {} for k in batch: # TODO: regenerate the safetensors diff --git a/tests/datasets/test_datasets.py b/tests/datasets/test_datasets.py index 6d4c41aaa..3e1a17a62 100644 --- a/tests/datasets/test_datasets.py +++ b/tests/datasets/test_datasets.py @@ -454,6 +454,35 @@ def test_tmp_video_deletion(tmp_path, empty_lerobot_dataset_factory): ) +def test_cleanup_interrupted_episode_removes_image_temp_dirs(tmp_path, empty_lerobot_dataset_factory): + """Verify interrupted episode cleanup removes temporary image directories for both image and video features.""" + features = { + "image": {"dtype": "image", "shape": DUMMY_CHW, "names": ["channels", "height", "width"]}, + "video": {"dtype": "video", "shape": DUMMY_HWC, "names": ["height", "width", "channels"]}, + } + ds = empty_lerobot_dataset_factory( + root=tmp_path / "interrupted", features=features, streaming_encoding=False + ) + # Add one frame without saving episode simulating an interruption + ds.add_frame( + { + "image": np.random.rand(*DUMMY_CHW), + "video": np.random.rand(*DUMMY_HWC), + "task": "Dummy task", + } + ) + img_dir = ds.writer._get_image_file_dir(0, "image") + vid_img_dir = ds.writer._get_image_file_dir(0, "video") + # Precondition: both temp dirs exist after add_frame. 
+ assert img_dir.exists() + assert vid_img_dir.exists() + + ds.writer.cleanup_interrupted_episode(episode_index=0) + + assert not img_dir.exists(), "image temp dir leaked after cleanup_interrupted_episode" + assert not vid_img_dir.exists(), "video temp dir leaked after cleanup_interrupted_episode" + + def test_tmp_mixed_deletion(tmp_path, empty_lerobot_dataset_factory): """Verify temporary image directories are removed appropriately when both image and video features are present.""" image_key = "image" diff --git a/tests/datasets/test_lerobot_dataset.py b/tests/datasets/test_lerobot_dataset.py index 49efa84d9..26406dea2 100644 --- a/tests/datasets/test_lerobot_dataset.py +++ b/tests/datasets/test_lerobot_dataset.py @@ -416,6 +416,18 @@ def test_create_initial_counts_zero(tmp_path): assert dataset.num_frames == 0 +def test_create_propagates_video_files_size_in_mb(tmp_path): + """video_files_size_in_mb passed to create() is reflected in the dataset metadata.""" + dataset = LeRobotDataset.create( + repo_id=DUMMY_REPO_ID, + fps=DEFAULT_FPS, + features=SIMPLE_FEATURES, + root=tmp_path / "ds", + video_files_size_in_mb=42.0, + ) + assert dataset.meta.video_files_size_in_mb == 42.0 + + def test_add_frame_works_in_write_mode(tmp_path): """add_frame() succeeds on a dataset created via create().""" dataset = LeRobotDataset.create( diff --git a/tests/envs/test_robotwin.py b/tests/envs/test_robotwin.py new file mode 100644 index 000000000..fcd45adbf --- /dev/null +++ b/tests/envs/test_robotwin.py @@ -0,0 +1,282 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Unit tests for the RoboTwin 2.0 Gymnasium wrapper. + +These tests mock out the SAPIEN-based RoboTwin runtime (task modules + +YAML config loader) so they run without the full RoboTwin installation +(SAPIEN, CuRobo, mplib, asset downloads, etc.). +""" + +from __future__ import annotations + +from contextlib import contextmanager +from unittest.mock import MagicMock, patch + +import gymnasium as gym +import numpy as np +import pytest + +from lerobot.envs.robotwin import ( + ACTION_DIM, + ROBOTWIN_CAMERA_NAMES, + ROBOTWIN_TASKS, + RoboTwinEnv, + create_robotwin_envs, +) + +# --------------------------------------------------------------------------- +# Fixtures / helpers +# --------------------------------------------------------------------------- + + +def _make_mock_task_env( + height: int = 240, + width: int = 320, + cameras: tuple[str, ...] = ROBOTWIN_CAMERA_NAMES, +) -> MagicMock: + """Return a mock that mimics the RoboTwin task class API. + + RoboTwin's real get_obs returns + {"observation": {cam: {"rgb": img}}, "joint_action": {"vector": np.ndarray}, ...} + so the mock follows the same nested shape. 
+ """ + obs_dict = { + "observation": {cam: {"rgb": np.zeros((height, width, 3), dtype=np.uint8)} for cam in cameras}, + "joint_action": {"vector": np.zeros(ACTION_DIM, dtype=np.float32)}, + "endpose": {}, + } + + mock = MagicMock() + mock.get_obs.return_value = obs_dict + mock.setup_demo.return_value = None + mock.take_action.return_value = None + mock.eval_success = False + mock.check_success.return_value = False + mock.close_env.return_value = None + return mock + + +@contextmanager +def _patch_runtime(mock_task_instance: MagicMock): + """Patch both the task-class loader and the YAML config loader so the + env can construct + reset without a real RoboTwin install.""" + task_cls = MagicMock(return_value=mock_task_instance) + fake_setup = { + "head_camera_h": 240, + "head_camera_w": 320, + "left_embodiment_config": {}, + "right_embodiment_config": {}, + "left_robot_file": "", + "right_robot_file": "", + "dual_arm_embodied": True, + "render_freq": 0, + "task_name": "beat_block_hammer", + "task_config": "demo_clean", + } + with ( + patch("lerobot.envs.robotwin._load_robotwin_task", return_value=task_cls), + patch("lerobot.envs.robotwin._load_robotwin_setup_kwargs", return_value=fake_setup), + ): + yield + + +# --------------------------------------------------------------------------- +# RoboTwinEnv unit tests +# --------------------------------------------------------------------------- + + +class TestRoboTwinEnv: + def test_observation_space_shape(self): + """observation_space should have the configured h×w×3 for every camera.""" + h, w = 240, 320 + env = RoboTwinEnv( + task_name="beat_block_hammer", + observation_height=h, + observation_width=w, + camera_names=["head_camera", "left_camera"], + ) + pixels_space = env.observation_space["pixels"] + assert pixels_space["head_camera"].shape == (h, w, 3) + assert pixels_space["left_camera"].shape == (h, w, 3) + assert "right_camera" not in pixels_space + + def test_action_space(self): + env = 
RoboTwinEnv(task_name="beat_block_hammer") + assert env.action_space.shape == (ACTION_DIM,) + assert env.action_space.dtype == np.float32 + + def test_reset_returns_correct_obs_keys(self): + mock_task = _make_mock_task_env() + env = RoboTwinEnv(task_name="beat_block_hammer") + with _patch_runtime(mock_task): + obs, info = env.reset() + + assert "pixels" in obs + for cam in ROBOTWIN_CAMERA_NAMES: + assert cam in obs["pixels"], f"Missing camera '{cam}' in obs" + assert "agent_pos" in obs + assert obs["agent_pos"].shape == (ACTION_DIM,) + assert info["is_success"] is False + + def test_reset_calls_setup_demo(self): + mock_task = _make_mock_task_env() + env = RoboTwinEnv(task_name="beat_block_hammer") + with _patch_runtime(mock_task): + env.reset(seed=42) + # setup_demo receives the full YAML-derived kwargs plus seed + is_test; + # we only assert the caller-provided bits. + assert mock_task.setup_demo.call_count == 1 + call_kwargs = mock_task.setup_demo.call_args.kwargs + assert call_kwargs["seed"] == 42 + assert call_kwargs["is_test"] is True + + def test_step_returns_correct_types(self): + mock_task = _make_mock_task_env() + env = RoboTwinEnv(task_name="beat_block_hammer") + action = np.zeros(ACTION_DIM, dtype=np.float32) + with _patch_runtime(mock_task): + env.reset() + obs, reward, terminated, truncated, info = env.step(action) + + assert isinstance(obs, dict) + assert isinstance(reward, float) + assert isinstance(terminated, bool) + assert isinstance(truncated, bool) + assert isinstance(info, dict) + + def test_step_wrong_action_shape_raises(self): + mock_task = _make_mock_task_env() + env = RoboTwinEnv(task_name="beat_block_hammer") + bad_action = np.zeros(7, dtype=np.float32) # wrong dim + with _patch_runtime(mock_task): + env.reset() + with pytest.raises(ValueError, match="Expected 1-D action"): + env.step(bad_action) + + def test_success_terminates_episode(self): + mock_task = _make_mock_task_env() + mock_task.check_success.return_value = True + env = 
RoboTwinEnv(task_name="beat_block_hammer") + action = np.zeros(ACTION_DIM, dtype=np.float32) + with _patch_runtime(mock_task): + env.reset() + _, _, terminated, _, info = env.step(action) + assert terminated is True + assert info["is_success"] is True + + def test_truncation_after_episode_length(self): + mock_task = _make_mock_task_env() + env = RoboTwinEnv(task_name="beat_block_hammer", episode_length=2) + action = np.zeros(ACTION_DIM, dtype=np.float32) + with _patch_runtime(mock_task): + env.reset() + env.step(action) # step 1 + _, _, _, truncated, _ = env.step(action) # step 2 → truncated + assert truncated is True + + def test_close_calls_close_env(self): + mock_task = _make_mock_task_env() + env = RoboTwinEnv(task_name="beat_block_hammer") + with _patch_runtime(mock_task): + env.reset() + env.close() + mock_task.close_env.assert_called_once() + + def test_black_frame_for_missing_camera(self): + """If a camera key is absent from get_obs(), a black frame is returned.""" + # Mock exposes only head_camera; we ask for both head_camera + left_camera. 
+ mock_task = _make_mock_task_env(height=10, width=10, cameras=("head_camera",)) + env = RoboTwinEnv( + task_name="beat_block_hammer", + camera_names=["head_camera", "left_camera"], + observation_height=10, + observation_width=10, + ) + with _patch_runtime(mock_task): + obs, _ = env.reset() + assert obs["pixels"]["left_camera"].shape == (10, 10, 3) + assert obs["pixels"]["left_camera"].sum() == 0 + + def test_task_and_task_description_attributes(self): + env = RoboTwinEnv(task_name="beat_block_hammer") + assert env.task == "beat_block_hammer" + assert isinstance(env.task_description, str) + + def test_deferred_init_env_is_none_before_reset(self): + env = RoboTwinEnv(task_name="beat_block_hammer") + assert env._env is None # noqa: SLF001 (testing internal state) + + +# --------------------------------------------------------------------------- +# create_robotwin_envs tests +# --------------------------------------------------------------------------- + + +class TestCreateRoboTwinEnvs: + def test_returns_correct_structure(self): + mock_task = _make_mock_task_env() + with _patch_runtime(mock_task): + envs = create_robotwin_envs( + task="beat_block_hammer", + n_envs=1, + env_cls=gym.vector.SyncVectorEnv, + ) + assert "beat_block_hammer" in envs + assert 0 in envs["beat_block_hammer"] + assert isinstance(envs["beat_block_hammer"][0], gym.vector.SyncVectorEnv) + + def test_multi_task(self): + mock_task = _make_mock_task_env() + with _patch_runtime(mock_task): + envs = create_robotwin_envs( + task="beat_block_hammer,click_bell", + n_envs=1, + env_cls=gym.vector.SyncVectorEnv, + ) + assert set(envs.keys()) == {"beat_block_hammer", "click_bell"} + + def test_unknown_task_raises(self): + with pytest.raises(ValueError, match="Unknown RoboTwin tasks"): + create_robotwin_envs( + task="not_a_real_task", + n_envs=1, + env_cls=gym.vector.SyncVectorEnv, + ) + + def test_invalid_n_envs_raises(self): + with pytest.raises(ValueError, match="n_envs must be a positive int"): + 
create_robotwin_envs( + task="beat_block_hammer", + n_envs=0, + env_cls=gym.vector.SyncVectorEnv, + ) + + +# --------------------------------------------------------------------------- +# ROBOTWIN_TASKS list +# --------------------------------------------------------------------------- + + +def test_task_list_not_empty(): + assert len(ROBOTWIN_TASKS) >= 50 + + +def test_all_tasks_are_strings(): + assert all(isinstance(t, str) and t for t in ROBOTWIN_TASKS) + + +def test_no_duplicate_tasks(): + assert len(ROBOTWIN_TASKS) == len(set(ROBOTWIN_TASKS)) diff --git a/tests/policies/rtc/test_action_interpolator.py b/tests/policies/rtc/test_action_interpolator.py index 9a4276df1..3eb239d7e 100644 --- a/tests/policies/rtc/test_action_interpolator.py +++ b/tests/policies/rtc/test_action_interpolator.py @@ -17,9 +17,9 @@ import pytest import torch -from lerobot.policies.rtc.action_interpolator import ActionInterpolator from lerobot.policies.rtc.action_queue import ActionQueue from lerobot.policies.rtc.configuration_rtc import RTCConfig +from lerobot.utils.action_interpolator import ActionInterpolator # ====================== Fixtures ====================== diff --git a/tests/policies/rtc/test_configuration_rtc.py b/tests/policies/rtc/test_configuration_rtc.py index bb4550eaa..40d171c0c 100644 --- a/tests/policies/rtc/test_configuration_rtc.py +++ b/tests/policies/rtc/test_configuration_rtc.py @@ -26,7 +26,7 @@ def test_rtc_config_default_initialization(): """Test RTCConfig initializes with default values.""" config = RTCConfig() - assert config.enabled is False + assert config.enabled is True assert config.prefix_attention_schedule == RTCAttentionSchedule.LINEAR assert config.max_guidance_weight == 10.0 assert config.execution_horizon == 10 diff --git a/tests/policies/rtc/test_rtc_relative_actions.py b/tests/policies/rtc/test_rtc_relative_actions.py index fa888ec05..66667ea56 100644 --- a/tests/policies/rtc/test_rtc_relative_actions.py +++ 
b/tests/policies/rtc/test_rtc_relative_actions.py @@ -22,7 +22,7 @@ from lerobot.configs.types import ( PolicyFeature, RTCAttentionSchedule, ) -from lerobot.processor import TransitionKey, batch_to_transition +from lerobot.processor import TransitionKey, batch_to_transition, create_transition from lerobot.processor.normalize_processor import NormalizerProcessorStep, UnnormalizerProcessorStep from lerobot.processor.relative_action_processor import ( AbsoluteActionsProcessorStep, @@ -52,6 +52,9 @@ _rtc_debug_mod = _import_rtc_module("lerobot.policies.rtc.debug_tracker", "debug _rtc_mod = _import_rtc_module("lerobot.policies.rtc.modeling_rtc", "modeling_rtc.py") RTCProcessor = _rtc_mod.RTCProcessor +_rtc_relative_mod = _import_rtc_module("lerobot.policies.rtc.relative", "relative.py") +reanchor_relative_rtc_prefix = _rtc_relative_mod.reanchor_relative_rtc_prefix + ACTION_DIM = 6 CHUNK_SIZE = 50 EXECUTION_HORIZON = 10 @@ -187,7 +190,7 @@ class TestRTCDenoiseWithRelativeLeftovers: class TestFullPipelineRelativeRTC: - """End-to-end test of the RTC + relative actions pipeline matching eval_with_real_robot.py flow.""" + """End-to-end test of the RTC + relative actions pipeline matching lerobot-rollout flow.""" def test_preprocessor_caches_state_for_postprocessor(self): """Preprocessor's relative step should cache state so postprocessor can convert back.""" @@ -218,7 +221,9 @@ class TestFullPipelineRelativeRTC: def test_roundtrip_with_identity_normalization(self): """Actions → relative → normalize → [model] → unnormalize → absolute should recover originals. - Using mean=0, std=1 normalization (identity).""" + + Using mean=0, std=1 normalization (identity). 
+ """ relative_step, normalizer, unnormalizer, absolute_step = _make_relative_pipeline() state = torch.randn(1, ACTION_DIM) @@ -240,7 +245,7 @@ class TestFullPipelineRelativeRTC: torch.testing.assert_close(recovered, actions, atol=1e-5, rtol=1e-5) def test_eval_loop_simulation(self): - """Simulate the eval_with_real_robot.py loop with relative actions. + """Simulate the lerobot-rollout loop with relative actions. Iteration 1: No leftovers → model generates relative actions → store for RTC Iteration 2: Use leftovers as RTC guidance → model generates new relative actions @@ -400,13 +405,113 @@ class TestStateRebasingApproximation: assert error_excluded < 1e-6, f"Excluded joint should have zero error, got {error_excluded}" +class TestRTCReanchoringWithStateNormalizer: + """RTC re-anchoring under non-identity OBS_STATE normalization.""" + + @staticmethod + def _build_normalizer_with_state_stats(): + """Build a relative-action preprocessor with non-trivial OBS_STATE stats.""" + features = { + ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(ACTION_DIM,)), + OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(ACTION_DIM,)), + } + norm_map = { + FeatureType.ACTION: NormalizationMode.MEAN_STD, + FeatureType.STATE: NormalizationMode.MEAN_STD, + } + stats = { + ACTION: { + "mean": torch.zeros(ACTION_DIM).numpy(), + "std": (0.5 * torch.ones(ACTION_DIM)).numpy(), + }, + OBS_STATE: { + "mean": (5.0 * torch.ones(ACTION_DIM)).numpy(), + "std": (2.0 * torch.ones(ACTION_DIM)).numpy(), + }, + } + relative_step = RelativeActionsProcessorStep(enabled=True) + normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats) + return relative_step, normalizer + + def test_reanchor_with_raw_state_matches_normalize_of_absolute_minus_state(self): + """Reanchoring with the raw cached state yields ``normalize(prev_actions_absolute - raw_state)``.""" + relative_step, normalizer = self._build_normalizer_with_state_stats() + + raw_state = torch.tensor([[7.0, 8.0, 
9.0, 10.0, 11.0, 12.0]]) + relative_step(batch_to_transition({OBS_STATE: raw_state.clone()})) + + prev_actions_absolute = torch.tensor([[2.0, 3.0, 4.0, 5.0, 6.0, 7.0]] * 5) + + result = reanchor_relative_rtc_prefix( + prev_actions_absolute=prev_actions_absolute, + current_state=relative_step.get_cached_state(), + relative_step=relative_step, + normalizer_step=normalizer, + policy_device="cpu", + ) + + expected_relative = to_relative_actions(prev_actions_absolute, raw_state, [True] * ACTION_DIM) + expected = normalizer(create_transition(action=expected_relative))[TransitionKey.ACTION] + torch.testing.assert_close(result, expected, atol=1e-5, rtol=1e-5) + + def test_reanchor_with_normalized_state_produces_wrong_result(self): + """Reanchoring with raw vs. normalized state produces meaningfully different outputs.""" + relative_step, normalizer = self._build_normalizer_with_state_stats() + + raw_state = torch.tensor([[7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]) + relative_step(batch_to_transition({OBS_STATE: raw_state.clone()})) + + normalized_obs = normalizer(batch_to_transition({OBS_STATE: raw_state.clone()})) + normalized_state = normalized_obs[TransitionKey.OBSERVATION][OBS_STATE] + assert not torch.allclose(normalized_state, raw_state) + + prev_actions_absolute = torch.tensor([[2.0, 3.0, 4.0, 5.0, 6.0, 7.0]] * 5) + + result_raw = reanchor_relative_rtc_prefix( + prev_actions_absolute=prev_actions_absolute, + current_state=raw_state, + relative_step=relative_step, + normalizer_step=normalizer, + policy_device="cpu", + ) + result_normalized = reanchor_relative_rtc_prefix( + prev_actions_absolute=prev_actions_absolute, + current_state=normalized_state, + relative_step=relative_step, + normalizer_step=normalizer, + policy_device="cpu", + ) + + max_abs_diff = (result_raw - result_normalized).abs().max() + assert max_abs_diff > 0.5, ( + f"Raw and normalized state produced near-identical outputs (max diff {max_abs_diff:.4f}); " + "OBS_STATE stats are too close to identity to be 
sensitive." + ) + + def test_engine_pipeline_cached_state_is_raw_after_full_preprocess(self): + """``get_cached_state()`` returns raw OBS_STATE after the full preprocessor pipeline runs.""" + relative_step, normalizer = self._build_normalizer_with_state_stats() + + raw_state = torch.tensor([[7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]) + + transition = batch_to_transition({OBS_STATE: raw_state.clone()}) + transition = relative_step(transition) + preprocessed = normalizer(transition) + + cached = relative_step.get_cached_state() + torch.testing.assert_close(cached, raw_state, atol=1e-6, rtol=1e-6) + + post_normalize_state = preprocessed[TransitionKey.OBSERVATION][OBS_STATE] + assert not torch.allclose(cached, post_normalize_state, atol=1e-3) + + def _detect_relative_actions(preprocessor) -> bool: - """Mirror of the helper in eval_with_real_robot.py for testing without importing it.""" + """Mirror of the helper in lerobot-rollout for testing without importing it.""" return any(isinstance(step, RelativeActionsProcessorStep) and step.enabled for step in preprocessor.steps) class TestDetectRelativeActions: - """Test the _detect_relative_actions helper logic used by eval_with_real_robot.py.""" + """Test the _detect_relative_actions helper logic used by lerobot-rollout.""" def test_detects_enabled_relative_step(self): class FakePipeline: diff --git a/tests/policies/test_policies.py b/tests/policies/test_policies.py index 2d50446fe..e9388b3ed 100644 --- a/tests/policies/test_policies.py +++ b/tests/policies/test_policies.py @@ -196,6 +196,8 @@ def test_policy(ds_repo_id, env_name, env_kwargs, policy_name, policy_kwargs): for key in batch: if isinstance(batch[key], torch.Tensor): + if batch[key].dtype == torch.uint8: + batch[key] = batch[key].to(dtype=torch.float32) / 255.0 batch[key] = batch[key].to(DEVICE, non_blocking=True) # Test updating the policy (and test that it does not mutate the batch) diff --git a/tests/teleoperators/test_reachy2_teleoperator.py 
b/tests/teleoperators/test_reachy2_teleoperator.py index dd8c5904c..f0274cb75 100644 --- a/tests/teleoperators/test_reachy2_teleoperator.py +++ b/tests/teleoperators/test_reachy2_teleoperator.py @@ -18,6 +18,11 @@ from unittest.mock import MagicMock, patch import pytest +from lerobot.utils.import_utils import is_package_available + +if not is_package_available("reachy2_sdk"): + pytest.skip("reachy2_sdk not available", allow_module_level=True) + from lerobot.teleoperators.reachy2_teleoperator import ( REACHY2_ANTENNAS_JOINTS, REACHY2_L_ARM_JOINTS, diff --git a/tests/test_cli_peft.py b/tests/test_cli_peft.py index 5d653ee6b..82f41affa 100644 --- a/tests/test_cli_peft.py +++ b/tests/test_cli_peft.py @@ -24,10 +24,6 @@ def lerobot_train(args): return run_command(cmd="lerobot-train", module="lerobot_train", args=args) -def lerobot_record(args): - return run_command(cmd="lerobot-record", module="lerobot_record", args=args) - - def resolve_model_id_for_peft_training(policy_type): """PEFT training needs pretrained models, this finds the pretrained model of a policy type for PEFT training.""" if policy_type == "smolvla": @@ -155,81 +151,3 @@ def test_peft_training_params_are_fewer(policy_type, tmp_path): f"--output_dir={output_dir}", ] ) - - -class DummyRobot: - name = "dummy" - cameras = [] - action_features = {"foo": 1.0, "bar": 2.0} - observation_features = {"obs1": 1.0, "obs2": 2.0} - is_connected = True - - def connect(self, *args): - pass - - def disconnect(self): - pass - - -def dummy_make_robot_from_config(*args, **kwargs): - return DummyRobot() - - -@pytest.mark.parametrize("policy_type", ["smolvla"]) -@skip_if_package_missing("peft") -def test_peft_record_loads_policy(policy_type, tmp_path): - """Train a policy with PEFT and attempt to load it with `lerobot-record`.""" - from peft import PeftModel - - output_dir = tmp_path / f"output_{policy_type}" - model_id = resolve_model_id_for_peft_training(policy_type) - - lerobot_train( - [ - f"--policy.path={model_id}", - 
"--policy.push_to_hub=false", - "--policy.input_features=null", - "--policy.output_features=null", - "--peft.method=LORA", - "--dataset.repo_id=lerobot/pusht", - "--dataset.episodes=[0, 1]", - "--steps=1", - f"--output_dir={output_dir}", - ] - ) - - policy_dir = output_dir / "checkpoints" / "last" / "pretrained_model" - dataset_dir = tmp_path / "eval_pusht" - single_task = "move the table" - loaded_policy = None - - def dummy_record_loop(*args, **kwargs): - nonlocal loaded_policy - - if "dataset" not in kwargs: - return - - dataset = kwargs["dataset"] - dataset.add_frame({"task": single_task}) - loaded_policy = kwargs["policy"] - - with ( - patch("lerobot.scripts.lerobot_record.make_robot_from_config", dummy_make_robot_from_config), - # disable record loop since we're only interested in successful loading of the policy. - patch("lerobot.scripts.lerobot_record.record_loop", dummy_record_loop), - # disable speech output - patch("lerobot.utils.utils.say"), - ): - lerobot_record( - [ - f"--policy.path={policy_dir}", - "--robot.type=so101_follower", - "--robot.port=/dev/null", - "--dataset.repo_id=lerobot/eval_pusht", - f'--dataset.single_task="{single_task}"', - f"--dataset.root={dataset_dir}", - "--dataset.push_to_hub=false", - ] - ) - - assert isinstance(loaded_policy, PeftModel) diff --git a/tests/test_control_robot.py b/tests/test_control_robot.py index 28e91a149..dd10c0c1c 100644 --- a/tests/test_control_robot.py +++ b/tests/test_control_robot.py @@ -21,8 +21,9 @@ import pytest pytest.importorskip("datasets", reason="datasets is required (install lerobot[dataset])") pytest.importorskip("deepdiff", reason="deepdiff is required (install lerobot[hardware])") +from lerobot.configs.dataset import DatasetRecordConfig from lerobot.scripts.lerobot_calibrate import CalibrateConfig, calibrate -from lerobot.scripts.lerobot_record import DatasetRecordConfig, RecordConfig, record +from lerobot.scripts.lerobot_record import RecordConfig, record from 
lerobot.scripts.lerobot_replay import DatasetReplayConfig, ReplayConfig, replay from lerobot.scripts.lerobot_teleoperate import TeleoperateConfig, teleoperate from tests.fixtures.constants import DUMMY_REPO_ID diff --git a/tests/test_robomme_env.py b/tests/test_robomme_env.py new file mode 100644 index 000000000..20646430a --- /dev/null +++ b/tests/test_robomme_env.py @@ -0,0 +1,232 @@ +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Unit tests for the RoboMME env wrapper and config. + +RoboMME requires Linux + ManiSkill (Vulkan/SAPIEN), so tests that touch the +env wrapper mock the ``robomme`` package. Tests that only exercise the +dataclass config run without any mocking. 
+""" + +from __future__ import annotations + +import sys +from types import ModuleType +from unittest.mock import MagicMock + +import numpy as np + + +def _install_robomme_stub(): + """Register a minimal stub for the ``robomme`` package on sys.modules.""" + stub = ModuleType("robomme") + wrapper_stub = ModuleType("robomme.env_record_wrapper") + + class FakeBuilder: + def __init__(self, **kwargs): + pass + + def make_env_for_episode(self, episode_idx: int, max_steps: int): + env = MagicMock() + obs = { + "front_rgb_list": [np.zeros((256, 256, 3), dtype=np.uint8)], + "wrist_rgb_list": [np.zeros((256, 256, 3), dtype=np.uint8)], + "joint_state_list": [np.zeros(7, dtype=np.float32)], + "gripper_state_list": [np.zeros(2, dtype=np.float32)], + } + env.reset.return_value = (obs, {"status": "ongoing", "task_goal": "pick the cube"}) + env.step.return_value = (obs, 0.0, False, False, {"status": "ongoing", "task_goal": ""}) + return env + + wrapper_stub.BenchmarkEnvBuilder = FakeBuilder + stub.env_record_wrapper = wrapper_stub + sys.modules["robomme"] = stub + sys.modules["robomme.env_record_wrapper"] = wrapper_stub + + +def _uninstall_robomme_stub(): + sys.modules.pop("robomme", None) + sys.modules.pop("robomme.env_record_wrapper", None) + + +# --------------------------------------------------------------------------- +# Config tests (no sim required) +# --------------------------------------------------------------------------- + + +def test_robomme_env_config_defaults(): + from lerobot.envs.configs import RoboMMEEnv + + cfg = RoboMMEEnv() + assert cfg.task == "PickXtimes" + assert cfg.fps == 10 + assert cfg.episode_length == 300 + assert cfg.action_space == "joint_angle" + assert cfg.dataset_split == "test" + assert cfg.task_ids is None + + +def test_robomme_env_config_type(): + from lerobot.envs.configs import RoboMMEEnv + + cfg = RoboMMEEnv() + assert cfg.type == "robomme" + + +def test_robomme_features_map(): + from lerobot.envs.configs import RoboMMEEnv + from 
lerobot.utils.constants import ACTION, OBS_IMAGES, OBS_STATE + + cfg = RoboMMEEnv() + assert cfg.features_map[ACTION] == ACTION + assert cfg.features_map["pixels/image"] == f"{OBS_IMAGES}.image" + assert cfg.features_map["pixels/wrist_image"] == f"{OBS_IMAGES}.wrist_image" + assert cfg.features_map["agent_pos"] == OBS_STATE + + +def test_robomme_features_action_dim_joint_angle(): + from lerobot.envs.configs import RoboMMEEnv + from lerobot.utils.constants import ACTION + + cfg = RoboMMEEnv(action_space="joint_angle") + assert cfg.features[ACTION].shape == (8,) + + +def test_robomme_features_action_dim_ee_pose(): + """`ee_pose` uses a 7-D action; __post_init__ sets the correct shape.""" + from lerobot.envs.configs import RoboMMEEnv + from lerobot.utils.constants import ACTION + + cfg = RoboMMEEnv(action_space="ee_pose") + assert cfg.features[ACTION].shape == (7,) + + +# --------------------------------------------------------------------------- +# Obs conversion (pure Python, no sim) +# --------------------------------------------------------------------------- + + +def test_convert_obs_list_format(): + """_convert_obs takes the last element from list-format obs fields and + emits a nested ``pixels`` dict (image, wrist_image) plus ``agent_pos``. + + The nested layout is required so ``preprocess_observation()`` in + ``envs/utils.py`` maps each camera to ``observation.images.``. 
+ """ + _install_robomme_stub() + try: + from lerobot.envs.robomme import RoboMMEGymEnv + + env = RoboMMEGymEnv.__new__(RoboMMEGymEnv) + + front = np.full((256, 256, 3), 42, dtype=np.uint8) + wrist = np.full((256, 256, 3), 7, dtype=np.uint8) + joints = np.arange(7, dtype=np.float32) + gripper = np.array([0.5, 0.5], dtype=np.float32) + + obs_raw = { + "front_rgb_list": [np.zeros_like(front), front], + "wrist_rgb_list": [np.zeros_like(wrist), wrist], + "joint_state_list": [np.zeros(7, dtype=np.float32), joints], + "gripper_state_list": [np.zeros(2, dtype=np.float32), gripper], + } + + result = env._convert_obs(obs_raw) + np.testing.assert_array_equal(result["pixels"]["image"], front) + np.testing.assert_array_equal(result["pixels"]["wrist_image"], wrist) + assert result["agent_pos"].shape == (8,) + np.testing.assert_array_almost_equal(result["agent_pos"][:7], joints) + assert result["agent_pos"][7] == gripper[0] + finally: + _uninstall_robomme_stub() + + +def test_convert_obs_array_format(): + """_convert_obs also handles non-list (direct array) obs.""" + _install_robomme_stub() + try: + from lerobot.envs.robomme import RoboMMEGymEnv + + env = RoboMMEGymEnv.__new__(RoboMMEGymEnv) + + front = np.zeros((256, 256, 3), dtype=np.uint8) + obs_raw = { + "front_rgb_list": front, + "wrist_rgb_list": front, + "joint_state_list": np.zeros(7, dtype=np.float32), + "gripper_state_list": np.zeros(2, dtype=np.float32), + } + result = env._convert_obs(obs_raw) + assert result["pixels"]["image"].shape == (256, 256, 3) + assert result["pixels"]["wrist_image"].shape == (256, 256, 3) + assert result["agent_pos"].shape == (8,) + finally: + _uninstall_robomme_stub() + + +# --------------------------------------------------------------------------- +# create_robomme_envs (mocked sim) +# --------------------------------------------------------------------------- + + +def test_create_robomme_envs_returns_correct_structure(): + """Single task -> {task_name: {task_id: VectorEnv}} with one entry 
per task_id.""" + _install_robomme_stub() + try: + from lerobot.envs.robomme import create_robomme_envs + + env_cls = MagicMock(return_value=MagicMock()) + result = create_robomme_envs( + task="PickXtimes", + n_envs=1, + task_ids=[0, 1], + env_cls=env_cls, + ) + + assert "PickXtimes" in result + assert 0 in result["PickXtimes"] + assert 1 in result["PickXtimes"] + assert env_cls.call_count == 2 + finally: + _uninstall_robomme_stub() + + +def test_create_robomme_envs_multi_task(): + """Comma-separated task list produces one suite per task.""" + _install_robomme_stub() + try: + from lerobot.envs.robomme import create_robomme_envs + + env_cls = MagicMock(return_value=MagicMock()) + result = create_robomme_envs( + task="PickXtimes,BinFill,StopCube", + n_envs=1, + env_cls=env_cls, + ) + + assert set(result.keys()) == {"PickXtimes", "BinFill", "StopCube"} + finally: + _uninstall_robomme_stub() + + +def test_create_robomme_envs_raises_on_invalid_env_cls(): + _install_robomme_stub() + try: + import pytest + + from lerobot.envs.robomme import create_robomme_envs + + with pytest.raises(ValueError, match="env_cls must be a callable"): + create_robomme_envs(task="PickXtimes", n_envs=1, env_cls=None) + finally: + _uninstall_robomme_stub() diff --git a/tests/test_rollout.py b/tests/test_rollout.py new file mode 100644 index 000000000..5a1ec4703 --- /dev/null +++ b/tests/test_rollout.py @@ -0,0 +1,345 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Minimal tests for the rollout module's public API.""" + +from __future__ import annotations + +import dataclasses +from unittest.mock import MagicMock + +import pytest +import torch + +pytest.importorskip("datasets", reason="datasets is required (install lerobot[dataset])") + +# --------------------------------------------------------------------------- +# Import smoke tests +# --------------------------------------------------------------------------- + + +def test_rollout_top_level_imports(): + import lerobot.rollout + + for name in lerobot.rollout.__all__: + assert hasattr(lerobot.rollout, name), f"Missing export: {name}" + + +def test_inference_submodule_imports(): + import lerobot.rollout.inference + + for name in lerobot.rollout.inference.__all__: + assert hasattr(lerobot.rollout.inference, name), f"Missing export: {name}" + + +def test_strategies_submodule_imports(): + import lerobot.rollout.strategies + + for name in lerobot.rollout.strategies.__all__: + assert hasattr(lerobot.rollout.strategies, name), f"Missing export: {name}" + + +# --------------------------------------------------------------------------- +# Config tests +# --------------------------------------------------------------------------- + + +def test_strategy_config_types(): + from lerobot.rollout import ( + BaseStrategyConfig, + DAggerStrategyConfig, + HighlightStrategyConfig, + SentryStrategyConfig, + ) + + assert BaseStrategyConfig().type == "base" + assert SentryStrategyConfig().type == "sentry" + assert HighlightStrategyConfig().type == "highlight" + assert DAggerStrategyConfig().type == "dagger" + + +def test_dagger_config_invalid_input_device(): + from lerobot.rollout import DAggerStrategyConfig + + with pytest.raises(ValueError, match="input_device must be 'keyboard' or 'pedal'"): + DAggerStrategyConfig(input_device="joystick") + + +def test_dagger_config_defaults(): + from 
lerobot.rollout import DAggerStrategyConfig + + cfg = DAggerStrategyConfig() + assert cfg.num_episodes is None + assert cfg.record_autonomous is False + assert cfg.input_device == "keyboard" + + +def test_inference_config_types(): + from lerobot.rollout import RTCInferenceConfig, SyncInferenceConfig + + assert SyncInferenceConfig().type == "sync" + + rtc = RTCInferenceConfig() + assert rtc.type == "rtc" + assert rtc.queue_threshold == 30 + assert rtc.rtc is not None + + +def test_sentry_config_defaults(): + from lerobot.rollout import SentryStrategyConfig + + cfg = SentryStrategyConfig() + assert cfg.upload_every_n_episodes == 5 + assert cfg.target_video_file_size_mb is None + + +# --------------------------------------------------------------------------- +# RolloutRingBuffer +# --------------------------------------------------------------------------- + + +def test_ring_buffer_append_and_eviction(): + from lerobot.rollout.ring_buffer import RolloutRingBuffer + + buf = RolloutRingBuffer(max_seconds=0.5, max_memory_mb=100.0, fps=10.0) + # max_frames = 5 + for i in range(8): + buf.append({"val": i}) + assert len(buf) == 5 + + +def test_ring_buffer_drain(): + from lerobot.rollout.ring_buffer import RolloutRingBuffer + + buf = RolloutRingBuffer(max_seconds=1.0, max_memory_mb=100.0, fps=10.0) + for i in range(3): + buf.append({"val": i}) + frames = buf.drain() + assert len(frames) == 3 + assert len(buf) == 0 + assert buf.estimated_bytes == 0 + + +def test_ring_buffer_clear(): + from lerobot.rollout.ring_buffer import RolloutRingBuffer + + buf = RolloutRingBuffer(max_seconds=1.0, max_memory_mb=100.0, fps=10.0) + buf.append({"val": 1}) + buf.clear() + assert len(buf) == 0 + assert buf.estimated_bytes == 0 + + +def test_ring_buffer_tensor_bytes(): + from lerobot.rollout.ring_buffer import RolloutRingBuffer + + buf = RolloutRingBuffer(max_seconds=1.0, max_memory_mb=100.0, fps=10.0) + t = torch.zeros(100, dtype=torch.float32) # 400 bytes + buf.append({"tensor": t}) + 
assert buf.estimated_bytes >= 400 + + +# --------------------------------------------------------------------------- +# ThreadSafeRobot +# --------------------------------------------------------------------------- + + +def test_thread_safe_robot_delegates(): + from lerobot.rollout.robot_wrapper import ThreadSafeRobot + from tests.mocks.mock_robot import MockRobot, MockRobotConfig + + robot = MockRobot(MockRobotConfig(n_motors=3)) + robot.connect() + wrapper = ThreadSafeRobot(robot) + + obs = wrapper.get_observation() + assert "motor_1.pos" in obs + assert "motor_2.pos" in obs + assert "motor_3.pos" in obs + + action = {"motor_1.pos": 0.0, "motor_2.pos": 1.0, "motor_3.pos": 2.0} + result = wrapper.send_action(action) + assert result == action + + robot.disconnect() + + +def test_thread_safe_robot_properties(): + from lerobot.rollout.robot_wrapper import ThreadSafeRobot + from tests.mocks.mock_robot import MockRobot, MockRobotConfig + + robot = MockRobot(MockRobotConfig(n_motors=3)) + robot.connect() + wrapper = ThreadSafeRobot(robot) + + assert wrapper.name == "mock_robot" + assert "motor_1.pos" in wrapper.observation_features + assert "motor_1.pos" in wrapper.action_features + assert wrapper.is_connected is True + assert wrapper.inner is robot + + robot.disconnect() + + +# --------------------------------------------------------------------------- +# Strategy factory +# --------------------------------------------------------------------------- + + +def test_create_strategy_dispatches(): + from lerobot.rollout import ( + BaseStrategy, + BaseStrategyConfig, + DAggerStrategy, + DAggerStrategyConfig, + SentryStrategy, + SentryStrategyConfig, + create_strategy, + ) + + assert isinstance(create_strategy(BaseStrategyConfig()), BaseStrategy) + assert isinstance(create_strategy(SentryStrategyConfig()), SentryStrategy) + assert isinstance(create_strategy(DAggerStrategyConfig()), DAggerStrategy) + + +def test_create_strategy_unknown_raises(): + from lerobot.rollout import 
create_strategy + + cfg = MagicMock() + cfg.type = "bogus" + with pytest.raises(ValueError, match="Unknown strategy type"): + create_strategy(cfg) + + +# --------------------------------------------------------------------------- +# Inference factory +# --------------------------------------------------------------------------- + + +def test_create_inference_engine_sync(): + from lerobot.rollout import SyncInferenceConfig, SyncInferenceEngine, create_inference_engine + + engine = create_inference_engine( + SyncInferenceConfig(), + policy=MagicMock(), + preprocessor=MagicMock(), + postprocessor=MagicMock(), + robot_wrapper=MagicMock(robot_type="mock"), + hw_features={}, + dataset_features={}, + ordered_action_keys=["k"], + task="test", + fps=30.0, + device="cpu", + ) + assert isinstance(engine, SyncInferenceEngine) + + +# --------------------------------------------------------------------------- +# Pure functions +# --------------------------------------------------------------------------- + + +def test_estimate_max_episode_seconds_no_video(): + from lerobot.rollout.strategies import estimate_max_episode_seconds + + assert estimate_max_episode_seconds({}, fps=30.0) == 300.0 + + +def test_estimate_max_episode_seconds_with_video(): + from lerobot.rollout.strategies import estimate_max_episode_seconds + + features = {"cam": {"dtype": "video", "shape": (480, 640, 3)}} + result = estimate_max_episode_seconds(features, fps=30.0) + assert result > 0 + # With a real camera, duration should differ from the fallback + assert result != 300.0 + + +def test_safe_push_to_hub(): + from lerobot.rollout.strategies import safe_push_to_hub + + ds = MagicMock() + ds.num_episodes = 0 + assert safe_push_to_hub(ds) is False + ds.push_to_hub.assert_not_called() + + ds.num_episodes = 5 + assert safe_push_to_hub(ds, tags=["test"]) is True + ds.push_to_hub.assert_called_once_with(tags=["test"], private=False) + + +# --------------------------------------------------------------------------- 
+# DAgger state machine +# --------------------------------------------------------------------------- + + +def test_dagger_full_transition_cycle(): + from lerobot.rollout.strategies import DAggerEvents, DAggerPhase + + events = DAggerEvents() + assert events.phase == DAggerPhase.AUTONOMOUS + + # AUTONOMOUS -> PAUSED + events.request_transition("pause_resume") + old, new = events.consume_transition() + assert (old, new) == (DAggerPhase.AUTONOMOUS, DAggerPhase.PAUSED) + + # PAUSED -> CORRECTING + events.request_transition("correction") + old, new = events.consume_transition() + assert (old, new) == (DAggerPhase.PAUSED, DAggerPhase.CORRECTING) + + # CORRECTING -> PAUSED + events.request_transition("correction") + old, new = events.consume_transition() + assert (old, new) == (DAggerPhase.CORRECTING, DAggerPhase.PAUSED) + + # PAUSED -> AUTONOMOUS + events.request_transition("pause_resume") + old, new = events.consume_transition() + assert (old, new) == (DAggerPhase.PAUSED, DAggerPhase.AUTONOMOUS) + + +def test_dagger_invalid_transition_ignored(): + from lerobot.rollout.strategies import DAggerEvents, DAggerPhase + + events = DAggerEvents() + events.request_transition("correction") # Not valid from AUTONOMOUS + assert events.consume_transition() is None + assert events.phase == DAggerPhase.AUTONOMOUS + + +def test_dagger_events_reset(): + from lerobot.rollout.strategies import DAggerEvents, DAggerPhase + + events = DAggerEvents() + events.request_transition("pause_resume") + events.consume_transition() # -> PAUSED + events.upload_requested.set() + events.reset() + assert events.phase == DAggerPhase.AUTONOMOUS + assert not events.upload_requested.is_set() + + +# --------------------------------------------------------------------------- +# Context dataclass +# --------------------------------------------------------------------------- + + +def test_rollout_context_fields(): + from lerobot.rollout import RolloutContext + + field_names = {f.name for f in 
dataclasses.fields(RolloutContext)} + assert field_names == {"runtime", "hardware", "policy", "processors", "data"} diff --git a/tests/utils/test_process.py b/tests/utils/test_process.py index c19d6677b..1ede0bfeb 100644 --- a/tests/utils/test_process.py +++ b/tests/utils/test_process.py @@ -24,7 +24,7 @@ import pytest pytest.importorskip("datasets", reason="datasets is required (install lerobot[dataset])") -from lerobot.rl.process import ProcessSignalHandler # noqa: E402 +from lerobot.utils.process import ProcessSignalHandler # noqa: E402 # Fixture to reset shutdown_event_counter and original signal handlers before and after each test diff --git a/uv.lock b/uv.lock index a66f044ff..323b9bc9d 100644 --- a/uv.lock +++ b/uv.lock @@ -2,30 +2,39 @@ version = 1 revision = 2 requires-python = ">=3.12" resolution-markers = [ - "python_full_version >= '3.14' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'linux'", + "python_full_version >= '3.15' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", + "python_full_version >= '3.15' and platform_machine == 's390x' and sys_platform == 'linux'", + "python_full_version == '3.14.*' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", "python_full_version == '3.13.*' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", + "python_full_version == '3.14.*' and platform_machine == 's390x' and sys_platform == 'linux'", "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'linux'", 
"python_full_version < '3.13' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'linux'", - "(python_full_version >= '3.14' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.14' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version >= '3.14' and platform_machine == 'armv7l' and sys_platform == 'linux')", + "(python_full_version >= '3.15' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.15' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version >= '3.15' and platform_machine == 'armv7l' and sys_platform == 'linux')", + "(python_full_version == '3.14.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.14.*' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version == '3.14.*' and platform_machine == 'armv7l' and sys_platform == 'linux')", "(python_full_version == '3.13.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.13.*' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version == '3.13.*' and platform_machine == 'armv7l' and sys_platform == 'linux')", "(python_full_version < '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.13' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version < '3.13' and platform_machine == 'armv7l' and sys_platform == 'linux')", - "(python_full_version >= '3.14' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and 
sys_platform != 'linux' and sys_platform != 'win32')", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", + "(python_full_version >= '3.15' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.15' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", + "python_full_version >= '3.15' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", + "python_full_version >= '3.15' and platform_machine != 's390x' and sys_platform == 'emscripten'", + "python_full_version >= '3.15' and platform_machine == 's390x' and sys_platform == 'emscripten'", + "(python_full_version == '3.14.*' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version == '3.14.*' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", "(python_full_version == '3.13.*' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version == '3.13.*' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", + "python_full_version == '3.14.*' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", "(python_full_version < '3.13' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version < '3.13' and 
platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", - "python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform == 'emscripten'", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'emscripten'", + "python_full_version == '3.14.*' and platform_machine != 's390x' and sys_platform == 'emscripten'", "python_full_version == '3.13.*' and platform_machine != 's390x' and sys_platform == 'emscripten'", + "python_full_version == '3.14.*' and platform_machine == 's390x' and sys_platform == 'emscripten'", "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'emscripten'", "python_full_version < '3.13' and platform_machine != 's390x' and sys_platform == 'emscripten'", "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'emscripten'", - "(python_full_version >= '3.14' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform == 'win32')", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'win32'", + "(python_full_version >= '3.15' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.15' and platform_machine != 's390x' and sys_platform == 'win32')", + "python_full_version >= '3.15' and platform_machine == 's390x' and sys_platform == 'win32'", + "(python_full_version == '3.14.*' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version == '3.14.*' and platform_machine != 's390x' and sys_platform == 'win32')", "(python_full_version == '3.13.*' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version == 
'3.13.*' and platform_machine != 's390x' and sys_platform == 'win32')", + "python_full_version == '3.14.*' and platform_machine == 's390x' and sys_platform == 'win32'", "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'win32'", "(python_full_version < '3.13' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version < '3.13' and platform_machine != 's390x' and sys_platform == 'win32')", "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'win32'", @@ -202,6 +211,71 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/da/42/e921fccf5015463e32a3cf6ee7f980a6ed0f395ceeaa45060b61d86486c2/anyio-4.13.0-py3-none-any.whl", hash = "sha256:08b310f9e24a9594186fd75b4f73f4a4152069e3853f1ed8bfbf58369f4ad708", size = 114353, upload-time = "2026-03-24T12:59:08.246Z" }, ] +[[package]] +name = "appnope" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/35/5d/752690df9ef5b76e169e68d6a129fa6d08a7100ca7f754c89495db3c6019/appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee", size = 4170, upload-time = "2024-02-06T09:43:11.258Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/29/5ecc3a15d5a33e31b26c11426c45c501e439cb865d0bff96315d86443b78/appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c", size = 4321, upload-time = "2024-02-06T09:43:09.663Z" }, +] + +[[package]] +name = "argon2-cffi" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "argon2-cffi-bindings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0e/89/ce5af8a7d472a67cc819d5d998aa8c82c5d860608c4db9f46f1162d7dab9/argon2_cffi-25.1.0.tar.gz", hash = "sha256:694ae5cc8a42f4c4e2bf2ca0e64e51e23a040c6a517a85074683d3959e1346c1", size = 45706, upload-time = 
"2025-06-03T06:55:32.073Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/d3/a8b22fa575b297cd6e3e3b0155c7e25db170edf1c74783d6a31a2490b8d9/argon2_cffi-25.1.0-py3-none-any.whl", hash = "sha256:fdc8b074db390fccb6eb4a3604ae7231f219aa669a2652e0f20e16ba513d5741", size = 14657, upload-time = "2025-06-03T06:55:30.804Z" }, +] + +[[package]] +name = "argon2-cffi-bindings" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5c/2d/db8af0df73c1cf454f71b2bbe5e356b8c1f8041c979f505b3d3186e520a9/argon2_cffi_bindings-25.1.0.tar.gz", hash = "sha256:b957f3e6ea4d55d820e40ff76f450952807013d361a65d7f28acc0acbf29229d", size = 1783441, upload-time = "2025-07-30T10:02:05.147Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/97/3c0a35f46e52108d4707c44b95cfe2afcafc50800b5450c197454569b776/argon2_cffi_bindings-25.1.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:3d3f05610594151994ca9ccb3c771115bdb4daef161976a266f0dd8aa9996b8f", size = 54393, upload-time = "2025-07-30T10:01:40.97Z" }, + { url = "https://files.pythonhosted.org/packages/9d/f4/98bbd6ee89febd4f212696f13c03ca302b8552e7dbf9c8efa11ea4a388c3/argon2_cffi_bindings-25.1.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8b8efee945193e667a396cbc7b4fb7d357297d6234d30a489905d96caabde56b", size = 29328, upload-time = "2025-07-30T10:01:41.916Z" }, + { url = "https://files.pythonhosted.org/packages/43/24/90a01c0ef12ac91a6be05969f29944643bc1e5e461155ae6559befa8f00b/argon2_cffi_bindings-25.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3c6702abc36bf3ccba3f802b799505def420a1b7039862014a65db3205967f5a", size = 31269, upload-time = "2025-07-30T10:01:42.716Z" }, + { url = 
"https://files.pythonhosted.org/packages/d4/d3/942aa10782b2697eee7af5e12eeff5ebb325ccfb86dd8abda54174e377e4/argon2_cffi_bindings-25.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a1c70058c6ab1e352304ac7e3b52554daadacd8d453c1752e547c76e9c99ac44", size = 86558, upload-time = "2025-07-30T10:01:43.943Z" }, + { url = "https://files.pythonhosted.org/packages/0d/82/b484f702fec5536e71836fc2dbc8c5267b3f6e78d2d539b4eaa6f0db8bf8/argon2_cffi_bindings-25.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e2fd3bfbff3c5d74fef31a722f729bf93500910db650c925c2d6ef879a7e51cb", size = 92364, upload-time = "2025-07-30T10:01:44.887Z" }, + { url = "https://files.pythonhosted.org/packages/c9/c1/a606ff83b3f1735f3759ad0f2cd9e038a0ad11a3de3b6c673aa41c24bb7b/argon2_cffi_bindings-25.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4f9665de60b1b0e99bcd6be4f17d90339698ce954cfd8d9cf4f91c995165a92", size = 85637, upload-time = "2025-07-30T10:01:46.225Z" }, + { url = "https://files.pythonhosted.org/packages/44/b4/678503f12aceb0262f84fa201f6027ed77d71c5019ae03b399b97caa2f19/argon2_cffi_bindings-25.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ba92837e4a9aa6a508c8d2d7883ed5a8f6c308c89a4790e1e447a220deb79a85", size = 91934, upload-time = "2025-07-30T10:01:47.203Z" }, + { url = "https://files.pythonhosted.org/packages/f0/c7/f36bd08ef9bd9f0a9cff9428406651f5937ce27b6c5b07b92d41f91ae541/argon2_cffi_bindings-25.1.0-cp314-cp314t-win32.whl", hash = "sha256:84a461d4d84ae1295871329b346a97f68eade8c53b6ed9a7ca2d7467f3c8ff6f", size = 28158, upload-time = "2025-07-30T10:01:48.341Z" }, + { url = "https://files.pythonhosted.org/packages/b3/80/0106a7448abb24a2c467bf7d527fe5413b7fdfa4ad6d6a96a43a62ef3988/argon2_cffi_bindings-25.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b55aec3565b65f56455eebc9b9f34130440404f27fe21c3b375bf1ea4d8fbae6", size = 32597, upload-time = "2025-07-30T10:01:49.112Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/b8/d663c9caea07e9180b2cb662772865230715cbd573ba3b5e81793d580316/argon2_cffi_bindings-25.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:87c33a52407e4c41f3b70a9c2d3f6056d88b10dad7695be708c5021673f55623", size = 28231, upload-time = "2025-07-30T10:01:49.92Z" }, + { url = "https://files.pythonhosted.org/packages/1d/57/96b8b9f93166147826da5f90376e784a10582dd39a393c99bb62cfcf52f0/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:aecba1723ae35330a008418a91ea6cfcedf6d31e5fbaa056a166462ff066d500", size = 54121, upload-time = "2025-07-30T10:01:50.815Z" }, + { url = "https://files.pythonhosted.org/packages/0a/08/a9bebdb2e0e602dde230bdde8021b29f71f7841bd54801bcfd514acb5dcf/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2630b6240b495dfab90aebe159ff784d08ea999aa4b0d17efa734055a07d2f44", size = 29177, upload-time = "2025-07-30T10:01:51.681Z" }, + { url = "https://files.pythonhosted.org/packages/b6/02/d297943bcacf05e4f2a94ab6f462831dc20158614e5d067c35d4e63b9acb/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:7aef0c91e2c0fbca6fc68e7555aa60ef7008a739cbe045541e438373bc54d2b0", size = 31090, upload-time = "2025-07-30T10:01:53.184Z" }, + { url = "https://files.pythonhosted.org/packages/c1/93/44365f3d75053e53893ec6d733e4a5e3147502663554b4d864587c7828a7/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e021e87faa76ae0d413b619fe2b65ab9a037f24c60a1e6cc43457ae20de6dc6", size = 81246, upload-time = "2025-07-30T10:01:54.145Z" }, + { url = "https://files.pythonhosted.org/packages/09/52/94108adfdd6e2ddf58be64f959a0b9c7d4ef2fa71086c38356d22dc501ea/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3e924cfc503018a714f94a49a149fdc0b644eaead5d1f089330399134fa028a", size = 87126, upload-time = "2025-07-30T10:01:55.074Z" }, + { url = 
"https://files.pythonhosted.org/packages/72/70/7a2993a12b0ffa2a9271259b79cc616e2389ed1a4d93842fac5a1f923ffd/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c87b72589133f0346a1cb8d5ecca4b933e3c9b64656c9d175270a000e73b288d", size = 80343, upload-time = "2025-07-30T10:01:56.007Z" }, + { url = "https://files.pythonhosted.org/packages/78/9a/4e5157d893ffc712b74dbd868c7f62365618266982b64accab26bab01edc/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1db89609c06afa1a214a69a462ea741cf735b29a57530478c06eb81dd403de99", size = 86777, upload-time = "2025-07-30T10:01:56.943Z" }, + { url = "https://files.pythonhosted.org/packages/74/cd/15777dfde1c29d96de7f18edf4cc94c385646852e7c7b0320aa91ccca583/argon2_cffi_bindings-25.1.0-cp39-abi3-win32.whl", hash = "sha256:473bcb5f82924b1becbb637b63303ec8d10e84c8d241119419897a26116515d2", size = 27180, upload-time = "2025-07-30T10:01:57.759Z" }, + { url = "https://files.pythonhosted.org/packages/e2/c6/a759ece8f1829d1f162261226fbfd2c6832b3ff7657384045286d2afa384/argon2_cffi_bindings-25.1.0-cp39-abi3-win_amd64.whl", hash = "sha256:a98cd7d17e9f7ce244c0803cad3c23a7d379c301ba618a5fa76a67d116618b98", size = 31715, upload-time = "2025-07-30T10:01:58.56Z" }, + { url = "https://files.pythonhosted.org/packages/42/b9/f8d6fa329ab25128b7e98fd83a3cb34d9db5b059a9847eddb840a0af45dd/argon2_cffi_bindings-25.1.0-cp39-abi3-win_arm64.whl", hash = "sha256:b0fdbcf513833809c882823f98dc2f931cf659d9a1429616ac3adebb49f5db94", size = 27149, upload-time = "2025-07-30T10:01:59.329Z" }, +] + +[[package]] +name = "arrow" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dateutil" }, + { name = "tzdata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/33/032cdc44182491aa708d06a68b62434140d8c50820a087fac7af37703357/arrow-1.4.0.tar.gz", hash = "sha256:ed0cc050e98001b8779e84d461b0098c4ac597e88704a655582b21d116e526d7", size = 152931, 
upload-time = "2025-10-18T17:46:46.761Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/c9/d7977eaacb9df673210491da99e6a247e93df98c715fc43fd136ce1d3d33/arrow-1.4.0-py3-none-any.whl", hash = "sha256:749f0769958ebdc79c173ff0b0670d59051a535fa26e8eba02953dc19eb43205", size = 68797, upload-time = "2025-10-18T17:46:45.663Z" }, +] + [[package]] name = "asttokens" version = "3.0.1" @@ -211,6 +285,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/39/e7eaf1799466a4aef85b6a4fe7bd175ad2b1c6345066aa33f1f58d4b18d0/asttokens-3.0.1-py3-none-any.whl", hash = "sha256:15a3ebc0f43c2d0a50eeafea25e19046c68398e487b9f1f5b517f7c0f40f976a", size = 27047, upload-time = "2025-11-15T16:43:16.109Z" }, ] +[[package]] +name = "async-lru" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e8/1f/989ecfef8e64109a489fff357450cb73fa73a865a92bd8c272170a6922c2/async_lru-2.3.0.tar.gz", hash = "sha256:89bdb258a0140d7313cf8f4031d816a042202faa61d0ab310a0a538baa1c24b6", size = 16332, upload-time = "2026-03-19T01:04:32.413Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/e2/c2e3abf398f80732e58b03be77bde9022550d221dd8781bf586bd4d97cc1/async_lru-2.3.0-py3-none-any.whl", hash = "sha256:eea27b01841909316f2cc739807acea1c623df2be8c5cfad7583286397bb8315", size = 8403, upload-time = "2026-03-19T01:04:30.883Z" }, +] + [[package]] name = "attrs" version = "26.1.0" @@ -256,6 +339,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b1/c9/15bb4fd7a1f39d70db35af2b9c20a0ae19e4220eb58a8b8446e903b98d72/av-15.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:9a20c5eba3ec49c2f4b281797021923fc68a86aeb66c5cda4fd0252fa8004951", size = 31487337, upload-time = "2025-08-30T04:41:30.591Z" }, ] +[[package]] +name = "babel" +version = "2.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/7d/b2/51899539b6ceeeb420d40ed3cd4b7a40519404f9baf3d4ac99dc413a834b/babel-2.18.0.tar.gz", hash = "sha256:b80b99a14bd085fcacfa15c9165f651fbb3406e66cc603abf11c5750937c992d", size = 9959554, upload-time = "2026-02-01T12:30:56.078Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/f5/21d2de20e8b8b0408f0681956ca2c69f1320a3848ac50e6e7f39c6159675/babel-2.18.0-py3-none-any.whl", hash = "sha256:e2b422b277c2b9a9630c1d7903c2a00d0830c409c59ac8cae9081c92f1aeba35", size = 10196845, upload-time = "2026-02-01T12:30:53.445Z" }, +] + [[package]] name = "bddl" version = "1.0.1" @@ -269,12 +361,42 @@ dependencies = [ sdist = { url = "https://files.pythonhosted.org/packages/5c/37/0211f82891a9f14efcfd2b2096f8d9e4351398ad637fdd1ee59cfc580b0e/bddl-1.0.1.tar.gz", hash = "sha256:1fa4e6e5050b93888ff6fd8455c39bfb29d3864ce06b4c37c0f781f513a2ae26", size = 164809, upload-time = "2022-03-08T01:48:23.564Z" } [[package]] -name = "certifi" -version = "2026.2.25" +name = "beautifulsoup4" +version = "4.14.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/af/2d/7bf41579a8986e348fa033a31cdd0e4121114f6bce2457e8876010b092dd/certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7", size = 155029, upload-time = "2026-02-25T02:54:17.342Z" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/b0/1c6a16426d389813b48d95e26898aff79abbde42ad353958ad95cc8c9b21/beautifulsoup4-4.14.3.tar.gz", hash = "sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86", size = 627737, upload-time = "2025-11-30T15:08:26.084Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl", hash = 
"sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa", size = 153684, upload-time = "2026-02-25T02:54:15.766Z" }, + { url = "https://files.pythonhosted.org/packages/1a/39/47f9197bdd44df24d67ac8893641e16f386c984a0619ef2ee4c51fbbc019/beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb", size = 107721, upload-time = "2025-11-30T15:08:24.087Z" }, +] + +[[package]] +name = "bleach" +version = "6.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "webencodings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/07/18/3c8523962314be6bf4c8989c79ad9531c825210dd13a8669f6b84336e8bd/bleach-6.3.0.tar.gz", hash = "sha256:6f3b91b1c0a02bb9a78b5a454c92506aa0fdf197e1d5e114d2e00c6f64306d22", size = 203533, upload-time = "2025-10-27T17:57:39.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cd/3a/577b549de0cc09d95f11087ee63c739bba856cd3952697eec4c4bb91350a/bleach-6.3.0-py3-none-any.whl", hash = "sha256:fe10ec77c93ddf3d13a73b035abaac7a9f5e436513864ccdad516693213c65d6", size = 164437, upload-time = "2025-10-27T17:57:37.538Z" }, +] + +[package.optional-dependencies] +css = [ + { name = "tinycss2" }, +] + +[[package]] +name = "certifi" +version = "2026.4.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/25/ee/6caf7a40c36a1220410afe15a1cc64993a1f864871f698c0f93acb72842a/certifi-2026.4.22.tar.gz", hash = "sha256:8d455352a37b71bf76a79caa83a3d6c25afee4a385d632127b6afb3963f1c580", size = 137077, upload-time = "2026-04-22T11:26:11.191Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/30/7cd8fdcdfbc5b869528b079bfb76dcdf6056b1a2097a662e5e8c04f42965/certifi-2026.4.22-py3-none-any.whl", hash = "sha256:3cb2210c8f88ba2318d29b0388d1023c8492ff72ecdde4ebdaddbb13a31b1c4a", size = 135707, upload-time = "2026-04-22T11:26:09.372Z" }, ] [[package]] @@ -418,14 +540,14 @@ 
wheels = [ [[package]] name = "click" -version = "8.3.2" +version = "8.3.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/57/75/31212c6bf2503fdf920d87fee5d7a86a2e3bcf444984126f13d8e4016804/click-8.3.2.tar.gz", hash = "sha256:14162b8b3b3550a7d479eafa77dfd3c38d9dc8951f6f69c78913a8f9a7540fd5", size = 302856, upload-time = "2026-04-03T19:14:45.118Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/63/f9e1ea081ce35720d8b92acde70daaedace594dc93b693c869e0d5910718/click-8.3.3.tar.gz", hash = "sha256:398329ad4837b2ff7cbe1dd166a4c0f8900c3ca3a218de04466f38f6497f18a2", size = 328061, upload-time = "2026-04-22T15:11:27.506Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e4/20/71885d8b97d4f3dde17b1fdb92dbd4908b00541c5a3379787137285f602e/click-8.3.2-py3-none-any.whl", hash = "sha256:1924d2c27c5653561cd2cae4548d1406039cb79b858b747cfea24924bbc1616d", size = 108379, upload-time = "2026-04-03T19:14:43.505Z" }, + { url = "https://files.pythonhosted.org/packages/ae/44/c1221527f6a71a01ec6fbad7fa78f1d50dfa02217385cf0fa3eec7087d59/click-8.3.3-py3-none-any.whl", hash = "sha256:a2bf429bb3033c89fa4936ffb35d5cb471e3719e1f3c8a7c3fff0b8314305613", size = 110502, upload-time = "2026-04-22T15:11:25.044Z" }, ] [[package]] @@ -674,6 +796,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] +[[package]] +name = "comm" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/13/7d740c5849255756bc17888787313b61fd38a0a8304fc4f073dfc46122aa/comm-0.2.3.tar.gz", hash = 
"sha256:2dc8048c10962d55d7ad693be1e7045d891b7ce8d999c97963a5e3e99c055971", size = 6319, upload-time = "2025-07-25T14:02:04.452Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/97/891a0971e1e4a8c5d2b20bbe0e524dc04548d2307fee33cdeba148fd4fc7/comm-0.2.3-py3-none-any.whl", hash = "sha256:c615d91d75f7f04f095b30d1c1711babd43bdc6419c1be9886a85f2f4e489417", size = 7294, upload-time = "2025-07-25T14:02:02.896Z" }, +] + [[package]] name = "contourpy" version = "1.3.3" @@ -841,10 +972,10 @@ wheels = [ [[package]] name = "cuda-pathfinder" -version = "1.5.2" +version = "1.5.3" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f2/f9/1b9b60a30fc463c14cdea7a77228131a0ccc89572e8df9cb86c9648271ab/cuda_pathfinder-1.5.2-py3-none-any.whl", hash = "sha256:0c5f160a7756c5b072723cbbd6d861e38917ef956c68150b02f0b6e9271c71fa", size = 49988, upload-time = "2026-04-06T23:01:05.17Z" }, + { url = "https://files.pythonhosted.org/packages/d3/d6/ac63065d33dd700fee7ebd7d287332401b54e31b9346e142f871e1f0b116/cuda_pathfinder-1.5.3-py3-none-any.whl", hash = "sha256:dff021123aedbb4117cc7ec81717bbfe198fb4e8b5f1ee57e0e084fec5c8577d", size = 49991, upload-time = "2026-04-14T20:09:27.037Z" }, ] [[package]] @@ -935,6 +1066,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2b/5f/c52bd1255db763d0cdcb7084d2e90c42119cb229302c56bdf1d0aa78abd2/deepdiff-8.6.2-py3-none-any.whl", hash = "sha256:4d22034a866c3928303a9332c279362f714192d9305bac17c498720d095fd1b4", size = 91979, upload-time = "2026-03-18T17:16:32.171Z" }, ] +[[package]] +name = "defusedxml" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/d5/c66da9b79e5bdb124974bfe172b4daf3c984ebd9c2a06e2b8a4dc7331c72/defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69", size = 75520, upload-time = "2021-03-08T10:59:26.269Z" } +wheels = [ + { 
url = "https://files.pythonhosted.org/packages/07/6c/aa3f2f849e01cb6a001cd8554a88d4c77c5c1a31c95bdf1cf9301e6d9ef4/defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61", size = 25604, upload-time = "2021-03-08T10:59:24.45Z" }, +] + [[package]] name = "diffusers" version = "0.35.2" @@ -974,13 +1114,12 @@ wheels = [ [[package]] name = "dm-control" -version = "1.0.38" +version = "1.0.40" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "absl-py" }, { name = "dm-env" }, - { name = "dm-tree", version = "0.1.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" }, - { name = "dm-tree", version = "0.1.10", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.14'" }, + { name = "dm-tree" }, { name = "glfw" }, { name = "labmaze" }, { name = "lxml" }, @@ -994,9 +1133,9 @@ dependencies = [ { name = "setuptools" }, { name = "tqdm" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/7c/d7/ff2c4703c43c3cf14aa092bfb9a5eb740849eee61c25c0d5aed6a57985a1/dm_control-1.0.38.tar.gz", hash = "sha256:f5966799662d8914bb4f181d2be939da2079f0e08e198f901bb54e61d46c0b90", size = 56274133, upload-time = "2026-03-11T08:31:50.853Z" } +sdist = { url = "https://files.pythonhosted.org/packages/78/27/1d3caa7fa7557b70d766f437979636b6a8c99b14f6e8b8f84795cad9f1df/dm_control-1.0.40.tar.gz", hash = "sha256:af5828af47fe50466008d53b141893a05c4e2779169fc8ef469d1828a016266e", size = 56273764, upload-time = "2026-04-25T22:05:39.202Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/0e/2bed3a6d225921e31bf9200a9c4470f7bc3a12b940bfbca22fa7aa9dfa0e/dm_control-1.0.38-py3-none-any.whl", hash = "sha256:ddb71c4b360a503eabf3c62ba151a0d9404cbe22b23561202b90310558690371", size = 56446251, upload-time = "2026-03-11T08:31:45.102Z" }, + { url = 
"https://files.pythonhosted.org/packages/2d/eb/a762d6f15e6c4faccef8fdcdae50cf8f232800a36b70aef93d38a787bb58/dm_control-1.0.40-py3-none-any.whl", hash = "sha256:cd15b1d95f5b320b7924e518715b8ac132043d575588b7e122f21016f49c7e89", size = 56446428, upload-time = "2026-04-25T22:05:33.561Z" }, ] [[package]] @@ -1005,8 +1144,7 @@ version = "1.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "absl-py" }, - { name = "dm-tree", version = "0.1.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" }, - { name = "dm-tree", version = "0.1.10", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.14'" }, + { name = "dm-tree" }, { name = "numpy" }, ] sdist = { url = "https://files.pythonhosted.org/packages/62/c9/93e8d6239d5806508a2ee4b370e67c6069943ca149f59f533923737a99b7/dm-env-1.6.tar.gz", hash = "sha256:a436eb1c654c39e0c986a516cee218bea7140b510fceff63f97eb4fcff3d93de", size = 20187, upload-time = "2022-12-21T00:25:29.306Z" } @@ -1018,22 +1156,11 @@ wheels = [ name = "dm-tree" version = "0.1.9" source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.14' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'linux'", - "(python_full_version >= '3.14' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.14' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version >= '3.14' and platform_machine == 'armv7l' and sys_platform == 'linux')", - "(python_full_version >= '3.14' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 
'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", - "python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform == 'emscripten'", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'emscripten'", - "(python_full_version >= '3.14' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform == 'win32')", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'win32'", -] dependencies = [ - { name = "absl-py", marker = "python_full_version >= '3.14'" }, - { name = "attrs", marker = "python_full_version >= '3.14'" }, - { name = "numpy", marker = "python_full_version >= '3.14'" }, - { name = "wrapt", marker = "python_full_version >= '3.14'" }, + { name = "absl-py" }, + { name = "attrs" }, + { name = "numpy" }, + { name = "wrapt" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a6/83/ce29720ccf934c6cfa9b9c95ebbe96558386e66886626066632b5e44afed/dm_tree-0.1.9.tar.gz", hash = "sha256:a4c7db3d3935a5a2d5e4b383fc26c6b0cd6f78c6d4605d3e7b518800ecd5342b", size = 35623, upload-time = "2025-01-30T20:45:37.13Z" } wheels = [ @@ -1050,58 +1177,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c5/37/15603079854394f16e3833a7b50696c1f3cbf30a2243a119f64f18a16f36/dm_tree-0.1.9-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1f5d1e96b3a7de22b25b13a5eb30f41f8cf9c02dd4479a24920de99e780903c", size = 153052, upload-time = "2025-01-30T20:45:35.907Z" }, ] -[[package]] -name = "dm-tree" -version = "0.1.10" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version == '3.13.*' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine 
!= 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", - "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'linux'", - "python_full_version < '3.13' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", - "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'linux'", - "(python_full_version == '3.13.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.13.*' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version == '3.13.*' and platform_machine == 'armv7l' and sys_platform == 'linux')", - "(python_full_version < '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.13' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version < '3.13' and platform_machine == 'armv7l' and sys_platform == 'linux')", - "(python_full_version == '3.13.*' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version == '3.13.*' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", - "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", - "(python_full_version < '3.13' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version < '3.13' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", - "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", - 
"python_full_version == '3.13.*' and platform_machine != 's390x' and sys_platform == 'emscripten'", - "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'emscripten'", - "python_full_version < '3.13' and platform_machine != 's390x' and sys_platform == 'emscripten'", - "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'emscripten'", - "(python_full_version == '3.13.*' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version == '3.13.*' and platform_machine != 's390x' and sys_platform == 'win32')", - "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'win32'", - "(python_full_version < '3.13' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version < '3.13' and platform_machine != 's390x' and sys_platform == 'win32')", - "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'win32'", -] -dependencies = [ - { name = "absl-py", marker = "python_full_version < '3.14'" }, - { name = "attrs", marker = "python_full_version < '3.14'" }, - { name = "numpy", marker = "python_full_version < '3.14'" }, - { name = "wrapt", marker = "python_full_version < '3.14'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/5a/66/a3ec619d22b6baffa5ab853e8dc6ec9d0c837127948af59bb15b988d7312/dm_tree-0.1.10.tar.gz", hash = "sha256:22f37b599e01cc3402a17f79c257a802aebd8d326de05b54657650845956208a", size = 35748, upload-time = "2026-03-31T17:35:39.03Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/34/a1/17e0d68eec978c483db4712b14d083ee01484381b29ea85edb2b20210bd0/dm_tree-0.1.10-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:94af18e4fd22ce69eccae89eeed8ed498b6b4cc4957f4ed10b4160e59f620e1d", size = 315976, upload-time = "2026-03-31T17:35:15.21Z" }, - { url = 
"https://files.pythonhosted.org/packages/cc/6f/ed603715fbc29c887a8985252e2cfe0d449497aea96bac51010159771617/dm_tree-0.1.10-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b442a0c1e9d0960e0314a2e4af81fd328a87921b6d6db6dc41bfa420536884d6", size = 184053, upload-time = "2026-03-31T17:35:16.512Z" }, - { url = "https://files.pythonhosted.org/packages/83/eb/1d55c679cee9a54e552480d308535753c72e2250cf720d7aa777bff2a4fe/dm_tree-0.1.10-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:012c2b376e88d3685c73a4b5c23be41fe933e14e380dcd90172971690b0e02d2", size = 186506, upload-time = "2026-03-31T17:35:17.593Z" }, - { url = "https://files.pythonhosted.org/packages/89/2d/adef6924f8dc7f1665eea4ce066387820c14a629d0e1005568892d56ea6a/dm_tree-0.1.10-cp312-cp312-win_amd64.whl", hash = "sha256:da8d5b8995bea1b6bb93f457e0dad5d16e6e2344a6488ced55320e7f3fd50f56", size = 112708, upload-time = "2026-03-31T17:35:18.699Z" }, - { url = "https://files.pythonhosted.org/packages/d6/29/f39e8412c16740f4c914c6674a04a66ace344ce5cb99b537c2270ef4f204/dm_tree-0.1.10-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:4a782f0382be16d66c9ed003e6992e56674504a1d9636f44d2807123f5df6343", size = 316108, upload-time = "2026-03-31T17:35:20.139Z" }, - { url = "https://files.pythonhosted.org/packages/02/83/1b94d45351bd75a83976a88c9fcf109da6ce336f38a3b443703bb6b18e5d/dm_tree-0.1.10-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0e8f8f1354f178112732b30d2293bc53d212ea64a9556db80a926af3d4647a6b", size = 183834, upload-time = "2026-03-31T17:35:21.463Z" }, - { url = "https://files.pythonhosted.org/packages/2f/23/bd3e75cbff06a464339d32667d740acf49812b027142a013b54d2c4d830a/dm_tree-0.1.10-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6d7134c0805294c640b94d85cc725084f0c5087bcda5a7fb38eeb7f479ecc37c", size = 186187, upload-time = "2026-03-31T17:35:23.495Z" }, - { url = 
"https://files.pythonhosted.org/packages/aa/75/4b460253b9af862388940404b5df6a22b399800c850aab4724c95f8635f9/dm_tree-0.1.10-cp313-cp313-win_amd64.whl", hash = "sha256:b42e04482880b017d931511d7b5997be372fff26a1ee9b9be55eef03ef1c2918", size = 112768, upload-time = "2026-03-31T17:35:24.622Z" }, - { url = "https://files.pythonhosted.org/packages/cb/ca/3b40a8a50f9c3492b795b157d769180edb5f2605e3c61ae826208f917baa/dm_tree-0.1.10-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:bde02efacca66514524922538b8a0c5dc15d482565d1c796edc34a726b376830", size = 324138, upload-time = "2026-03-31T17:35:25.627Z" }, - { url = "https://files.pythonhosted.org/packages/83/e4/33c9218aa607f610e2b0334fc824c2abd5a6bc232bf0726cf275f88e639d/dm_tree-0.1.10-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:033f9a063e1e19b6c65fb5c76079bd923044f5a6095357ad2637845513d47938", size = 185110, upload-time = "2026-03-31T17:35:26.784Z" }, - { url = "https://files.pythonhosted.org/packages/6c/da/f8811666d61b6829ba1c2716c4119039428dd86078eddd120354aaf26a3b/dm_tree-0.1.10-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6d4237da7b072fff1e93db109ab545f00d2b978ead35e85e7a84908e15197826", size = 187013, upload-time = "2026-03-31T17:35:27.969Z" }, - { url = "https://files.pythonhosted.org/packages/94/8d/135ddeea875fd1a2768e7aee6c224f92c9b7643ead1ec8b68bdbee52c60a/dm_tree-0.1.10-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:f395390d6acfb5d39c564c8bbcaf35352a81eb2f0d34d449739039b2ef786e14", size = 316599, upload-time = "2026-03-31T17:35:29.339Z" }, - { url = "https://files.pythonhosted.org/packages/cb/50/1eda610e9ca8ac59950ae028080e7c5320d7abc5567d6723d0cb3623e838/dm_tree-0.1.10-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c0f54547fbd4b82e88c71694b3836c90059b97102d3e36209f5d2fa66950964", size = 184263, upload-time = "2026-03-31T17:35:30.534Z" }, - { url = 
"https://files.pythonhosted.org/packages/c7/59/07461ceb563702ba3943725bdf0e04be4de0ed7ef093837cdd2d67141d2a/dm_tree-0.1.10-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cf6706ac425272c9b7e05f05a23a1ff3e670fb59a787f6089a638eea2d06f1d0", size = 186328, upload-time = "2026-03-31T17:35:31.894Z" }, - { url = "https://files.pythonhosted.org/packages/88/af/d9c84787fefe9f7c35f474a945217c38396f2ca5ab06432fb566e32a7d1a/dm_tree-0.1.10-cp314-cp314-win_amd64.whl", hash = "sha256:a132047e846e769ddacefe77c42ae79bf3d0e9fce2a6adb638a0ea4cbadb8cdb", size = 114799, upload-time = "2026-03-31T17:35:33.361Z" }, - { url = "https://files.pythonhosted.org/packages/fd/2c/2aaa63a510db520cd9e0c51e053a608486169bb9710f51f4ecf5699cebb4/dm_tree-0.1.10-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:23682221f63ad011dbd762ce5314740d7900b0426a2681614ea2472369b0c49c", size = 324205, upload-time = "2026-03-31T17:35:34.679Z" }, - { url = "https://files.pythonhosted.org/packages/b0/89/a5a302bcf9c345e6bd0498627ee2aa12f0a1c3538d08a2f5884d3c6783ba/dm_tree-0.1.10-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8baeb3db1e92587d686022fb67a52f6c584a7d32bd98444ed3aafb399ad9ce67", size = 185113, upload-time = "2026-03-31T17:35:36.179Z" }, - { url = "https://files.pythonhosted.org/packages/cc/e8/2d4fbc54bb68905588945cfb47c05445c66cab2d822b05827f1c62e23a70/dm_tree-0.1.10-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2236c9a4cf64ed0b04004a94902f39341be652b70dce322b33f08ada9b146baa", size = 187009, upload-time = "2026-03-31T17:35:37.584Z" }, -] - [[package]] name = "docopt" version = "0.6.2" @@ -1244,16 +1319,16 @@ wheels = [ [[package]] name = "farama-notifications" -version = "0.0.4" +version = "0.0.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2e/2c/8384832b7a6b1fd6ba95bbdcae26e7137bb3eedc955c42fd5cdcc086cfbf/Farama-Notifications-0.0.4.tar.gz", 
hash = "sha256:13fceff2d14314cf80703c8266462ebf3733c7d165336eee998fc58e545efd18", size = 2131, upload-time = "2023-02-27T18:28:41.047Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/91/14397890dde30adc4bee6462158933806207bc5dd10d7b4d09d5c33845cf/farama_notifications-0.0.6.tar.gz", hash = "sha256:b19acac4bb41d76e59e03394b5dd165f4761c86fa327f56307a35cbee3b60158", size = 2517, upload-time = "2026-04-24T08:43:57.603Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/05/2c/ffc08c54c05cdce6fbed2aeebc46348dbe180c6d2c541c7af7ba0aa5f5f8/Farama_Notifications-0.0.4-py3-none-any.whl", hash = "sha256:14de931035a41961f7c056361dc7f980762a143d05791ef5794a751a2caf05ae", size = 2511, upload-time = "2023-02-27T18:28:39.447Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f0/21f81892e4ed10f4ec3ef2e7cf8635fb76e7c0907c55d0da66be50094760/farama_notifications-0.0.6-py3-none-any.whl", hash = "sha256:f84839188efa1ce5bb361c2a84881b2dc2c0d0d7fb661ff00421820170930935", size = 2897, upload-time = "2026-04-24T08:43:56.785Z" }, ] [[package]] name = "fastapi" -version = "0.135.3" +version = "0.136.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-doc" }, @@ -1262,9 +1337,9 @@ dependencies = [ { name = "typing-extensions" }, { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f7/e6/7adb4c5fa231e82c35b8f5741a9f2d055f520c29af5546fd70d3e8e1cd2e/fastapi-0.135.3.tar.gz", hash = "sha256:bd6d7caf1a2bdd8d676843cdcd2287729572a1ef524fc4d65c17ae002a1be654", size = 396524, upload-time = "2026-04-01T16:23:58.188Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5d/45/c130091c2dfa061bbfe3150f2a5091ef1adf149f2a8d2ae769ecaf6e99a2/fastapi-0.136.1.tar.gz", hash = "sha256:7af665ad7acfa0a3baf8983d393b6b471b9da10ede59c60045f49fbc89a0fa7f", size = 397448, upload-time = "2026-04-23T16:49:44.046Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/84/a4/5caa2de7f917a04ada20018eccf60d6cc6145b0199d55ca3711b0fc08312/fastapi-0.135.3-py3-none-any.whl", hash = "sha256:9b0f590c813acd13d0ab43dd8494138eb58e484bfac405db1f3187cfc5810d98", size = 117734, upload-time = "2026-04-01T16:23:59.328Z" }, + { url = "https://files.pythonhosted.org/packages/5a/ff/2e4eca3ade2c22fe1dea7043b8ee9dabe47753349eb1b56a202de8af6349/fastapi-0.136.1-py3-none-any.whl", hash = "sha256:a6e9d7eeada96c93a4d69cb03836b44fa34e2854accb7244a1ece36cd4781c3f", size = 117683, upload-time = "2026-04-23T16:49:42.437Z" }, ] [[package]] @@ -1287,11 +1362,11 @@ sdist = { url = "https://files.pythonhosted.org/packages/5f/8e/c53d6f9a8bf3a86a6 [[package]] name = "filelock" -version = "3.25.2" +version = "3.29.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/94/b8/00651a0f559862f3bb7d6f7477b192afe3f583cc5e26403b44e59a55ab34/filelock-3.25.2.tar.gz", hash = "sha256:b64ece2b38f4ca29dd3e810287aa8c48182bbecd1ae6e9ae126c9b35f1382694", size = 40480, upload-time = "2026-03-11T20:45:38.487Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/fe/997687a931ab51049acce6fa1f23e8f01216374ea81374ddee763c493db5/filelock-3.29.0.tar.gz", hash = "sha256:69974355e960702e789734cb4871f884ea6fe50bd8404051a3530bc07809cf90", size = 57571, upload-time = "2026-04-19T15:39:10.068Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a4/a5/842ae8f0c08b61d6484b52f99a03510a3a72d23141942d216ebe81fefbce/filelock-3.25.2-py3-none-any.whl", hash = "sha256:ca8afb0da15f229774c9ad1b455ed96e85a81373065fb10446672f64444ddf70", size = 26759, upload-time = "2026-03-11T20:45:37.437Z" }, + { url = "https://files.pythonhosted.org/packages/81/47/dd9a212ef6e343a6857485ffe25bba537304f1913bdbed446a23f7f592e1/filelock-3.29.0-py3-none-any.whl", hash = "sha256:96f5f6344709aa1572bbf631c640e4ebeeb519e08da902c39a001882f30ac258", size = 39812, upload-time = "2026-04-19T15:39:08.752Z" }, ] 
[[package]] @@ -1353,6 +1428,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fd/ba/56147c165442cc5ba7e82ecf301c9a68353cede498185869e6e02b4c264f/fonttools-4.62.1-py3-none-any.whl", hash = "sha256:7487782e2113861f4ddcc07c3436450659e3caa5e470b27dc2177cade2d8e7fd", size = 1152647, upload-time = "2026-03-13T13:54:22.735Z" }, ] +[[package]] +name = "fqdn" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/3e/a80a8c077fd798951169626cde3e239adeba7dab75deb3555716415bd9b0/fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f", size = 6015, upload-time = "2021-03-11T07:16:29.08Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/58/8acf1b3e91c58313ce5cb67df61001fc9dcd21be4fadb76c1a2d540e09ed/fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014", size = 9121, upload-time = "2021-03-11T07:16:28.351Z" }, +] + [[package]] name = "frozenlist" version = "1.8.0" @@ -1479,14 +1563,14 @@ wheels = [ [[package]] name = "gitpython" -version = "3.1.46" +version = "3.1.47" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "gitdb" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/df/b5/59d16470a1f0dfe8c793f9ef56fd3826093fc52b3bd96d6b9d6c26c7e27b/gitpython-3.1.46.tar.gz", hash = "sha256:400124c7d0ef4ea03f7310ac2fbf7151e09ff97f2a3288d64a440c584a29c37f", size = 215371, upload-time = "2026-01-01T15:37:32.073Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c1/bd/50db468e9b1310529a19fce651b3b0e753b5c07954d486cba31bbee9a5d5/gitpython-3.1.47.tar.gz", hash = "sha256:dba27f922bd2b42cb54c87a8ab3cb6beb6bf07f3d564e21ac848913a05a8a3cd", size = 216978, upload-time = "2026-04-22T02:44:44.059Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/6a/09/e21df6aef1e1ffc0c816f0522ddc3f6dcded766c3261813131c78a704470/gitpython-3.1.46-py3-none-any.whl", hash = "sha256:79812ed143d9d25b6d176a10bb511de0f9c67b1fa641d82097b0ab90398a2058", size = 208620, upload-time = "2026-01-01T15:37:30.574Z" }, + { url = "https://files.pythonhosted.org/packages/f2/c5/a1bc0996af85757903cf2bf444a7824e68e0035ce63fb41d6f76f9def68b/gitpython-3.1.47-py3-none-any.whl", hash = "sha256:489f590edfd6d20571b2c0e72c6a6ac6915ee8b8cd04572330e3842207a78905", size = 209547, upload-time = "2026-04-22T02:44:41.271Z" }, ] [[package]] @@ -1633,7 +1717,7 @@ wheels = [ [[package]] name = "gymnasium" -version = "1.2.3" +version = "1.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cloudpickle" }, @@ -1641,9 +1725,9 @@ dependencies = [ { name = "numpy" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/76/59/653a9417d98ed3e29ef9734ba52c3495f6c6823b8d5c0c75369f25111708/gymnasium-1.2.3.tar.gz", hash = "sha256:2b2cb5b5fbbbdf3afb9f38ca952cc48aa6aa3e26561400d940747fda3ad42509", size = 829230, upload-time = "2025-12-18T16:51:10.234Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4d/ff/14b6880d703dfaca204490979d3254ccd280c99550798993319902873658/gymnasium-1.3.0.tar.gz", hash = "sha256:6939e86e835d6b71b6ba6bfd360487420876deafc79bfb7bacba83a7c446bcf3", size = 830646, upload-time = "2026-04-22T13:47:14.155Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/56/d3/ea5f088e3638dbab12e5c20d6559d5b3bdaeaa1f2af74e526e6815836285/gymnasium-1.2.3-py3-none-any.whl", hash = "sha256:e6314bba8f549c7fdcc8677f7cd786b64908af6e79b57ddaa5ce1825bffb5373", size = 952113, upload-time = "2025-12-18T16:51:08.445Z" }, + { url = "https://files.pythonhosted.org/packages/e9/73/fda6a25f3beeb5e49d74330b44092b9e5a547395ccd478d1103ddcbff1fc/gymnasium-1.3.0-py3-none-any.whl", hash = 
"sha256:6b8c159a8540dcbcb221722d7efda24d78ebbcbc3bd2ea1c2611aa2a34471fc2", size = 953904, upload-time = "2026-04-22T13:47:12.13Z" }, ] [[package]] @@ -1846,7 +1930,7 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "1.10.1" +version = "1.12.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, @@ -1859,9 +1943,9 @@ dependencies = [ { name = "typer" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e4/28/baf5d745559503ce8d28cf5bc9551f5ac59158eafd7b6a6afff0bcdb0f50/huggingface_hub-1.10.1.tar.gz", hash = "sha256:696c53cf9c2ac9befbfb5dd41d05392a031c69fc6930d1ed9671debd405b6fff", size = 758094, upload-time = "2026-04-09T15:01:18.928Z" } +sdist = { url = "https://files.pythonhosted.org/packages/56/52/1b54cb569509c725a32c1315261ac9fd0e6b91bbbf74d86fca10d3376164/huggingface_hub-1.12.0.tar.gz", hash = "sha256:7c3fe85e24b652334e5d456d7a812cd9a071e75630fac4365d9165ab5e4a34b6", size = 763091, upload-time = "2026-04-24T13:32:08.674Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/83/8c/c7a33f3efaa8d6a5bc40e012e5ecc2d72c2e6124550ca9085fe0ceed9993/huggingface_hub-1.10.1-py3-none-any.whl", hash = "sha256:6b981107a62fbe68c74374418983399c632e35786dcd14642a9f2972633c8b5a", size = 642630, upload-time = "2026-04-09T15:01:17.35Z" }, + { url = "https://files.pythonhosted.org/packages/7e/2b/ef03ddb96bd1123503c2bd6932001020292deea649e9bf4caa2cb65a85bf/huggingface_hub-1.12.0-py3-none-any.whl", hash = "sha256:d74939969585ee35748bd66de09baf84099d461bda7287cd9043bfb99b0e424d", size = 646806, upload-time = "2026-04-24T13:32:06.717Z" }, ] [[package]] @@ -1880,20 +1964,20 @@ wheels = [ [[package]] name = "identify" -version = "2.6.18" +version = "2.6.19" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/46/c4/7fb4db12296cdb11893d61c92048fe617ee853f8523b9b296ac03b43757e/identify-2.6.18.tar.gz", hash = 
"sha256:873ac56a5e3fd63e7438a7ecbc4d91aca692eb3fefa4534db2b7913f3fc352fd", size = 99580, upload-time = "2026-03-15T18:39:50.319Z" } +sdist = { url = "https://files.pythonhosted.org/packages/52/63/51723b5f116cc04b061cb6f5a561790abf249d25931d515cd375e063e0f4/identify-2.6.19.tar.gz", hash = "sha256:6be5020c38fcb07da56c53733538a3081ea5aa70d36a156f83044bfbf9173842", size = 99567, upload-time = "2026-04-17T18:39:50.265Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/46/33/92ef41c6fad0233e41d3d84ba8e8ad18d1780f1e5d99b3c683e6d7f98b63/identify-2.6.18-py2.py3-none-any.whl", hash = "sha256:8db9d3c8ea9079db92cafb0ebf97abdc09d52e97f4dcf773a2e694048b7cd737", size = 99394, upload-time = "2026-03-15T18:39:48.915Z" }, + { url = "https://files.pythonhosted.org/packages/94/84/d9273cd09688070a6523c4aee4663a8538721b2b755c4962aafae0011e72/identify-2.6.19-py2.py3-none-any.whl", hash = "sha256:20e6a87f786f768c092a721ad107fc9df0eb89347be9396cadf3f4abbd1fb78a", size = 99397, upload-time = "2026-04-17T18:39:49.221Z" }, ] [[package]] name = "idna" -version = "3.11" +version = "3.13" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ce/cc/762dfb036166873f0059f3b7de4565e1b5bc3d6f28a414c13da27e442f99/idna-3.13.tar.gz", hash = "sha256:585ea8fe5d69b9181ec1afba340451fba6ba764af97026f92a91d4eef164a242", size = 194210, upload-time = "2026-04-22T16:42:42.314Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = 
"2025-10-12T14:55:18.883Z" }, + { url = "https://files.pythonhosted.org/packages/5d/13/ad7d7ca3808a898b4612b6fe93cde56b53f3034dcde235acb1f0e1df24c6/idna-3.13-py3-none-any.whl", hash = "sha256:892ea0cde124a99ce773decba204c5552b69c3c67ffd5f232eb7696135bc8bb3", size = 68629, upload-time = "2026-04-22T16:42:40.909Z" }, ] [[package]] @@ -1950,9 +2034,33 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, ] +[[package]] +name = "ipykernel" +version = "6.31.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "appnope", marker = "sys_platform == 'darwin'" }, + { name = "comm" }, + { name = "debugpy" }, + { name = "ipython" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "matplotlib-inline" }, + { name = "nest-asyncio" }, + { name = "packaging" }, + { name = "psutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a5/1d/d5ba6edbfe6fae4c3105bca3a9c889563cc752c7f2de45e333164c7f4846/ipykernel-6.31.0.tar.gz", hash = "sha256:2372ce8bc1ff4f34e58cafed3a0feb2194b91fc7cad0fc72e79e47b45ee9e8f6", size = 167493, upload-time = "2025-10-20T11:42:39.948Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/d8/502954a4ec0efcf264f99b65b41c3c54e65a647d9f0d6f62cd02227d242c/ipykernel-6.31.0-py3-none-any.whl", hash = "sha256:abe5386f6ced727a70e0eb0cf1da801fa7c5fa6ff82147747d5a0406cd8c94af", size = 117003, upload-time = "2025-10-20T11:42:37.502Z" }, +] + [[package]] name = "ipython" -version = "9.12.0" +version = "9.13.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -1962,13 +2070,14 @@ dependencies = [ { 
name = "matplotlib-inline" }, { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, { name = "prompt-toolkit" }, + { name = "psutil" }, { name = "pygments" }, { name = "stack-data" }, { name = "traitlets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3a/73/7114f80a8f9cabdb13c27732dce24af945b2923dcab80723602f7c8bc2d8/ipython-9.12.0.tar.gz", hash = "sha256:01daa83f504b693ba523b5a407246cabde4eb4513285a3c6acaff11a66735ee4", size = 4428879, upload-time = "2026-03-27T09:42:45.312Z" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/c4/87cda5842cf5c31837c06ddb588e11c3c35d8ece89b7a0108c06b8c9b00a/ipython-9.13.0.tar.gz", hash = "sha256:7e834b6afc99f020e3f05966ced34792f40267d64cb1ea9043886dab0dde5967", size = 4430549, upload-time = "2026-04-24T12:24:55.221Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/59/22/906c8108974c673ebef6356c506cebb6870d48cedea3c41e949e2dd556bb/ipython-9.12.0-py3-none-any.whl", hash = "sha256:0f2701e8ee86e117e37f50563205d36feaa259d2e08d4a6bc6b6d74b18ce128d", size = 625661, upload-time = "2026-03-27T09:42:42.831Z" }, + { url = "https://files.pythonhosted.org/packages/b9/86/3060e8029b7cc505cce9a0137431dda81d0a3fde93a8f0f50ee0bf37a795/ipython-9.13.0-py3-none-any.whl", hash = "sha256:57f9d4639e20818d328d287c7b549af3d05f12486ea8f2e7f73e52a36ec4d201", size = 627274, upload-time = "2026-04-24T12:24:53.038Z" }, ] [[package]] @@ -1983,6 +2092,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" }, ] +[[package]] +name = "ipywidgets" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "comm" }, + { name = "ipython" }, + { name = "jupyterlab-widgets" }, + { name = 
"traitlets" }, + { name = "widgetsnbextension" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4c/ae/c5ce1edc1afe042eadb445e95b0671b03cee61895264357956e61c0d2ac0/ipywidgets-8.1.8.tar.gz", hash = "sha256:61f969306b95f85fba6b6986b7fe45d73124d1d9e3023a8068710d47a22ea668", size = 116739, upload-time = "2025-11-01T21:18:12.393Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/6d/0d9848617b9f753b87f214f1c682592f7ca42de085f564352f10f0843026/ipywidgets-8.1.8-py3-none-any.whl", hash = "sha256:ecaca67aed704a338f88f67b1181b58f821ab5dc89c1f0f5ef99db43c1c2921e", size = 139808, upload-time = "2025-11-01T21:18:10.956Z" }, +] + [[package]] name = "ischedule" version = "1.2.7" @@ -1992,6 +2117,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/94/0c/937caa3558fa5787927585cce67af4446fec8996d4c28c88d7efdbad6b14/ischedule-1.2.7-py3-none-any.whl", hash = "sha256:2a327e57e3ea179d3d9675cef1e7a0719f2a936c61a9d1ae288c3a5f5156939d", size = 5273, upload-time = "2024-03-15T19:13:09.105Z" }, ] +[[package]] +name = "isoduration" +version = "20.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "arrow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/1a/3c8edc664e06e6bd06cce40c6b22da5f1429aa4224d0c590f3be21c91ead/isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9", size = 11649, upload-time = "2020-11-01T11:00:00.312Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/55/e5326141505c5d5e34c5e0935d2908a74e4561eca44108fbfb9c13d2911a/isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042", size = 11321, upload-time = "2020-11-01T10:59:58.02Z" }, +] + [[package]] name = "jedi" version = "0.19.2" @@ -2016,6 +2153,15 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, ] +[[package]] +name = "json5" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9c/4b/6f8906aaf67d501e259b0adab4d312945bb7211e8b8d4dcc77c92320edaa/json5-0.14.0.tar.gz", hash = "sha256:b3f492fad9f6cdbced8b7d40b28b9b1c9701c5f561bef0d33b81c2ff433fefcb", size = 52656, upload-time = "2026-03-27T22:50:48.108Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b8/42/cf027b4ac873b076189d935b135397675dac80cb29acb13e1ab86ad6c631/json5-0.14.0-py3-none-any.whl", hash = "sha256:56cf861bab076b1178eb8c92e1311d273a9b9acea2ccc82c276abf839ebaef3a", size = 36271, upload-time = "2026-03-27T22:50:47.073Z" }, +] + [[package]] name = "jsonlines" version = "4.0.0" @@ -2028,46 +2174,253 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f8/62/d9ba6323b9202dd2fe166beab8a86d29465c41a0288cbe229fac60c1ab8d/jsonlines-4.0.0-py3-none-any.whl", hash = "sha256:185b334ff2ca5a91362993f42e83588a360cf95ce4b71a73548502bda52a7c55", size = 8701, upload-time = "2023-09-01T12:34:42.563Z" }, ] +[[package]] +name = "jsonpointer" +version = "3.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/c7/af399a2e7a67fd18d63c40c5e62d3af4e67b836a2107468b6a5ea24c4304/jsonpointer-3.1.1.tar.gz", hash = "sha256:0b801c7db33a904024f6004d526dcc53bbb8a4a0f4e32bfd10beadf60adf1900", size = 9068, upload-time = "2026-03-23T22:32:32.458Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/6a/a83720e953b1682d2d109d3c2dbb0bc9bf28cc1cbc205be4ef4be5da709d/jsonpointer-3.1.1-py3-none-any.whl", hash = "sha256:8ff8b95779d071ba472cf5bc913028df06031797532f08a7d5b602d8b2a488ca", 
size = 7659, upload-time = "2026-03-23T22:32:31.568Z" }, +] + [[package]] name = "jsonschema" version = "4.26.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "attrs", marker = "sys_platform == 'linux'" }, - { name = "jsonschema-specifications", marker = "sys_platform == 'linux'" }, - { name = "referencing", marker = "sys_platform == 'linux'" }, - { name = "rpds-py", marker = "sys_platform == 'linux'" }, + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, ] sdist = { url = "https://files.pythonhosted.org/packages/b3/fc/e067678238fa451312d4c62bf6e6cf5ec56375422aee02f9cb5f909b3047/jsonschema-4.26.0.tar.gz", hash = "sha256:0c26707e2efad8aa1bfc5b7ce170f3fccc2e4918ff85989ba9ffa9facb2be326", size = 366583, upload-time = "2026-01-07T13:41:07.246Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/69/90/f63fb5873511e014207a475e2bb4e8b2e570d655b00ac19a9a0ca0a385ee/jsonschema-4.26.0-py3-none-any.whl", hash = "sha256:d489f15263b8d200f8387e64b4c3a75f06629559fb73deb8fdfb525f2dab50ce", size = 90630, upload-time = "2026-01-07T13:41:05.306Z" }, ] +[package.optional-dependencies] +format-nongpl = [ + { name = "fqdn" }, + { name = "idna" }, + { name = "isoduration" }, + { name = "jsonpointer" }, + { name = "rfc3339-validator" }, + { name = "rfc3986-validator" }, + { name = "rfc3987-syntax" }, + { name = "uri-template" }, + { name = "webcolors" }, +] + [[package]] name = "jsonschema-specifications" version = "2025.9.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "referencing", marker = "sys_platform == 'linux'" }, + { name = "referencing" }, ] sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } 
wheels = [ { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, ] +[[package]] +name = "jupyter" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ipykernel" }, + { name = "ipywidgets" }, + { name = "jupyter-console" }, + { name = "jupyterlab" }, + { name = "nbconvert" }, + { name = "notebook" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/f3/af28ea964ab8bc1e472dba2e82627d36d470c51f5cd38c37502eeffaa25e/jupyter-1.1.1.tar.gz", hash = "sha256:d55467bceabdea49d7e3624af7e33d59c37fff53ed3a350e1ac957bed731de7a", size = 5714959, upload-time = "2024-08-30T07:15:48.299Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/64/285f20a31679bf547b75602702f7800e74dbabae36ef324f716c02804753/jupyter-1.1.1-py2.py3-none-any.whl", hash = "sha256:7a59533c22af65439b24bbe60373a4e95af8f16ac65a6c00820ad378e3f7cc83", size = 2657, upload-time = "2024-08-30T07:15:47.045Z" }, +] + +[[package]] +name = "jupyter-client" +version = "8.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-core" }, + { name = "python-dateutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/e4/ba649102a3bc3fbca54e7239fb924fd434c766f855693d86de0b1f2bec81/jupyter_client-8.8.0.tar.gz", hash = "sha256:d556811419a4f2d96c869af34e854e3f059b7cc2d6d01a9cd9c85c267691be3e", size = 348020, upload-time = "2026-01-08T13:55:47.938Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/0b/ceb7694d864abc0a047649aec263878acb9f792e1fec3e676f22dc9015e3/jupyter_client-8.8.0-py3-none-any.whl", hash = 
"sha256:f93a5b99c5e23a507b773d3a1136bd6e16c67883ccdbd9a829b0bbdb98cd7d7a", size = 107371, upload-time = "2026-01-08T13:55:45.562Z" }, +] + +[[package]] +name = "jupyter-console" +version = "6.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ipykernel" }, + { name = "ipython" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "pyzmq" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bd/2d/e2fd31e2fc41c14e2bcb6c976ab732597e907523f6b2420305f9fc7fdbdb/jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539", size = 34363, upload-time = "2023-03-06T14:13:31.02Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/77/71d78d58f15c22db16328a476426f7ac4a60d3a5a7ba3b9627ee2f7903d4/jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485", size = 24510, upload-time = "2023-03-06T14:13:28.229Z" }, +] + [[package]] name = "jupyter-core" version = "5.9.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "platformdirs", marker = "sys_platform == 'linux'" }, - { name = "traitlets", marker = "sys_platform == 'linux'" }, + { name = "platformdirs" }, + { name = "traitlets" }, ] sdist = { url = "https://files.pythonhosted.org/packages/02/49/9d1284d0dc65e2c757b74c6687b6d319b02f822ad039e5c512df9194d9dd/jupyter_core-5.9.1.tar.gz", hash = "sha256:4d09aaff303b9566c3ce657f580bd089ff5c91f5f89cf7d8846c3cdf465b5508", size = 89814, upload-time = "2025-10-16T19:19:18.444Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/e7/e7/80988e32bf6f73919a113473a604f5a8f09094de312b9d52b79c2df7612b/jupyter_core-5.9.1-py3-none-any.whl", hash = "sha256:ebf87fdc6073d142e114c72c9e29a9d7ca03fad818c5d300ce2adc1fb0743407", size = 29032, upload-time = "2025-10-16T19:19:16.783Z" }, ] 
+[[package]] +name = "jupyter-events" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonschema", extra = ["format-nongpl"] }, + { name = "packaging" }, + { name = "python-json-logger" }, + { name = "pyyaml" }, + { name = "referencing" }, + { name = "rfc3339-validator" }, + { name = "rfc3986-validator" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/18/f8/475c4241b2b75af0deaae453ed003c6c851766dbc44d332d8baf245dc931/jupyter_events-0.12.1.tar.gz", hash = "sha256:faff25f77218335752f35f23c5fe6e4a392a7bd99a5939ccb9b8fbf594636cf3", size = 62854, upload-time = "2026-04-20T23:17:50.66Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/6c/6fcde0c8f616ed360ffd3587f7db9e225a7e62b583a04494d2f069cf64ea/jupyter_events-0.12.1-py3-none-any.whl", hash = "sha256:c366585253f537a627da52fa7ca7410c5b5301fe893f511e7b077c2d93ec8bcf", size = 19512, upload-time = "2026-04-20T23:17:48.927Z" }, +] + +[[package]] +name = "jupyter-lsp" +version = "2.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-server" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/36/ff/1e4a61f5170a9a1d978f3ac3872449de6c01fc71eaf89657824c878b1549/jupyter_lsp-2.3.1.tar.gz", hash = "sha256:fdf8a4aa7d85813976d6e29e95e6a2c8f752701f926f2715305249a3829805a6", size = 55677, upload-time = "2026-04-02T08:10:06.749Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/23/e8/9d61dcbd1dce8ef418f06befd4ac084b4720429c26b0b1222bc218685eff/jupyter_lsp-2.3.1-py3-none-any.whl", hash = "sha256:71b954d834e85ff3096400554f2eefaf7fe37053036f9a782b0f7c5e42dadb81", size = 77513, upload-time = "2026-04-02T08:10:01.753Z" }, +] + +[[package]] +name = "jupyter-server" +version = "2.17.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "argon2-cffi" }, + { name = "jinja2" }, + { name = "jupyter-client" }, + { name 
= "jupyter-core" }, + { name = "jupyter-events" }, + { name = "jupyter-server-terminals" }, + { name = "nbconvert" }, + { name = "nbformat" }, + { name = "packaging" }, + { name = "prometheus-client" }, + { name = "pywinpty", marker = "os_name == 'nt' and sys_platform != 'linux'" }, + { name = "pyzmq" }, + { name = "send2trash" }, + { name = "terminado" }, + { name = "tornado" }, + { name = "traitlets" }, + { name = "websocket-client" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/ac/e040ec363d7b6b1f11304cc9f209dac4517ece5d5e01821366b924a64a50/jupyter_server-2.17.0.tar.gz", hash = "sha256:c38ea898566964c888b4772ae1ed58eca84592e88251d2cfc4d171f81f7e99d5", size = 731949, upload-time = "2025-08-21T14:42:54.042Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/92/80/a24767e6ca280f5a49525d987bf3e4d7552bf67c8be07e8ccf20271f8568/jupyter_server-2.17.0-py3-none-any.whl", hash = "sha256:e8cb9c7db4251f51ed307e329b81b72ccf2056ff82d50524debde1ee1870e13f", size = 388221, upload-time = "2025-08-21T14:42:52.034Z" }, +] + +[[package]] +name = "jupyter-server-terminals" +version = "0.5.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pywinpty", marker = "os_name == 'nt' and sys_platform != 'linux'" }, + { name = "terminado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f4/a7/bcd0a9b0cbba88986fe944aaaf91bfda603e5a50bda8ed15123f381a3b2f/jupyter_server_terminals-0.5.4.tar.gz", hash = "sha256:bbda128ed41d0be9020349f9f1f2a4ab9952a73ed5f5ac9f1419794761fb87f5", size = 31770, upload-time = "2026-01-14T16:53:20.213Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/2d/6674563f71c6320841fc300911a55143925112a72a883e2ca71fba4c618d/jupyter_server_terminals-0.5.4-py3-none-any.whl", hash = "sha256:55be353fc74a80bc7f3b20e6be50a55a61cd525626f578dcb66a5708e2007d14", size = 13704, upload-time = "2026-01-14T16:53:18.738Z" }, +] + +[[package]] +name = "jupyterlab" +version = "4.5.6" +source = { 
registry = "https://pypi.org/simple" } +dependencies = [ + { name = "async-lru" }, + { name = "httpx" }, + { name = "ipykernel" }, + { name = "jinja2" }, + { name = "jupyter-core" }, + { name = "jupyter-lsp" }, + { name = "jupyter-server" }, + { name = "jupyterlab-server" }, + { name = "notebook-shim" }, + { name = "packaging" }, + { name = "setuptools" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ac/d5/730628e03fff2e8a8e8ccdaedde1489ab1309f9a4fa2536248884e30b7c7/jupyterlab-4.5.6.tar.gz", hash = "sha256:642fe2cfe7f0f5922a8a558ba7a0d246c7bc133b708dfe43f7b3a826d163cf42", size = 23970670, upload-time = "2026-03-11T14:17:04.531Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/1b/dad6fdcc658ed7af26fdf3841e7394072c9549a8b896c381ab49dd11e2d9/jupyterlab-4.5.6-py3-none-any.whl", hash = "sha256:d6b3dac883aa4d9993348e0f8e95b24624f75099aed64eab6a4351a9cdd1e580", size = 12447124, upload-time = "2026-03-11T14:17:00.229Z" }, +] + +[[package]] +name = "jupyterlab-pygments" +version = "0.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/90/51/9187be60d989df97f5f0aba133fa54e7300f17616e065d1ada7d7646b6d6/jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d", size = 512900, upload-time = "2023-11-23T09:26:37.44Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/dd/ead9d8ea85bf202d90cc513b533f9c363121c7792674f78e0d8a854b63b4/jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780", size = 15884, upload-time = "2023-11-23T09:26:34.325Z" }, +] + +[[package]] +name = "jupyterlab-server" +version = "2.28.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "babel" }, + { name = "jinja2" }, + { name = "json5" }, + { name = "jsonschema" }, + { name = 
"jupyter-server" }, + { name = "packaging" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/2c/90153f189e421e93c4bb4f9e3f59802a1f01abd2ac5cf40b152d7f735232/jupyterlab_server-2.28.0.tar.gz", hash = "sha256:35baa81898b15f93573e2deca50d11ac0ae407ebb688299d3a5213265033712c", size = 76996, upload-time = "2025-10-22T13:59:18.37Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/07/a000fe835f76b7e1143242ab1122e6362ef1c03f23f83a045c38859c2ae0/jupyterlab_server-2.28.0-py3-none-any.whl", hash = "sha256:e4355b148fdcf34d312bbbc80f22467d6d20460e8b8736bf235577dd18506968", size = 59830, upload-time = "2025-10-22T13:59:16.767Z" }, +] + +[[package]] +name = "jupyterlab-widgets" +version = "3.0.16" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/2d/ef58fed122b268c69c0aa099da20bc67657cdfb2e222688d5731bd5b971d/jupyterlab_widgets-3.0.16.tar.gz", hash = "sha256:423da05071d55cf27a9e602216d35a3a65a3e41cdf9c5d3b643b814ce38c19e0", size = 897423, upload-time = "2025-11-01T21:11:29.724Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/b5/36c712098e6191d1b4e349304ef73a8d06aed77e56ceaac8c0a306c7bda1/jupyterlab_widgets-3.0.16-py3-none-any.whl", hash = "sha256:45fa36d9c6422cf2559198e4db481aa243c7a32d9926b500781c830c80f7ecf8", size = 914926, upload-time = "2025-11-01T21:11:28.008Z" }, +] + [[package]] name = "jupytext" version = "1.19.1" @@ -2188,6 +2541,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5b/0f/13f0d54305e66c14c90512f3682f713273ec9aa94d107be7947157b37a74/labmaze-1.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:5af997598cc46b1929d1c5a1febc32fd56c75874fe481a2a5982c65cee8450c9", size = 4811813, upload-time = "2023-10-04T17:20:30.837Z" }, ] +[[package]] +name = "lark" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/da/34/28fff3ab31ccff1fd4f6c7c7b0ceb2b6968d8ea4950663eadcb5720591a0/lark-1.3.1.tar.gz", hash = "sha256:b426a7a6d6d53189d318f2b6236ab5d6429eaf09259f1ca33eb716eed10d2905", size = 382732, upload-time = "2025-10-27T18:25:56.653Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/82/3d/14ce75ef66813643812f3093ab17e46d3a206942ce7376d31ec2d36229e7/lark-1.3.1-py3-none-any.whl", hash = "sha256:c629b661023a014c37da873b4ff58a817398d12635d3bbb2c5a03be7fe5d1e12", size = 113151, upload-time = "2025-10-27T18:25:54.882Z" }, +] + [[package]] name = "lazy-loader" version = "0.5" @@ -2244,7 +2606,9 @@ all = [ { name = "hebi-py" }, { name = "hf-libero", marker = "sys_platform == 'linux'" }, { name = "hidapi" }, + { name = "ipykernel" }, { name = "jsonlines" }, + { name = "jupyter" }, { name = "matplotlib" }, { name = "metaworld" }, { name = "mock-serial", marker = "sys_platform != 'win32'" }, @@ -2334,10 +2698,15 @@ dataset-viz = [ { name = "rerun-sdk" }, { name = "torchcodec", marker = "(platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and sys_platform == 'linux') or (platform_machine != 'x86_64' and sys_platform == 'darwin') or (sys_platform != 'darwin' and sys_platform != 'linux' and sys_platform != 'win32')" }, ] +deepdiff-dep = [ + { name = "deepdiff" }, +] dev = [ { name = "debugpy" }, { name = "grpcio" }, { name = "grpcio-tools" }, + { name = "ipykernel" }, + { name = "jupyter" }, { name = "mypy" }, { name = "pre-commit" }, { name = "protobuf" }, @@ -2350,13 +2719,17 @@ diffusion = [ { name = "diffusers" }, ] dynamixel = [ + { name = "deepdiff" }, { name = "dynamixel-sdk" }, + { name = "pyserial" }, ] evaluation = [ { name = "av" }, ] feetech = [ + { name = "deepdiff" }, { name = "feetech-servo-sdk" }, + { name = "pyserial" }, ] gamepad = [ { name = "hidapi" }, @@ -2365,8 +2738,7 @@ gamepad = [ groot = [ { name = "decord", marker = "platform_machine == 'AMD64' or 
platform_machine == 'x86_64'" }, { name = "diffusers" }, - { name = "dm-tree", version = "0.1.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" }, - { name = "dm-tree", version = "0.1.10", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.14'" }, + { name = "dm-tree" }, { name = "flash-attn", marker = "sys_platform != 'darwin'" }, { name = "ninja" }, { name = "peft" }, @@ -2390,8 +2762,10 @@ hilserl = [ { name = "transformers" }, ] hopejr = [ + { name = "deepdiff" }, { name = "feetech-servo-sdk" }, { name = "pygame" }, + { name = "pyserial" }, ] intelrealsense = [ { name = "pyrealsense2", marker = "sys_platform != 'darwin'" }, @@ -2401,7 +2775,9 @@ kinematics = [ { name = "placo" }, ] lekiwi = [ + { name = "deepdiff" }, { name = "feetech-servo-sdk" }, + { name = "pyserial" }, { name = "pyzmq" }, ] libero = [ @@ -2433,6 +2809,10 @@ multi-task-dit = [ { name = "diffusers" }, { name = "transformers" }, ] +notebook = [ + { name = "ipykernel" }, + { name = "jupyter" }, +] openarms = [ { name = "python-can" }, ] @@ -2469,6 +2849,15 @@ pusht = [ pygame-dep = [ { name = "pygame" }, ] +pynput-dep = [ + { name = "pynput" }, +] +pyserial-dep = [ + { name = "pyserial" }, +] +pyzmq-dep = [ + { name = "pyzmq" }, +] qwen-vl-utils-dep = [ { name = "qwen-vl-utils" }, ] @@ -2520,6 +2909,7 @@ unitree-g1 = [ { name = "onnx" }, { name = "onnxruntime" }, { name = "pygame" }, + { name = "pyserial" }, { name = "pyzmq" }, ] video-benchmark = [ @@ -2550,7 +2940,7 @@ requires-dist = [ { name = "datasets", marker = "extra == 'dataset'", specifier = ">=4.0.0,<5.0.0" }, { name = "debugpy", marker = "extra == 'dev'", specifier = ">=1.8.1,<1.9.0" }, { name = "decord", marker = "(platform_machine == 'AMD64' and extra == 'groot') or (platform_machine == 'x86_64' and extra == 'groot')", specifier = ">=0.6.0,<1.0.0" }, - { name = "deepdiff", marker = "extra == 'hardware'", specifier = ">=7.0.1,<9.0.0" }, + { name = 
"deepdiff", marker = "extra == 'deepdiff-dep'", specifier = ">=7.0.1,<9.0.0" }, { name = "diffusers", marker = "extra == 'diffusers-dep'", specifier = ">=0.27.2,<0.36.0" }, { name = "dm-tree", marker = "extra == 'groot'", specifier = ">=0.1.8,<1.0.0" }, { name = "draccus", specifier = "==0.10.0" }, @@ -2570,7 +2960,9 @@ requires-dist = [ { name = "hf-libero", marker = "sys_platform == 'linux' and extra == 'libero'", specifier = ">=0.1.3,<0.2.0" }, { name = "hidapi", marker = "extra == 'gamepad'", specifier = ">=0.14.0,<0.15.0" }, { name = "huggingface-hub", specifier = ">=1.0.0,<2.0.0" }, + { name = "ipykernel", marker = "extra == 'notebook'", specifier = ">=6.0.0,<7.0.0" }, { name = "jsonlines", marker = "extra == 'dataset'", specifier = ">=4.0.0,<5.0.0" }, + { name = "jupyter", marker = "extra == 'notebook'", specifier = ">=1.0.0,<2.0.0" }, { name = "lerobot", extras = ["aloha"], marker = "extra == 'all'" }, { name = "lerobot", extras = ["async"], marker = "extra == 'all'" }, { name = "lerobot", extras = ["av-dep"], marker = "extra == 'dataset'" }, @@ -2587,6 +2979,9 @@ requires-dist = [ { name = "lerobot", extras = ["dataset"], marker = "extra == 'metaworld'" }, { name = "lerobot", extras = ["dataset"], marker = "extra == 'pusht'" }, { name = "lerobot", extras = ["dataset"], marker = "extra == 'training'" }, + { name = "lerobot", extras = ["deepdiff-dep"], marker = "extra == 'dynamixel'" }, + { name = "lerobot", extras = ["deepdiff-dep"], marker = "extra == 'feetech'" }, + { name = "lerobot", extras = ["deepdiff-dep"], marker = "extra == 'hardware'" }, { name = "lerobot", extras = ["dev"], marker = "extra == 'all'" }, { name = "lerobot", extras = ["diffusers-dep"], marker = "extra == 'diffusion'" }, { name = "lerobot", extras = ["diffusers-dep"], marker = "extra == 'groot'" }, @@ -2613,6 +3008,7 @@ requires-dist = [ { name = "lerobot", extras = ["matplotlib-dep"], marker = "extra == 'unitree-g1'" }, { name = "lerobot", extras = ["metaworld"], marker = "extra == 
'all'" }, { name = "lerobot", extras = ["multi-task-dit"], marker = "extra == 'all'" }, + { name = "lerobot", extras = ["notebook"], marker = "extra == 'dev'" }, { name = "lerobot", extras = ["openarms"], marker = "extra == 'all'" }, { name = "lerobot", extras = ["peft"], marker = "extra == 'all'" }, { name = "lerobot", extras = ["peft-dep"], marker = "extra == 'groot'" }, @@ -2626,6 +3022,13 @@ requires-dist = [ { name = "lerobot", extras = ["pygame-dep"], marker = "extra == 'gamepad'" }, { name = "lerobot", extras = ["pygame-dep"], marker = "extra == 'hopejr'" }, { name = "lerobot", extras = ["pygame-dep"], marker = "extra == 'unitree-g1'" }, + { name = "lerobot", extras = ["pynput-dep"], marker = "extra == 'hardware'" }, + { name = "lerobot", extras = ["pyserial-dep"], marker = "extra == 'dynamixel'" }, + { name = "lerobot", extras = ["pyserial-dep"], marker = "extra == 'feetech'" }, + { name = "lerobot", extras = ["pyserial-dep"], marker = "extra == 'hardware'" }, + { name = "lerobot", extras = ["pyserial-dep"], marker = "extra == 'unitree-g1'" }, + { name = "lerobot", extras = ["pyzmq-dep"], marker = "extra == 'lekiwi'" }, + { name = "lerobot", extras = ["pyzmq-dep"], marker = "extra == 'unitree-g1'" }, { name = "lerobot", extras = ["qwen-vl-utils-dep"], marker = "extra == 'sarm'" }, { name = "lerobot", extras = ["qwen-vl-utils-dep"], marker = "extra == 'wallx'" }, { name = "lerobot", extras = ["reachy2"], marker = "extra == 'all'" }, @@ -2679,16 +3082,15 @@ requires-dist = [ { name = "pydantic", marker = "extra == 'sarm'", specifier = ">=2.0.0,<3.0.0" }, { name = "pygame", marker = "extra == 'pygame-dep'", specifier = ">=2.5.1,<2.7.0" }, { name = "pymunk", marker = "extra == 'pusht'", specifier = ">=6.6.0,<7.0.0" }, - { name = "pynput", marker = "extra == 'hardware'", specifier = ">=1.7.8,<1.9.0" }, + { name = "pynput", marker = "extra == 'pynput-dep'", specifier = ">=1.7.8,<1.9.0" }, { name = "pyrealsense2", marker = "sys_platform != 'darwin' and extra == 
'intelrealsense'", specifier = ">=2.55.1.6486,<2.57.0" }, { name = "pyrealsense2-macosx", marker = "sys_platform == 'darwin' and extra == 'intelrealsense'", specifier = ">=2.54,<2.57.0" }, - { name = "pyserial", marker = "extra == 'hardware'", specifier = ">=3.5,<4.0" }, + { name = "pyserial", marker = "extra == 'pyserial-dep'", specifier = ">=3.5,<4.0" }, { name = "pytest", marker = "extra == 'test'", specifier = ">=8.1.0,<9.0.0" }, { name = "pytest-cov", marker = "extra == 'test'", specifier = ">=5.0.0,<8.0.0" }, { name = "pytest-timeout", marker = "extra == 'test'", specifier = ">=2.4.0,<3.0.0" }, { name = "python-can", marker = "extra == 'can-dep'", specifier = ">=4.2.0,<5.0.0" }, - { name = "pyzmq", marker = "extra == 'lekiwi'", specifier = ">=26.2.1,<28.0.0" }, - { name = "pyzmq", marker = "extra == 'unitree-g1'", specifier = ">=26.2.1,<28.0.0" }, + { name = "pyzmq", marker = "extra == 'pyzmq-dep'", specifier = ">=26.2.1,<28.0.0" }, { name = "qwen-vl-utils", marker = "extra == 'qwen-vl-utils-dep'", specifier = ">=0.0.11,<0.1.0" }, { name = "reachy2-sdk", marker = "extra == 'reachy2'", specifier = ">=1.0.15,<1.1.0" }, { name = "requests", specifier = ">=2.32.0,<3.0.0" }, @@ -2710,7 +3112,7 @@ requires-dist = [ { name = "transformers", marker = "extra == 'transformers-dep'", specifier = "==5.3.0" }, { name = "wandb", marker = "extra == 'training'", specifier = ">=0.24.0,<0.25.0" }, ] -provides-extras = ["dataset", "training", "hardware", "viz", "core-scripts", "evaluation", "dataset-viz", "av-dep", "pygame-dep", "placo-dep", "transformers-dep", "grpcio-dep", "can-dep", "peft-dep", "scipy-dep", "diffusers-dep", "qwen-vl-utils-dep", "matplotlib-dep", "feetech", "dynamixel", "damiao", "robstride", "openarms", "gamepad", "hopejr", "lekiwi", "unitree-g1", "reachy2", "kinematics", "intelrealsense", "phone", "diffusion", "wallx", "pi", "smolvla", "multi-task-dit", "groot", "sarm", "xvla", "hilserl", "async", "peft", "dev", "test", "video-benchmark", "aloha", "pusht", 
"libero", "metaworld", "all"] +provides-extras = ["dataset", "training", "hardware", "viz", "core-scripts", "evaluation", "dataset-viz", "av-dep", "pygame-dep", "placo-dep", "transformers-dep", "grpcio-dep", "can-dep", "peft-dep", "scipy-dep", "diffusers-dep", "qwen-vl-utils-dep", "matplotlib-dep", "pyserial-dep", "deepdiff-dep", "pynput-dep", "pyzmq-dep", "feetech", "dynamixel", "damiao", "robstride", "openarms", "gamepad", "hopejr", "lekiwi", "unitree-g1", "reachy2", "kinematics", "intelrealsense", "phone", "diffusion", "wallx", "pi", "smolvla", "multi-task-dit", "groot", "sarm", "xvla", "hilserl", "async", "peft", "dev", "notebook", "test", "video-benchmark", "aloha", "pusht", "libero", "metaworld", "all"] [[package]] name = "librt" @@ -2790,82 +3192,82 @@ wheels = [ [[package]] name = "lxml" -version = "6.0.3" +version = "6.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/43/42/149c7747977db9d68faee960c1a3391eb25e94d4bb677f8e2df8328e4098/lxml-6.0.3.tar.gz", hash = "sha256:a1664c5139755df44cab3834f4400b331b02205d62d3fdcb1554f63439bf3372", size = 4237567, upload-time = "2026-04-09T14:39:09.664Z" } +sdist = { url = "https://files.pythonhosted.org/packages/28/30/9abc9e34c657c33834eaf6cd02124c61bdf5944d802aa48e69be8da3585d/lxml-6.1.0.tar.gz", hash = "sha256:bfd57d8008c4965709a919c3e9a98f76c2c7cb319086b3d26858250620023b13", size = 4197006, upload-time = "2026-04-18T04:32:51.613Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ac/4c/552571c619edd607432cbbf25e312a5d02859f2a7de421494a644b48451e/lxml-6.0.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ad6952810349cbfb843fe15e8afc580b2712359ae42b1d2b05d097bd48c4aea4", size = 8570109, upload-time = "2026-04-09T14:34:50.969Z" }, - { url = "https://files.pythonhosted.org/packages/ac/49/cf08843a6a923cd1eef40797a31e61424ac257c43634b5c9cff3bee93696/lxml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:8b81ec1ecac3be8c1ff1e00ca1c1baf8122e87db9000cd2549963847bd5e3b41", size = 4623404, upload-time = "2026-04-09T14:34:53.79Z" }, - { url = "https://files.pythonhosted.org/packages/b6/59/ffde0037a781b10c854abdf9e34fbf60d8f375ce8026551982b9f26695cc/lxml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:448e69211e59c39f398990753d15ba49f7218ec128f64ac8012ef16762e509a3", size = 4929662, upload-time = "2026-04-09T14:34:55.763Z" }, - { url = "https://files.pythonhosted.org/packages/84/29/c468055e45954a93e1bc043a964d327d6784552d6551dc2364a1f83c53a1/lxml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6289cb9145fbbc5b0e159c9fcd7fc09446dadc6b60b72c4d1012e80c7c727970", size = 5092106, upload-time = "2026-04-09T14:34:58.522Z" }, - { url = "https://files.pythonhosted.org/packages/59/a3/8400c79a6defe609e24ce7b580f48d53f08acbf4c998eede0083a89f16f0/lxml-6.0.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b68c29aac4788438b07d768057836de47589c7deaa3ad8dc4af488dfc27be388", size = 5004214, upload-time = "2026-04-09T14:35:00.531Z" }, - { url = "https://files.pythonhosted.org/packages/57/b5/797246619cd0831c8d239f91fd4683683abbe7144854c6f33c68a6ea9f42/lxml-6.0.3-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:50293e024afe5e2c25da2be68c8ceca8618912a0701a73f75b488317c8081aa6", size = 5630889, upload-time = "2026-04-09T14:35:02.89Z" }, - { url = "https://files.pythonhosted.org/packages/a0/fa/b86302385dc896d02ebb2803e4522a923acaa30e6cb35223492257ee24ab/lxml-6.0.3-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac65c08ba1bd90f662cb1d5c79f7ae4c53b1c100f0bb6ec5df1f40ac29028a7e", size = 5237728, upload-time = "2026-04-09T14:35:05.827Z" }, - { url = "https://files.pythonhosted.org/packages/9b/7d/812c054b7d15f4dfb3a6fc877c2936023fcd8ac8b53807f996c8c60c4f57/lxml-6.0.3-cp312-cp312-manylinux_2_28_i686.whl", hash = 
"sha256:16fbcf06ae534b2fa5bcdc19fcf6abd9df2e74fe8563147d1c5a687a130efed4", size = 5349527, upload-time = "2026-04-09T14:35:08.121Z" }, - { url = "https://files.pythonhosted.org/packages/b8/4a/33a572874924809928747cd156b172b04cd19c1ec1d10925fc77dfeb676d/lxml-6.0.3-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:3a0484bd1e84f82766befcbd71cccd7307dacfe08071e4dbc1d9a9b498d321e8", size = 4693177, upload-time = "2026-04-09T14:35:10.4Z" }, - { url = "https://files.pythonhosted.org/packages/36/d5/71842813ca0c43718f641e770195e278832f8c01870eaac857a3de34448a/lxml-6.0.3-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c137f8c8419c3de93e2998131d94628805f148e52b34da6d7533454e4d78bc2a", size = 5243928, upload-time = "2026-04-09T14:35:12.393Z" }, - { url = "https://files.pythonhosted.org/packages/da/a7/330845ae467c6086ef35977c335bb252fa11490082335f9ccfd0465bdfb7/lxml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:775266571f7027b1d77f5fce18a247b24f51a4404bdc1b90ec56be9b1e3801b9", size = 5046937, upload-time = "2026-04-09T14:35:15.209Z" }, - { url = "https://files.pythonhosted.org/packages/02/3d/b58b0aee0cf7e0b7eb5d24795a129c634c6d07f032d8b902bb0859319d13/lxml-6.0.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:aa18653b795d2c273b8676f7ad2ca916d846d15864e335f746658e4c28eb5168", size = 4776758, upload-time = "2026-04-09T14:35:17.758Z" }, - { url = "https://files.pythonhosted.org/packages/8c/4c/f421b50f08c1b724a24c4a778db8888d0a2d948b4dd08b80f4f05a0804ff/lxml-6.0.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:cbffd22fc8e4d80454efa968b0c93440a00b8b8a817ce0c29d2c6cb5ad324362", size = 5644912, upload-time = "2026-04-09T14:35:20.438Z" }, - { url = "https://files.pythonhosted.org/packages/a7/99/eabfedb111ca1f26c8fe890413eabc7e2b0010f075fdf5bceb42737c3894/lxml-6.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:7373ede7ccb89e6f6e39c1423b3a4d4ee48035d3b4619a6addced5c8b48d0ecc", size = 5233509, upload-time = 
"2026-04-09T14:35:23.137Z" }, - { url = "https://files.pythonhosted.org/packages/9f/17/050a105ca1154025b68c19901d45292cbdcee6f25bd056c178ad6b55e534/lxml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e759ff1b244725fef428c6b54f3dab4954c293b2d242a5f2e79db5cc3873de51", size = 5260150, upload-time = "2026-04-09T14:35:25.385Z" }, - { url = "https://files.pythonhosted.org/packages/61/a0/ed83517d12e9fe00101a21fe08a168fd69f57875d9416353e2a38c401df7/lxml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:f179bae37ad673f57756b59f26833b7922230bef471fdb29492428f152bae8c6", size = 3595160, upload-time = "2026-04-09T14:35:27.519Z" }, - { url = "https://files.pythonhosted.org/packages/55/d3/101726831f45951fe3ddd03cffbd2a4ac6261fc63ada399e6f7051d43af6/lxml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:8eeec925ad7f81886d413b3a1f8715551f75543519229a9b35e957771e1826d5", size = 3996108, upload-time = "2026-04-09T14:35:29.608Z" }, - { url = "https://files.pythonhosted.org/packages/49/9f/ab1c58ad55bfcd4b55bafd98f19ff24f34315441f13aa787d5220def0702/lxml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:f96bba9a26a064ce9e11099bad12fb08384b64d3acc0acf94bf386ca5cf4f95f", size = 3658906, upload-time = "2026-04-09T14:35:32.451Z" }, - { url = "https://files.pythonhosted.org/packages/86/a6/2cdc9c5a634b1b890927f968febc2474fa3eb6fed99db82ea3c008bbbda4/lxml-6.0.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:83c1d75e9d124ab82a4ddaf59135112f0dc49526b47355e5928ae6126a68e236", size = 8559579, upload-time = "2026-04-09T14:35:35.644Z" }, - { url = "https://files.pythonhosted.org/packages/97/3c/adfbcdab17f89f72e069c5df5661c81e0511e3cdb353550f778e9ffaa08e/lxml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b683665d0287308adafc90a5617a51a508d8af8c7040693693bb333b5f4474fe", size = 4617332, upload-time = "2026-04-09T14:35:38.901Z" }, - { url = 
"https://files.pythonhosted.org/packages/5e/d4/ee1a5c734a5ad79024fa85808f3efc18d5733813141e2bb2726a7d9d8bea/lxml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ed31e5852cd938704bc6c7a3822cbf84c7fa00ebfa914a1b4e2392d44f45bdfb", size = 4922821, upload-time = "2026-04-09T14:35:41.521Z" }, - { url = "https://files.pythonhosted.org/packages/f1/1f/87efcc0b93ba4f95303ec8f80164f3c50db20a3a5612a285133f9ad6cb7e/lxml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8922a30704a4421d69a19e0499db5861da686c0bccc3a79cf3946e3155cf25f9", size = 5081226, upload-time = "2026-04-09T14:35:44.02Z" }, - { url = "https://files.pythonhosted.org/packages/65/8b/fd0fadd9ec8a6ac9d694014ccdb9504e28705abb2e08c9ca23c609020325/lxml-6.0.3-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9a1adb0e220cb8691202ba9d97646a06292657a122df4b92733861d42f7cf4d2", size = 4992884, upload-time = "2026-04-09T14:35:46.769Z" }, - { url = "https://files.pythonhosted.org/packages/68/75/2fb0e534225214c6386496b7847195d7297b913cf563c5ccea394afc346b/lxml-6.0.3-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:821fd53699eb498990c915ba955a392d07246454c9405e6c1d0692362503013d", size = 5613383, upload-time = "2026-04-09T14:35:49.303Z" }, - { url = "https://files.pythonhosted.org/packages/54/3a/8f560f8fb2f5f092e18ac7a13a94b77e0e5213fe7c424d12e98393dcc7d8/lxml-6.0.3-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:04b7cedf52e125f86d0d426635e7fbe8e353d4cc272a1757888e3c072424381d", size = 5228398, upload-time = "2026-04-09T14:35:51.611Z" }, - { url = "https://files.pythonhosted.org/packages/aa/d5/6bf993c02a0173eb5883ace61958c55c245d3daf7753fb5f931a9691b440/lxml-6.0.3-cp313-cp313-manylinux_2_28_i686.whl", hash = "sha256:9d98063e6ae0da5084ec46952bb0a5ccb5e2cad168e32b4d65d1ec84e4b4ebd4", size = 5342198, upload-time = "2026-04-09T14:35:54.311Z" }, - { url = 
"https://files.pythonhosted.org/packages/bb/18/637130349ca6aa33b6dc4796732835ede5017a811c5f55763a1c468f7971/lxml-6.0.3-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:ce01ab3449015358f766a1950b3d818eedf9d4cdec3fa87e4eecaad10c0784db", size = 4699178, upload-time = "2026-04-09T14:35:56.647Z" }, - { url = "https://files.pythonhosted.org/packages/bb/19/239daafcc1cfa42b8aa6384509a9fd2cb1aa281679c6e8395adf9ccbc189/lxml-6.0.3-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d38c25bad123d6ce30bb37931d90a4e8a167cd796eeae9cd16c2bfce52718f8e", size = 5231869, upload-time = "2026-04-09T14:36:00.41Z" }, - { url = "https://files.pythonhosted.org/packages/0a/74/db7fcadc651b988502bed00d48acfd8b997ecb5dd52ebcc05f39bf946d9e/lxml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9b8e0779780026979f217603385995202f364adc9807bd21210d81b9f562fc4e", size = 5043669, upload-time = "2026-04-09T14:36:02.463Z" }, - { url = "https://files.pythonhosted.org/packages/55/99/af795b579182fa04aa87fcb0bd112e22705d982f71eb53874a8d356b4091/lxml-6.0.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:8c082ad2398664213a4bb5d133e2eb8bf239220b7d6688f8c8ffa9050057501f", size = 4769745, upload-time = "2026-04-09T14:36:04.716Z" }, - { url = "https://files.pythonhosted.org/packages/52/4d/10e652edc55d206188a1b738d1033aad3497886d34cb7f5fc753e67ecb49/lxml-6.0.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dfc80c74233fe01157ab550fb12b9d07a2f1fa7c5900cefb484e3bf02e856fbc", size = 5635496, upload-time = "2026-04-09T14:36:06.815Z" }, - { url = "https://files.pythonhosted.org/packages/ab/68/95371835ec15bb46feee27b090bcabbe579f4ad04efbef08e2713bcfea16/lxml-6.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:5c45bdcdc2ca6cf26fddff3faa5de7a2ed7c7f6016b3de80125313a37f972378", size = 5223564, upload-time = "2026-04-09T14:36:09.057Z" }, - { url = 
"https://files.pythonhosted.org/packages/aa/a6/0a9e5b63e8959487551be5d5496bb758ed2424c77ed7b25a9b8aae3b60c6/lxml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:99457524afd384c330dc51e527976653d543ccadfa815d9f2d92c5911626e536", size = 5250124, upload-time = "2026-04-09T14:36:11.337Z" }, - { url = "https://files.pythonhosted.org/packages/d9/80/de3d3a790edf6d026c829fe8ccf54845058f57f8bb788e420c3b227eecef/lxml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:c8e3b8a54e65393ce1d5c7d9753fe756f0d96089e7163b20ddec3e5bb56a963e", size = 3596004, upload-time = "2026-04-09T14:36:13.446Z" }, - { url = "https://files.pythonhosted.org/packages/9f/cf/43c9a5926060e39d99593921f37d7e88f129bc32ab6266b8460483abd613/lxml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:724b26a38cef98d6869d00a33cb66083bee967598e44f6a8e53f1dd283c851b0", size = 3994750, upload-time = "2026-04-09T14:36:15.686Z" }, - { url = "https://files.pythonhosted.org/packages/e5/d3/b224dbc282bfef52d2e05645e405b5ed89c6391144dc09864229fe9ce88c/lxml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:f27373113fda6621e4201f529908a24c8a190c2af355aed4711dadca44db4673", size = 3657620, upload-time = "2026-04-09T14:36:17.952Z" }, - { url = "https://files.pythonhosted.org/packages/d3/40/b637359bacf3813f1174d15b08516020ba5beb355e04377105d561e6e00a/lxml-6.0.3-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8c08926678852a233bf1ef645c4d683d56107f814482f8f41b21ef2c7659790e", size = 8575318, upload-time = "2026-04-09T14:36:20.608Z" }, - { url = "https://files.pythonhosted.org/packages/7f/91/d5286a45202ed91f1e428e68c6e1c11bcb2b42715c48424871fc73485b05/lxml-6.0.3-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2ce76d113a7c3bf42761ec1de7ca615b0cbf9d8ae478eb1d6c20111d9c9fc098", size = 4623084, upload-time = "2026-04-09T14:36:24.015Z" }, - { url = 
"https://files.pythonhosted.org/packages/8a/5f/7ea1af571ee13ed1e5fba007fd83cd0794723ca76a51eed0ef9513363b1f/lxml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:83eca62141314d641ebe8089ffa532bbf572ea07dd6255b58c40130d06bb2509", size = 4948797, upload-time = "2026-04-09T14:36:26.662Z" }, - { url = "https://files.pythonhosted.org/packages/82/be/3a9b8d787d9877cbe17e02ef5af2523bd14ecc177ce308397c485c56fe18/lxml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d8781d812bb8efd47c35651639da38980383ff0d0c1f3269ade23e3a90799079", size = 5085983, upload-time = "2026-04-09T14:36:29.486Z" }, - { url = "https://files.pythonhosted.org/packages/c4/2b/645abaef837b11414c81513c31b308a001fb8cd370f665c3ebc854be5ba5/lxml-6.0.3-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:19b079e81aa3a31b523a224b0dd46da4f56e1b1e248eef9a599e5c885c788813", size = 5031039, upload-time = "2026-04-09T14:36:31.735Z" }, - { url = "https://files.pythonhosted.org/packages/3b/4f/561f30b77e9edbb373e2b6b7203a7d6ab219c495abca219536c66f3a44b2/lxml-6.0.3-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6c055bafdcb53e7f9f75e22c009cd183dd410475e21c296d599531d7f03d1bf5", size = 5646718, upload-time = "2026-04-09T14:36:34.127Z" }, - { url = "https://files.pythonhosted.org/packages/d7/ba/2a72e673d109b563c2ab77097f2f4ca64e2927d2f04836ba07aaabe1da0e/lxml-6.0.3-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f1594a183cee73f9a1dbfd35871c4e04b461f47eeb9bcf80f7d7856b1b136d", size = 5239360, upload-time = "2026-04-09T14:36:37.195Z" }, - { url = "https://files.pythonhosted.org/packages/52/98/4e5a4ef87d846af90cc9c1ee2f8af2af34c221e620aad317b3a535361b93/lxml-6.0.3-cp314-cp314-manylinux_2_28_i686.whl", hash = "sha256:a6380c5035598e4665272ad3fc86c96ddb2a220d4059cce5ba4b660f78346ad9", size = 5351233, upload-time = "2026-04-09T14:36:39.634Z" }, - { url = 
"https://files.pythonhosted.org/packages/cb/b8/cff0af5fe48ede6b1949dc2e14171470c0c68a15789037c1fed90602b89d/lxml-6.0.3-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:143ac903fb6c9be6da613390825c8e8bb8c8d71517d43882031f6b9bc89770ef", size = 4696677, upload-time = "2026-04-09T14:36:42.037Z" }, - { url = "https://files.pythonhosted.org/packages/0c/6e/0b2a918fb15c30b00ff112df16c548df011db37b58d764bd17f47db74905/lxml-6.0.3-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c4fff7d77f440378cd841e340398edf5dbefee334816efbf521bb6e31651e54e", size = 5250503, upload-time = "2026-04-09T14:36:44.417Z" }, - { url = "https://files.pythonhosted.org/packages/57/1b/4697918f9d4c2e643e2c59cedb37c2f3a9f76fb1217d767f6dff476813d8/lxml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:631567ffc3ddb989ccdcd28f6b9fa5aab1ec7fc0e99fe65572b006a6aad347e2", size = 5084563, upload-time = "2026-04-09T14:36:46.762Z" }, - { url = "https://files.pythonhosted.org/packages/7b/8c/d7ec96246f0632773912c6556288d3b6bb6580f3a967441ca4636ddc3f73/lxml-6.0.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:38acf7171535ffa7fff1fcec8b82ebd4e55cd02e581efe776928108421accaa1", size = 4737407, upload-time = "2026-04-09T14:36:49.826Z" }, - { url = "https://files.pythonhosted.org/packages/d2/0c/603e35bf77aeb28c972f39eece35e7c0f6579ff33a7bed095cc2f7f942d9/lxml-6.0.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:06b9f3ac459b4565bbaa97aa5512aa7f9a1188c662f0108364f288f6daf35773", size = 5670919, upload-time = "2026-04-09T14:36:52.231Z" }, - { url = "https://files.pythonhosted.org/packages/92/08/6d3f188e6705cf0bfd8b5788055c7381bb3ffa786dfba9fa0b0ed5778506/lxml-6.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2773dbe2cedee81f2769bd5d24ceb4037706cf032e1703513dd0e9476cd9375f", size = 5237771, upload-time = "2026-04-09T14:36:55.286Z" }, - { url = 
"https://files.pythonhosted.org/packages/f1/4c/01639533b90e9ff622909c113df2ab2dbdd1d78540eb153d13b66a9c96ba/lxml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:30c437d8bb9a9a9edff27e85b694342e47a26a6abc249abe00584a4824f9d80d", size = 5263862, upload-time = "2026-04-09T14:36:58.247Z" }, - { url = "https://files.pythonhosted.org/packages/06/0e/bd1157d7b09d1f5e1d580c124203cee656130a3f8908365760a593b21daf/lxml-6.0.3-cp314-cp314-win32.whl", hash = "sha256:1b60a3a1205f869bd47874787c792087174453b1a869db4837bf5b3ff92be017", size = 3656378, upload-time = "2026-04-09T14:37:47.74Z" }, - { url = "https://files.pythonhosted.org/packages/c5/cc/d50cbce8cd5687670868bea33bbeefa0866c5e5d02c5e11c4a04c79fc45e/lxml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:5b6913a68d98c58c673667c864500ba31bc9b0f462effac98914e9a92ebacd2e", size = 4062518, upload-time = "2026-04-09T14:37:49.911Z" }, - { url = "https://files.pythonhosted.org/packages/fd/c7/ece11a1e51390502894838aa384e9f98af7bef4d6806a927197153a16972/lxml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:1b36a3c73f2a6d9c2bfae78089ca7aedae5c2ee5fd5214a15f00b2f89e558ba7", size = 3741064, upload-time = "2026-04-09T14:37:52.185Z" }, - { url = "https://files.pythonhosted.org/packages/2c/ae/918d7f89635fb6456cd732c12246c0e504dd9c49e8006f3593c9ecdb90ff/lxml-6.0.3-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:239e9a6be3a79c03ec200d26f7bb17a4414704a208059e20050bf161e2d8848a", size = 8826590, upload-time = "2026-04-09T14:37:00.862Z" }, - { url = "https://files.pythonhosted.org/packages/07/cf/bda0ae583758704719976b9ea69c8b089fa5f92e49683e517386539b21cf/lxml-6.0.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:16e5cbaa1a6351f2abefa4072e9aac1f09103b47fe7ab4496d54e5995b065162", size = 4735028, upload-time = "2026-04-09T14:37:03.602Z" }, - { url = 
"https://files.pythonhosted.org/packages/2f/0e/3bfb18778c6f73c7ead2d49a256501fa3052888b899826f5d1df1fbdf83b/lxml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:89f8746c206d8cf2c167221831645d6cc2b24464afd9c428a5eb3fd34c584eb1", size = 4969184, upload-time = "2026-04-09T14:37:05.914Z" }, - { url = "https://files.pythonhosted.org/packages/29/e6/796c77751a682d6d1bb9aa3fe43851b41a21b0377100e246a4a83a81d668/lxml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5d559a84b2fd583e5bcf8ec4af1ec895f98811684d5fbd6524ea31a04f92d4ad", size = 5103548, upload-time = "2026-04-09T14:37:08.605Z" }, - { url = "https://files.pythonhosted.org/packages/f9/5e/a02aee214f657f29d4690d88161de8ffb8f1b5139e792bae313b9479e317/lxml-6.0.3-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7966fbce2d18fde579d5593933d36ad98cc7c8dc7f2b1916d127057ce0415062", size = 5027775, upload-time = "2026-04-09T14:37:11.283Z" }, - { url = "https://files.pythonhosted.org/packages/20/e5/65dd25f2c366879d696d1c720af9a96fa0969d2d135a27b6140222fc6f68/lxml-6.0.3-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a1f258e6aa0e6eda2c1199f5582c062c96c7d4a28d96d0c4daa79e39b3f2a764", size = 5595348, upload-time = "2026-04-09T14:37:13.618Z" }, - { url = "https://files.pythonhosted.org/packages/f7/1f/2f0e80d7fd2ad9755d771af4ad46ea14bf871bc5a1d2d365a3f948940ddf/lxml-6.0.3-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:738aef404c862d2c3cd951364ee7175c9d50e8290f5726611c4208c0fba8d186", size = 5224217, upload-time = "2026-04-09T14:37:16.519Z" }, - { url = "https://files.pythonhosted.org/packages/3b/28/e1aaeee7d6a4c9f24a3e4535a4e19ce64b99eefbe7437d325b61623b1817/lxml-6.0.3-cp314-cp314t-manylinux_2_28_i686.whl", hash = "sha256:5c35e5c3ed300990a46a144d3514465713f812b35dacfa83e928c60db7c90af7", size = 5312245, upload-time = "2026-04-09T14:37:19.387Z" }, - { url = 
"https://files.pythonhosted.org/packages/0a/ac/9633cb919124473e03c62862b0494bf0e1705f902fbd9627be4f648bddfb/lxml-6.0.3-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:4ff774b43712b0cf40d9888a5494ca39aefe990c946511cc947b9fddcf74a29b", size = 4637952, upload-time = "2026-04-09T14:37:21.648Z" }, - { url = "https://files.pythonhosted.org/packages/50/aa/135baeea457d41989bafa78e437fe3a370c793aab0d8fb3da73ccae10095/lxml-6.0.3-cp314-cp314t-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d20af2784c763928d0d0879cbc5a3739e4d81eefa0d68962d3478bff4c13e644", size = 5232782, upload-time = "2026-04-09T14:37:24.6Z" }, - { url = "https://files.pythonhosted.org/packages/0e/77/d05183ac8440cbc4c6fa386edb7ba9718bee4f097e58485b1cd1f9479d56/lxml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:fdb7786ebefaa0dad0d399dfeaf146b370a14591af2f3aea59e06f931a426678", size = 5083889, upload-time = "2026-04-09T14:37:27.432Z" }, - { url = "https://files.pythonhosted.org/packages/6d/58/e9fda8fb82775491ad0290c7b17252f944b6c3a6974cd820d65910690351/lxml-6.0.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:c71a387ea133481e725079cff22de45593bf0b834824de22829365ab1d2386c9", size = 4758658, upload-time = "2026-04-09T14:37:29.81Z" }, - { url = "https://files.pythonhosted.org/packages/8b/32/4aae9f004f79f9d200efd8343809cfe46077f8e5bd58f08708c320a20fcd/lxml-6.0.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:841b89fc3d910d61c7c267db6bb7dc3a8b3dac240edb66220fcdf96fe70a0552", size = 5619494, upload-time = "2026-04-09T14:37:33.482Z" }, - { url = "https://files.pythonhosted.org/packages/f9/49/407fa9e3c91e7c6d0762eaeedd50d4695bcd26db817e933ca689eb1f3df4/lxml-6.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:ac2d6cdafa29672d6a604c641bf67ace3fd0735ec6885501a94943379219ddbf", size = 5228386, upload-time = "2026-04-09T14:37:36.058Z" }, - { url = 
"https://files.pythonhosted.org/packages/99/92/39982f818acbb1dd67dd5d20c2a06bcb9f1f3b9a8ff0021e367904f82417/lxml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:609bf136a7339aeca2bd4268c7cd190f33d13118975fe9964eda8e5138f42802", size = 5247973, upload-time = "2026-04-09T14:37:38.836Z" }, - { url = "https://files.pythonhosted.org/packages/66/68/fcdbb78c8cda81a86e17b31abf103b7e474e474a09fb291a99e7a9b43eb8/lxml-6.0.3-cp314-cp314t-win32.whl", hash = "sha256:bf98f5f87f6484302e7cce4e2ca5af43562902852063d916c3e2f1c115fdce60", size = 3896249, upload-time = "2026-04-09T14:37:41.068Z" }, - { url = "https://files.pythonhosted.org/packages/88/fb/6292681ac4a4223b700569ce98f71662cb07c5a3ade4f346f5f0d5c574cf/lxml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:d3d65e511e4e656ec67b472110f7a72cbf8547ca15f76fe74cffa4e97412a064", size = 4391091, upload-time = "2026-04-09T14:37:43.357Z" }, - { url = "https://files.pythonhosted.org/packages/99/39/a0f486360a6f1b36fd2f5eb62d037652bef503d82b6f853aee6664cdfcac/lxml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:cbc7ce67f85b92db97c92219985432be84dc1ba9a028e68c6933e89551234df2", size = 3816374, upload-time = "2026-04-09T14:37:45.532Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d4/9326838b59dc36dfae42eec9656b97520f9997eee1de47b8316aaeed169c/lxml-6.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d2f17a16cd8751e8eb233a7e41aecdf8e511712e00088bf9be455f604cd0d28d", size = 8570663, upload-time = "2026-04-18T04:27:48.253Z" }, + { url = "https://files.pythonhosted.org/packages/d8/a4/053745ce1f8303ccbb788b86c0db3a91b973675cefc42566a188637b7c40/lxml-6.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f0cea5b1d3e6e77d71bd2b9972eb2446221a69dc52bb0b9c3c6f6e5700592d93", size = 4624024, upload-time = "2026-04-18T04:27:52.594Z" }, + { url = 
"https://files.pythonhosted.org/packages/90/97/a517944b20f8fd0932ad2109482bee4e29fe721416387a363306667941f6/lxml-6.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc46da94826188ed45cb53bd8e3fc076ae22675aea2087843d4735627f867c6d", size = 4930895, upload-time = "2026-04-18T04:32:56.29Z" }, + { url = "https://files.pythonhosted.org/packages/94/7c/e08a970727d556caa040a44773c7b7e3ad0f0d73dedc863543e9a8b931f2/lxml-6.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9147d8e386ec3b82c3b15d88927f734f565b0aaadef7def562b853adca45784a", size = 5093820, upload-time = "2026-04-18T04:32:58.94Z" }, + { url = "https://files.pythonhosted.org/packages/88/ee/2a5c2aa2c32016a226ca25d3e1056a8102ea6e1fe308bf50213586635400/lxml-6.1.0-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5715e0e28736a070f3f34a7ccc09e2fdcba0e3060abbcf61a1a5718ff6d6b105", size = 5005790, upload-time = "2026-04-18T04:33:01.272Z" }, + { url = "https://files.pythonhosted.org/packages/e3/38/a0db9be8f38ad6043ab9429487c128dd1d30f07956ef43040402f8da49e8/lxml-6.1.0-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4937460dc5df0cdd2f06a86c285c28afda06aefa3af949f9477d3e8df430c485", size = 5630827, upload-time = "2026-04-18T04:33:04.036Z" }, + { url = "https://files.pythonhosted.org/packages/31/ba/3c13d3fc24b7cacf675f808a3a1baabf43a30d0cd24c98f94548e9aa58eb/lxml-6.1.0-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bc783ee3147e60a25aa0445ea82b3e8aabb83b240f2b95d32cb75587ff781814", size = 5240445, upload-time = "2026-04-18T04:33:06.87Z" }, + { url = "https://files.pythonhosted.org/packages/55/ba/eeef4ccba09b2212fe239f46c1692a98db1878e0872ae320756488878a94/lxml-6.1.0-cp312-cp312-manylinux_2_28_i686.whl", hash = "sha256:40d9189f80075f2e1f88db21ef815a2b17b28adf8e50aaf5c789bfe737027f32", size = 5350121, upload-time = "2026-04-18T04:33:09.365Z" }, + { url = 
"https://files.pythonhosted.org/packages/7e/01/1da87c7b587c38d0cbe77a01aae3b9c1c49ed47d76918ef3db8fc151b1ca/lxml-6.1.0-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:05b9b8787e35bec69e68daf4952b2e6dfcfb0db7ecf1a06f8cdfbbac4eb71aad", size = 4694949, upload-time = "2026-04-18T04:33:11.628Z" }, + { url = "https://files.pythonhosted.org/packages/a1/88/7db0fe66d5aaf128443ee1623dec3db1576f3e4c17751ec0ef5866468590/lxml-6.1.0-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0f0f08beb0182e3e9a86fae124b3c47a7b41b7b69b225e1377db983802404e54", size = 5243901, upload-time = "2026-04-18T04:33:13.95Z" }, + { url = "https://files.pythonhosted.org/packages/00/a8/1346726af7d1f6fca1f11223ba34001462b0a3660416986d37641708d57c/lxml-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73becf6d8c81d4c76b1014dbd3584cb26d904492dcf73ca85dc8bff08dcd6d2d", size = 5048054, upload-time = "2026-04-18T04:33:16.965Z" }, + { url = "https://files.pythonhosted.org/packages/2e/b7/85057012f035d1a0c87e02f8c723ca3c3e6e0728bcf4cb62080b21b1c1e3/lxml-6.1.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1ae225f66e5938f4fa29d37e009a3bb3b13032ac57eb4eb42afa44f6e4054e69", size = 4777324, upload-time = "2026-04-18T04:33:19.832Z" }, + { url = "https://files.pythonhosted.org/packages/75/6c/ad2f94a91073ef570f33718040e8e160d5fb93331cf1ab3ca1323f939e2d/lxml-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:690022c7fae793b0489aa68a658822cea83e0d5933781811cabbf5ea3bcfe73d", size = 5645702, upload-time = "2026-04-18T04:33:22.436Z" }, + { url = "https://files.pythonhosted.org/packages/3b/89/0bb6c0bd549c19004c60eea9dc554dd78fd647b72314ef25d460e0d208c6/lxml-6.1.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:63aeafc26aac0be8aff14af7871249e87ea1319be92090bfd632ec68e03b16a5", size = 5232901, upload-time = "2026-04-18T04:33:26.21Z" }, + { url = 
"https://files.pythonhosted.org/packages/a1/d9/d609a11fb567da9399f525193e2b49847b5a409cdebe737f06a8b7126bdc/lxml-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:264c605ab9c0e4aa1a679636f4582c4d3313700009fac3ec9c3412ed0d8f3e1d", size = 5261333, upload-time = "2026-04-18T04:33:28.984Z" }, + { url = "https://files.pythonhosted.org/packages/a6/3a/ac3f99ec8ac93089e7dd556f279e0d14c24de0a74a507e143a2e4b496e7c/lxml-6.1.0-cp312-cp312-win32.whl", hash = "sha256:56971379bc5ee8037c5a0f09fa88f66cdb7d37c3e38af3e45cf539f41131ac1f", size = 3596289, upload-time = "2026-04-18T04:27:42.819Z" }, + { url = "https://files.pythonhosted.org/packages/f2/a7/0a915557538593cb1bbeedcd40e13c7a261822c26fecbbdb71dad0c2f540/lxml-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:bba078de0031c219e5dd06cf3e6bf8fb8e6e64a77819b358f53bb132e3e03366", size = 3997059, upload-time = "2026-04-18T04:27:46.764Z" }, + { url = "https://files.pythonhosted.org/packages/92/96/a5dc078cf0126fbfbc35611d77ecd5da80054b5893e28fb213a5613b9e1d/lxml-6.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:c3592631e652afa34999a088f98ba7dfc7d6aff0d535c410bea77a71743f3819", size = 3659552, upload-time = "2026-04-18T04:27:51.133Z" }, + { url = "https://files.pythonhosted.org/packages/08/03/69347590f1cf4a6d5a4944bb6099e6d37f334784f16062234e1f892fdb1d/lxml-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a0092f2b107b69601adf562a57c956fbb596e05e3e6651cabd3054113b007e45", size = 8559689, upload-time = "2026-04-18T04:31:57.785Z" }, + { url = "https://files.pythonhosted.org/packages/3f/58/25e00bb40b185c974cfe156c110474d9a8a8390d5f7c92a4e328189bb60e/lxml-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fc7140d7a7386e6b545d41b7358f4d02b656d4053f5fa6859f92f4b9c2572c4d", size = 4617892, upload-time = "2026-04-18T04:32:01.78Z" }, + { url = 
"https://files.pythonhosted.org/packages/f5/54/92ad98a94ac318dc4f97aaac22ff8d1b94212b2ae8af5b6e9b354bf825f7/lxml-6.1.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:419c58fc92cc3a2c3fa5f78c63dbf5da70c1fa9c1b25f25727ecee89a96c7de2", size = 4923489, upload-time = "2026-04-18T04:33:31.401Z" }, + { url = "https://files.pythonhosted.org/packages/15/3b/a20aecfab42bdf4f9b390590d345857ad3ffd7c51988d1c89c53a0c73faf/lxml-6.1.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:37fabd1452852636cf38ecdcc9dd5ca4bba7a35d6c53fa09725deeb894a87491", size = 5082162, upload-time = "2026-04-18T04:33:34.262Z" }, + { url = "https://files.pythonhosted.org/packages/45/26/2cdb3d281ac1bd175603e290cbe4bad6eff127c0f8de90bafd6f8548f0fd/lxml-6.1.0-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a2853c8b2170cc6cd54a6b4d50d2c1a8a7aeca201f23804b4898525c7a152cfc", size = 4993247, upload-time = "2026-04-18T04:33:36.674Z" }, + { url = "https://files.pythonhosted.org/packages/f6/05/d735aef963740022a08185c84821f689fc903acb3d50326e6b1e9886cc22/lxml-6.1.0-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8e369cbd690e788c8d15e56222d91a09c6a417f49cbc543040cba0fe2e25a79e", size = 5613042, upload-time = "2026-04-18T04:33:39.205Z" }, + { url = "https://files.pythonhosted.org/packages/ee/b8/ead7c10efff731738c72e59ed6eb5791854879fbed7ae98781a12006263a/lxml-6.1.0-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e69aa6805905807186eb00e66c6d97a935c928275182eb02ee40ba00da9623b2", size = 5228304, upload-time = "2026-04-18T04:33:41.647Z" }, + { url = "https://files.pythonhosted.org/packages/6b/10/e9842d2ec322ea65f0a7270aa0315a53abed06058b88ef1b027f620e7a5f/lxml-6.1.0-cp313-cp313-manylinux_2_28_i686.whl", hash = "sha256:4bd1bdb8a9e0e2dd229de19b5f8aebac80e916921b4b2c6ef8a52bc131d0c1f9", size = 5341578, upload-time = "2026-04-18T04:33:44.596Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/54/40d9403d7c2775fa7301d3ddd3464689bfe9ba71acc17dfff777071b4fdc/lxml-6.1.0-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:cbd7b79cdcb4986ad78a2662625882747f09db5e4cd7b2ae178a88c9c51b3dfe", size = 4700209, upload-time = "2026-04-18T04:33:47.552Z" }, + { url = "https://files.pythonhosted.org/packages/85/b2/bbdcc2cf45dfc7dfffef4fd97e5c47b15919b6a365247d95d6f684ef5e82/lxml-6.1.0-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:43e4d297f11080ec9d64a4b1ad7ac02b4484c9f0e2179d9c4ef78e886e747b88", size = 5232365, upload-time = "2026-04-18T04:33:50.249Z" }, + { url = "https://files.pythonhosted.org/packages/48/5a/b06875665e53aaba7127611a7bed3b7b9658e20b22bc2dd217a0b7ab0091/lxml-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cc16682cc987a3da00aa56a3aa3075b08edb10d9b1e476938cfdbee8f3b67181", size = 5043654, upload-time = "2026-04-18T04:33:52.71Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9c/e71a069d09641c1a7abeb30e693f828c7c90a41cbe3d650b2d734d876f85/lxml-6.1.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:d6d8efe71429635f0559579092bb5e60560d7b9115ee38c4adbea35632e7fa24", size = 4769326, upload-time = "2026-04-18T04:33:55.244Z" }, + { url = "https://files.pythonhosted.org/packages/cc/06/7a9cd84b3d4ed79adf35f874750abb697dec0b4a81a836037b36e47c091a/lxml-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e39ab3a28af7784e206d8606ec0e4bcad0190f63a492bca95e94e5a4aef7f6e", size = 5635879, upload-time = "2026-04-18T04:33:58.509Z" }, + { url = "https://files.pythonhosted.org/packages/cc/f0/9d57916befc1e54c451712c7ee48e9e74e80ae4d03bdce49914e0aee42cd/lxml-6.1.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:9eb667bf50856c4a58145f8ca2d5e5be160191e79eb9e30855a476191b3c3495", size = 5224048, upload-time = "2026-04-18T04:34:00.943Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/75/90c4eefda0c08c92221fe0753db2d6699a4c628f76ff4465ec20dea84cc1/lxml-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7f4a77d6f7edf9230cee3e1f7f6764722a41604ee5681844f18db9a81ea0ec33", size = 5250241, upload-time = "2026-04-18T04:34:03.365Z" }, + { url = "https://files.pythonhosted.org/packages/5e/73/16596f7e4e38fa33084b9ccbccc22a15f82a290a055126f2c1541236d2ff/lxml-6.1.0-cp313-cp313-win32.whl", hash = "sha256:28902146ffbe5222df411c5d19e5352490122e14447e98cd118907ee3fd6ee62", size = 3596938, upload-time = "2026-04-18T04:31:56.206Z" }, + { url = "https://files.pythonhosted.org/packages/8e/63/981401c5680c1eb30893f00a19641ac80db5d1e7086c62cb4b13ed813038/lxml-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:4a1503c56e4e2b38dc76f2f2da7bae69670c0f1933e27cfa34b2fa5876410b16", size = 3995728, upload-time = "2026-04-18T04:31:58.763Z" }, + { url = "https://files.pythonhosted.org/packages/e7/e8/c358a38ac3e541d16a1b527e4e9cb78c0419b0506a070ace11777e5e8404/lxml-6.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:e0af85773850417d994d019741239b901b22c6680206f46a34766926e466141d", size = 3658372, upload-time = "2026-04-18T04:32:03.629Z" }, + { url = "https://files.pythonhosted.org/packages/eb/45/cee4cf203ef0bab5c52afc118da61d6b460c928f2893d40023cfa27e0b80/lxml-6.1.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:ab863fd37458fed6456525f297d21239d987800c46e67da5ef04fc6b3dd93ac8", size = 8576713, upload-time = "2026-04-18T04:32:06.831Z" }, + { url = "https://files.pythonhosted.org/packages/8a/a7/eda05babeb7e046839204eaf254cd4d7c9130ce2bbf0d9e90ea41af5654d/lxml-6.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:6fd8b1df8254ff4fd93fd31da1fc15770bde23ac045be9bb1f87425702f61cc9", size = 4623874, upload-time = "2026-04-18T04:32:10.755Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/e9/db5846de9b436b91890a62f29d80cd849ea17948a49bf532d5278ee69a9e/lxml-6.1.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:47024feaae386a92a146af0d2aeed65229bf6fff738e6a11dda6b0015fb8fd03", size = 4949535, upload-time = "2026-04-18T04:34:06.657Z" }, + { url = "https://files.pythonhosted.org/packages/5a/ba/0d3593373dcae1d68f40dc3c41a5a92f2544e68115eb2f62319a4c2a6500/lxml-6.1.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3f00972f84450204cd5d93a5395965e348956aaceaadec693a22ec743f8ae3eb", size = 5086881, upload-time = "2026-04-18T04:34:09.556Z" }, + { url = "https://files.pythonhosted.org/packages/43/76/759a7484539ad1af0d125a9afe9c3fb5f82a8779fd1f5f56319d9e4ea2fd/lxml-6.1.0-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97faa0860e13b05b15a51fb4986421ef7a30f0b3334061c416e0981e9450ca4c", size = 5031305, upload-time = "2026-04-18T04:34:12.336Z" }, + { url = "https://files.pythonhosted.org/packages/dc/b9/c1f0daf981a11e47636126901fd4ab82429e18c57aeb0fc3ad2940b42d8b/lxml-6.1.0-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:972a6451204798675407beaad97b868d0c733d9a74dafefc63120b81b8c2de28", size = 5647522, upload-time = "2026-04-18T04:34:14.89Z" }, + { url = "https://files.pythonhosted.org/packages/31/e6/1f533dcd205275363d9ba3511bcec52fa2df86abf8abe6a5f2c599f0dc31/lxml-6.1.0-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fe022f20bc4569ec66b63b3fb275a3d628d9d32da6326b2982584104db6d3086", size = 5239310, upload-time = "2026-04-18T04:34:17.652Z" }, + { url = "https://files.pythonhosted.org/packages/c3/8c/4175fb709c78a6e315ed814ed33be3defd8b8721067e70419a6cf6f971da/lxml-6.1.0-cp314-cp314-manylinux_2_28_i686.whl", hash = "sha256:75c4c7c619a744f972f4451bf5adf6d0fb00992a1ffc9fd78e13b0bc817cc99f", size = 5350799, upload-time = "2026-04-18T04:34:20.529Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/77/6ffdebc5994975f0dde4acb59761902bd9d9bb84422b9a0bd239a7da9ca8/lxml-6.1.0-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:3648f20d25102a22b6061c688beb3a805099ea4beb0a01ce62975d926944d292", size = 4697693, upload-time = "2026-04-18T04:34:23.541Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f1/565f36bd5c73294602d48e04d23f81ff4c8736be6ba5e1d1ec670ac9be80/lxml-6.1.0-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:77b9f99b17cbf14026d1e618035077060fc7195dd940d025149f3e2e830fbfcb", size = 5250708, upload-time = "2026-04-18T04:34:26.001Z" }, + { url = "https://files.pythonhosted.org/packages/5a/11/a68ab9dd18c5c499404deb4005f4bc4e0e88e5b72cd755ad96efec81d18d/lxml-6.1.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:32662519149fd7a9db354175aa5e417d83485a8039b8aaa62f873ceee7ea4cad", size = 5084737, upload-time = "2026-04-18T04:34:28.32Z" }, + { url = "https://files.pythonhosted.org/packages/ab/78/e8f41e2c74f4af564e6a0348aea69fb6daaefa64bc071ef469823d22cc18/lxml-6.1.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:73d658216fc173cf2c939e90e07b941c5e12736b0bf6a99e7af95459cfe8eabb", size = 4737817, upload-time = "2026-04-18T04:34:30.784Z" }, + { url = "https://files.pythonhosted.org/packages/06/2d/aa4e117aa2ce2f3b35d9ff246be74a2f8e853baba5d2a92c64744474603a/lxml-6.1.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ac4db068889f8772a4a698c5980ec302771bb545e10c4b095d4c8be26749616f", size = 5670753, upload-time = "2026-04-18T04:34:33.675Z" }, + { url = "https://files.pythonhosted.org/packages/08/f5/dd745d50c0409031dbfcc4881740542a01e54d6f0110bd420fa7782110b8/lxml-6.1.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:45e9dfbd1b661eb64ba0d4dbe762bd210c42d86dd1e5bd2bdf89d634231beb43", size = 5238071, upload-time = "2026-04-18T04:34:36.12Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/74/ad424f36d0340a904665867dab310a3f1f4c96ff4039698de83b77f44c1f/lxml-6.1.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:89e8d73d09ac696a5ba42ec69787913d53284f12092f651506779314f10ba585", size = 5264319, upload-time = "2026-04-18T04:34:39.035Z" }, + { url = "https://files.pythonhosted.org/packages/53/36/a15d8b3514ec889bfd6aa3609107fcb6c9189f8dc347f1c0b81eded8d87c/lxml-6.1.0-cp314-cp314-win32.whl", hash = "sha256:ebe33f4ec1b2de38ceb225a1749a2965855bffeef435ba93cd2d5d540783bf2f", size = 3657139, upload-time = "2026-04-18T04:32:20.006Z" }, + { url = "https://files.pythonhosted.org/packages/1a/a4/263ebb0710851a3c6c937180a9a86df1206fdfe53cc43005aa2237fd7736/lxml-6.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:398443df51c538bd578529aa7e5f7afc6c292644174b47961f3bf87fe5741120", size = 4064195, upload-time = "2026-04-18T04:32:23.876Z" }, + { url = "https://files.pythonhosted.org/packages/80/68/2000f29d323b6c286de077ad20b429fc52272e44eae6d295467043e56012/lxml-6.1.0-cp314-cp314-win_arm64.whl", hash = "sha256:8c8984e1d8c4b3949e419158fda14d921ff703a9ed8a47236c6eb7a2b6cb4946", size = 3741870, upload-time = "2026-04-18T04:32:27.922Z" }, + { url = "https://files.pythonhosted.org/packages/30/e9/21383c7c8d43799f0da90224c0d7c921870d476ec9b3e01e1b2c0b8237c5/lxml-6.1.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:1081dd10bc6fa437db2500e13993abf7cc30716d0a2f40e65abb935f02ec559c", size = 8827548, upload-time = "2026-04-18T04:32:15.094Z" }, + { url = "https://files.pythonhosted.org/packages/a5/01/c6bc11cd587030dd4f719f65c5657960649fe3e19196c844c75bf32cd0d6/lxml-6.1.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:dabecc48db5f42ba348d1f5d5afdc54c6c4cc758e676926c7cd327045749517d", size = 4735866, upload-time = "2026-04-18T04:32:18.924Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/01/757132fff5f4acf25463b5298f1a46099f3a94480b806547b29ce5e385de/lxml-6.1.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e3dd5fe19c9e0ac818a9c7f132a5e43c1339ec1cbbfecb1a938bd3a47875b7c9", size = 4969476, upload-time = "2026-04-18T04:34:41.889Z" }, + { url = "https://files.pythonhosted.org/packages/fd/fb/1bc8b9d27ed64be7c8903db6c89e74dc8c2cd9ec630a7462e4654316dc5b/lxml-6.1.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9e7b0a4ca6dcc007a4cef00a761bba2dea959de4bd2df98f926b33c92ca5dfb9", size = 5103719, upload-time = "2026-04-18T04:34:44.797Z" }, + { url = "https://files.pythonhosted.org/packages/d5/e7/5bf82fa28133536a54601aae633b14988e89ed61d4c1eb6b899b023233aa/lxml-6.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d27bbe326c6b539c64b42638b18bc6003a8d88f76213a97ac9ed4f885efeab7", size = 5027890, upload-time = "2026-04-18T04:34:47.634Z" }, + { url = "https://files.pythonhosted.org/packages/2d/20/e048db5d4b4ea0366648aa595f26bb764b2670903fc585b87436d0a5032c/lxml-6.1.0-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4e425db0c5445ef0ad56b0eec54f89b88b2d884656e536a90b2f52aecb4ca86", size = 5596008, upload-time = "2026-04-18T04:34:51.503Z" }, + { url = "https://files.pythonhosted.org/packages/9a/c2/d10807bc8da4824b39e5bd01b5d05c077b6fd01bd91584167edf6b269d22/lxml-6.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4b89b098105b8599dc57adac95d1813409ac476d3c948a498775d3d0c6124bfb", size = 5224451, upload-time = "2026-04-18T04:34:54.263Z" }, + { url = "https://files.pythonhosted.org/packages/3c/15/2ebea45bea427e7f0057e9ce7b2d62c5aba20c6b001cca89ed0aadb3ad41/lxml-6.1.0-cp314-cp314t-manylinux_2_28_i686.whl", hash = "sha256:c4a699432846df86cc3de502ee85f445ebad748a1c6021d445f3e514d2cd4b1c", size = 5312135, upload-time = "2026-04-18T04:34:56.818Z" }, + { url = 
"https://files.pythonhosted.org/packages/31/e2/87eeae151b0be2a308d49a7ec444ff3eb192b14251e62addb29d0bf3778f/lxml-6.1.0-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:30e7b2ed63b6c8e97cca8af048589a788ab5c9c905f36d9cf1c2bb549f450d2f", size = 4639126, upload-time = "2026-04-18T04:34:59.704Z" }, + { url = "https://files.pythonhosted.org/packages/a3/51/8a3f6a20902ad604dd746ec7b4000311b240d389dac5e9d95adefd349e0c/lxml-6.1.0-cp314-cp314t-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:022981127642fe19866d2907d76241bb07ed21749601f727d5d5dd1ce5d1b773", size = 5232579, upload-time = "2026-04-18T04:35:02.658Z" }, + { url = "https://files.pythonhosted.org/packages/6d/d2/650d619bdbe048d2c3f2c31edb00e35670a5e2d65b4fe3b61bce37b19121/lxml-6.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:23cad0cc86046d4222f7f418910e46b89971c5a45d3c8abfad0f64b7b05e4a9b", size = 5084206, upload-time = "2026-04-18T04:35:05.175Z" }, + { url = "https://files.pythonhosted.org/packages/dd/8a/672ca1a3cbeabd1f511ca275a916c0514b747f4b85bdaae103b8fa92f307/lxml-6.1.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:21c3302068f50d1e8728c67c87ba92aa87043abee517aa2576cca1855326b405", size = 4758906, upload-time = "2026-04-18T04:35:08.098Z" }, + { url = "https://files.pythonhosted.org/packages/be/f1/ef4b691da85c916cb2feb1eec7414f678162798ac85e042fa164419ac05c/lxml-6.1.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:be10838781cb3be19251e276910cd508fe127e27c3242e50521521a0f3781690", size = 5620553, upload-time = "2026-04-18T04:35:11.23Z" }, + { url = "https://files.pythonhosted.org/packages/59/17/94e81def74107809755ac2782fdad4404420f1c92ca83433d117a6d5acf0/lxml-6.1.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:2173a7bffe97667bbf0767f8a99e587740a8c56fdf3befac4b09cb29a80276fd", size = 5229458, upload-time = "2026-04-18T04:35:14.254Z" }, + { url = 
"https://files.pythonhosted.org/packages/21/55/c4be91b0f830a871fc1b0d730943d56013b683d4671d5198260e2eae722b/lxml-6.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c6854e9cf99c84beb004eecd7d3a3868ef1109bf2b1df92d7bc11e96a36c2180", size = 5247861, upload-time = "2026-04-18T04:35:17.006Z" }, + { url = "https://files.pythonhosted.org/packages/c2/ca/77123e4d77df3cb1e968ade7b1f808f5d3a5c1c96b18a33895397de292c1/lxml-6.1.0-cp314-cp314t-win32.whl", hash = "sha256:00750d63ef0031a05331b9223463b1c7c02b9004cef2346a5b2877f0f9494dd2", size = 3897377, upload-time = "2026-04-18T04:32:07.656Z" }, + { url = "https://files.pythonhosted.org/packages/64/ce/3554833989d074267c063209bae8b09815e5656456a2d332b947806b05ff/lxml-6.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:80410c3a7e3c617af04de17caa9f9f20adaa817093293d69eae7d7d0522836f5", size = 4392701, upload-time = "2026-04-18T04:32:12.113Z" }, + { url = "https://files.pythonhosted.org/packages/2b/a0/9b916c68c0e57752c07f8f64b30138d9d4059dbeb27b90274dedbea128ff/lxml-6.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:26dd9f57ee3bd41e7d35b4c98a2ffd89ed11591649f421f0ec19f67d50ec67ac", size = 3817120, upload-time = "2026-04-18T04:32:15.803Z" }, ] [[package]] @@ -2954,7 +3356,7 @@ wheels = [ [[package]] name = "matplotlib" -version = "3.10.8" +version = "3.10.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "contourpy" }, @@ -2967,43 +3369,43 @@ dependencies = [ { name = "pyparsing" }, { name = "python-dateutil" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/8a/76/d3c6e3a13fe484ebe7718d14e269c9569c4eb0020a968a327acb3b9a8fe6/matplotlib-3.10.8.tar.gz", hash = "sha256:2299372c19d56bcd35cf05a2738308758d32b9eaed2371898d8f5bd33f084aa3", size = 34806269, upload-time = "2025-12-10T22:56:51.155Z" } +sdist = { url = "https://files.pythonhosted.org/packages/63/1b/4be5be87d43d327a0cf4de1a56e86f7f84c89312452406cf122efe2839e6/matplotlib-3.10.9.tar.gz", hash = 
"sha256:fd66508e8c6877d98e586654b608a0456db8d7e8a546eb1e2600efd957302358", size = 34811233, upload-time = "2026-04-24T00:14:13.539Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9e/67/f997cdcbb514012eb0d10cd2b4b332667997fb5ebe26b8d41d04962fa0e6/matplotlib-3.10.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:64fcc24778ca0404ce0cb7b6b77ae1f4c7231cdd60e6778f999ee05cbd581b9a", size = 8260453, upload-time = "2025-12-10T22:55:30.709Z" }, - { url = "https://files.pythonhosted.org/packages/7e/65/07d5f5c7f7c994f12c768708bd2e17a4f01a2b0f44a1c9eccad872433e2e/matplotlib-3.10.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9a5ca4ac220a0cdd1ba6bcba3608547117d30468fefce49bb26f55c1a3d5c58", size = 8148321, upload-time = "2025-12-10T22:55:33.265Z" }, - { url = "https://files.pythonhosted.org/packages/3e/f3/c5195b1ae57ef85339fd7285dfb603b22c8b4e79114bae5f4f0fcf688677/matplotlib-3.10.8-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ab4aabc72de4ff77b3ec33a6d78a68227bf1123465887f9905ba79184a1cc04", size = 8716944, upload-time = "2025-12-10T22:55:34.922Z" }, - { url = "https://files.pythonhosted.org/packages/00/f9/7638f5cc82ec8a7aa005de48622eecc3ed7c9854b96ba15bd76b7fd27574/matplotlib-3.10.8-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:24d50994d8c5816ddc35411e50a86ab05f575e2530c02752e02538122613371f", size = 9550099, upload-time = "2025-12-10T22:55:36.789Z" }, - { url = "https://files.pythonhosted.org/packages/57/61/78cd5920d35b29fd2a0fe894de8adf672ff52939d2e9b43cb83cd5ce1bc7/matplotlib-3.10.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:99eefd13c0dc3b3c1b4d561c1169e65fe47aab7b8158754d7c084088e2329466", size = 9613040, upload-time = "2025-12-10T22:55:38.715Z" }, - { url = "https://files.pythonhosted.org/packages/30/4e/c10f171b6e2f44d9e3a2b96efa38b1677439d79c99357600a62cc1e9594e/matplotlib-3.10.8-cp312-cp312-win_amd64.whl", hash = 
"sha256:dd80ecb295460a5d9d260df63c43f4afbdd832d725a531f008dad1664f458adf", size = 8142717, upload-time = "2025-12-10T22:55:41.103Z" }, - { url = "https://files.pythonhosted.org/packages/f1/76/934db220026b5fef85f45d51a738b91dea7d70207581063cd9bd8fafcf74/matplotlib-3.10.8-cp312-cp312-win_arm64.whl", hash = "sha256:3c624e43ed56313651bc18a47f838b60d7b8032ed348911c54906b130b20071b", size = 8012751, upload-time = "2025-12-10T22:55:42.684Z" }, - { url = "https://files.pythonhosted.org/packages/3d/b9/15fd5541ef4f5b9a17eefd379356cf12175fe577424e7b1d80676516031a/matplotlib-3.10.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3f2e409836d7f5ac2f1c013110a4d50b9f7edc26328c108915f9075d7d7a91b6", size = 8261076, upload-time = "2025-12-10T22:55:44.648Z" }, - { url = "https://files.pythonhosted.org/packages/8d/a0/2ba3473c1b66b9c74dc7107c67e9008cb1782edbe896d4c899d39ae9cf78/matplotlib-3.10.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:56271f3dac49a88d7fca5060f004d9d22b865f743a12a23b1e937a0be4818ee1", size = 8148794, upload-time = "2025-12-10T22:55:46.252Z" }, - { url = "https://files.pythonhosted.org/packages/75/97/a471f1c3eb1fd6f6c24a31a5858f443891d5127e63a7788678d14e249aea/matplotlib-3.10.8-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a0a7f52498f72f13d4a25ea70f35f4cb60642b466cbb0a9be951b5bc3f45a486", size = 8718474, upload-time = "2025-12-10T22:55:47.864Z" }, - { url = "https://files.pythonhosted.org/packages/01/be/cd478f4b66f48256f42927d0acbcd63a26a893136456cd079c0cc24fbabf/matplotlib-3.10.8-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:646d95230efb9ca614a7a594d4fcacde0ac61d25e37dd51710b36477594963ce", size = 9549637, upload-time = "2025-12-10T22:55:50.048Z" }, - { url = "https://files.pythonhosted.org/packages/5d/7c/8dc289776eae5109e268c4fb92baf870678dc048a25d4ac903683b86d5bf/matplotlib-3.10.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:f89c151aab2e2e23cb3fe0acad1e8b82841fd265379c4cecd0f3fcb34c15e0f6", size = 9613678, upload-time = "2025-12-10T22:55:52.21Z" }, - { url = "https://files.pythonhosted.org/packages/64/40/37612487cc8a437d4dd261b32ca21fe2d79510fe74af74e1f42becb1bdb8/matplotlib-3.10.8-cp313-cp313-win_amd64.whl", hash = "sha256:e8ea3e2d4066083e264e75c829078f9e149fa119d27e19acd503de65e0b13149", size = 8142686, upload-time = "2025-12-10T22:55:54.253Z" }, - { url = "https://files.pythonhosted.org/packages/66/52/8d8a8730e968185514680c2a6625943f70269509c3dcfc0dcf7d75928cb8/matplotlib-3.10.8-cp313-cp313-win_arm64.whl", hash = "sha256:c108a1d6fa78a50646029cb6d49808ff0fc1330fda87fa6f6250c6b5369b6645", size = 8012917, upload-time = "2025-12-10T22:55:56.268Z" }, - { url = "https://files.pythonhosted.org/packages/b5/27/51fe26e1062f298af5ef66343d8ef460e090a27fea73036c76c35821df04/matplotlib-3.10.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ad3d9833a64cf48cc4300f2b406c3d0f4f4724a91c0bd5640678a6ba7c102077", size = 8305679, upload-time = "2025-12-10T22:55:57.856Z" }, - { url = "https://files.pythonhosted.org/packages/2c/1e/4de865bc591ac8e3062e835f42dd7fe7a93168d519557837f0e37513f629/matplotlib-3.10.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:eb3823f11823deade26ce3b9f40dcb4a213da7a670013929f31d5f5ed1055b22", size = 8198336, upload-time = "2025-12-10T22:55:59.371Z" }, - { url = "https://files.pythonhosted.org/packages/c6/cb/2f7b6e75fb4dce87ef91f60cac4f6e34f4c145ab036a22318ec837971300/matplotlib-3.10.8-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d9050fee89a89ed57b4fb2c1bfac9a3d0c57a0d55aed95949eedbc42070fea39", size = 8731653, upload-time = "2025-12-10T22:56:01.032Z" }, - { url = "https://files.pythonhosted.org/packages/46/b3/bd9c57d6ba670a37ab31fb87ec3e8691b947134b201f881665b28cc039ff/matplotlib-3.10.8-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:b44d07310e404ba95f8c25aa5536f154c0a8ec473303535949e52eb71d0a1565", size = 9561356, upload-time = "2025-12-10T22:56:02.95Z" }, - { url = "https://files.pythonhosted.org/packages/c0/3d/8b94a481456dfc9dfe6e39e93b5ab376e50998cddfd23f4ae3b431708f16/matplotlib-3.10.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0a33deb84c15ede243aead39f77e990469fff93ad1521163305095b77b72ce4a", size = 9614000, upload-time = "2025-12-10T22:56:05.411Z" }, - { url = "https://files.pythonhosted.org/packages/bd/cd/bc06149fe5585ba800b189a6a654a75f1f127e8aab02fd2be10df7fa500c/matplotlib-3.10.8-cp313-cp313t-win_amd64.whl", hash = "sha256:3a48a78d2786784cc2413e57397981fb45c79e968d99656706018d6e62e57958", size = 8220043, upload-time = "2025-12-10T22:56:07.551Z" }, - { url = "https://files.pythonhosted.org/packages/e3/de/b22cf255abec916562cc04eef457c13e58a1990048de0c0c3604d082355e/matplotlib-3.10.8-cp313-cp313t-win_arm64.whl", hash = "sha256:15d30132718972c2c074cd14638c7f4592bd98719e2308bccea40e0538bc0cb5", size = 8062075, upload-time = "2025-12-10T22:56:09.178Z" }, - { url = "https://files.pythonhosted.org/packages/3c/43/9c0ff7a2f11615e516c3b058e1e6e8f9614ddeca53faca06da267c48345d/matplotlib-3.10.8-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b53285e65d4fa4c86399979e956235deb900be5baa7fc1218ea67fbfaeaadd6f", size = 8262481, upload-time = "2025-12-10T22:56:10.885Z" }, - { url = "https://files.pythonhosted.org/packages/6f/ca/e8ae28649fcdf039fda5ef554b40a95f50592a3c47e6f7270c9561c12b07/matplotlib-3.10.8-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:32f8dce744be5569bebe789e46727946041199030db8aeb2954d26013a0eb26b", size = 8151473, upload-time = "2025-12-10T22:56:12.377Z" }, - { url = "https://files.pythonhosted.org/packages/f1/6f/009d129ae70b75e88cbe7e503a12a4c0670e08ed748a902c2568909e9eb5/matplotlib-3.10.8-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cf267add95b1c88300d96ca837833d4112756045364f5c734a2276038dae27d", size = 9553896, 
upload-time = "2025-12-10T22:56:14.432Z" }, - { url = "https://files.pythonhosted.org/packages/f5/26/4221a741eb97967bc1fd5e4c52b9aa5a91b2f4ec05b59f6def4d820f9df9/matplotlib-3.10.8-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2cf5bd12cecf46908f286d7838b2abc6c91cda506c0445b8223a7c19a00df008", size = 9824193, upload-time = "2025-12-10T22:56:16.29Z" }, - { url = "https://files.pythonhosted.org/packages/1f/f3/3abf75f38605772cf48a9daf5821cd4f563472f38b4b828c6fba6fa6d06e/matplotlib-3.10.8-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:41703cc95688f2516b480f7f339d8851a6035f18e100ee6a32bc0b8536a12a9c", size = 9615444, upload-time = "2025-12-10T22:56:18.155Z" }, - { url = "https://files.pythonhosted.org/packages/93/a5/de89ac80f10b8dc615807ee1133cd99ac74082581196d4d9590bea10690d/matplotlib-3.10.8-cp314-cp314-win_amd64.whl", hash = "sha256:83d282364ea9f3e52363da262ce32a09dfe241e4080dcedda3c0db059d3c1f11", size = 8272719, upload-time = "2025-12-10T22:56:20.366Z" }, - { url = "https://files.pythonhosted.org/packages/69/ce/b006495c19ccc0a137b48083168a37bd056392dee02f87dba0472f2797fe/matplotlib-3.10.8-cp314-cp314-win_arm64.whl", hash = "sha256:2c1998e92cd5999e295a731bcb2911c75f597d937341f3030cc24ef2733d78a8", size = 8144205, upload-time = "2025-12-10T22:56:22.239Z" }, - { url = "https://files.pythonhosted.org/packages/68/d9/b31116a3a855bd313c6fcdb7226926d59b041f26061c6c5b1be66a08c826/matplotlib-3.10.8-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b5a2b97dbdc7d4f353ebf343744f1d1f1cca8aa8bfddb4262fcf4306c3761d50", size = 8305785, upload-time = "2025-12-10T22:56:24.218Z" }, - { url = "https://files.pythonhosted.org/packages/1e/90/6effe8103f0272685767ba5f094f453784057072f49b393e3ea178fe70a5/matplotlib-3.10.8-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3f5c3e4da343bba819f0234186b9004faba952cc420fbc522dc4e103c1985908", size = 8198361, upload-time = "2025-12-10T22:56:26.787Z" }, - { url = 
"https://files.pythonhosted.org/packages/d7/65/a73188711bea603615fc0baecca1061429ac16940e2385433cc778a9d8e7/matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f62550b9a30afde8c1c3ae450e5eb547d579dd69b25c2fc7a1c67f934c1717a", size = 9561357, upload-time = "2025-12-10T22:56:28.953Z" }, - { url = "https://files.pythonhosted.org/packages/f4/3d/b5c5d5d5be8ce63292567f0e2c43dde9953d3ed86ac2de0a72e93c8f07a1/matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:495672de149445ec1b772ff2c9ede9b769e3cb4f0d0aa7fa730d7f59e2d4e1c1", size = 9823610, upload-time = "2025-12-10T22:56:31.455Z" }, - { url = "https://files.pythonhosted.org/packages/4d/4b/e7beb6bbd49f6bae727a12b270a2654d13c397576d25bd6786e47033300f/matplotlib-3.10.8-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:595ba4d8fe983b88f0eec8c26a241e16d6376fe1979086232f481f8f3f67494c", size = 9614011, upload-time = "2025-12-10T22:56:33.85Z" }, - { url = "https://files.pythonhosted.org/packages/7c/e6/76f2813d31f032e65f6f797e3f2f6e4aab95b65015924b1c51370395c28a/matplotlib-3.10.8-cp314-cp314t-win_amd64.whl", hash = "sha256:25d380fe8b1dc32cf8f0b1b448470a77afb195438bafdf1d858bfb876f3edf7b", size = 8362801, upload-time = "2025-12-10T22:56:36.107Z" }, - { url = "https://files.pythonhosted.org/packages/5d/49/d651878698a0b67f23aa28e17f45a6d6dd3d3f933fa29087fa4ce5947b5a/matplotlib-3.10.8-cp314-cp314t-win_arm64.whl", hash = "sha256:113bb52413ea508ce954a02c10ffd0d565f9c3bc7f2eddc27dfe1731e71c7b5f", size = 8192560, upload-time = "2025-12-10T22:56:38.008Z" }, + { url = "https://files.pythonhosted.org/packages/35/c6/5581e26c72233ebb2a2a6fed2d24fb7c66b4700120b813f51b0555acf0b6/matplotlib-3.10.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f0c3c28d9fbcc1fe7a03be236d73430cf6409c41fb2383a7ac52fe932b072cb1", size = 8319908, upload-time = "2026-04-24T00:12:21.323Z" }, + { url = 
"https://files.pythonhosted.org/packages/b7/18/4880dd762e40cd360c1bf06e890c5a97b997e91cb324602b1a19950ad5ce/matplotlib-3.10.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41cb28c2bd769aa3e98322c6ab09854cbcc52ab69d2759d681bba3e327b2b320", size = 8216016, upload-time = "2026-04-24T00:12:23.4Z" }, + { url = "https://files.pythonhosted.org/packages/32/91/d024616abdba99e83120e07a20658976f6a343646710760c4a51df126029/matplotlib-3.10.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ae20801130378b82d647ff5047c07316295b68dc054ca6b3c13519d0ea624285", size = 8789336, upload-time = "2026-04-24T00:12:26.096Z" }, + { url = "https://files.pythonhosted.org/packages/5c/04/030a2f61ef2158f5e4c259487a92ac877732499fb33d871585d89e03c42d/matplotlib-3.10.9-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6c63ebcd8b4b169eb2f5c200552ae6b8be8999a005b6b507ed76fb8d7d674fe2", size = 9604602, upload-time = "2026-04-24T00:12:29.052Z" }, + { url = "https://files.pythonhosted.org/packages/fc/c2/541e4d09d87bb6b5830fc28b4c887a9a8cf4e1c6cee698a8c05552ae2003/matplotlib-3.10.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d75d11c949914165976c621b2324f9ef162af7ebf4b057ddf95dd1dba7e5edcf", size = 9670966, upload-time = "2026-04-24T00:12:32.131Z" }, + { url = "https://files.pythonhosted.org/packages/04/a1/4571fc46e7702de8d0c2dc54ad1b2f8e29328dea3ee90831181f7353d93c/matplotlib-3.10.9-cp312-cp312-win_amd64.whl", hash = "sha256:d091f9d758b34aaaaa6331d13574bf01891d903b3dec59bfff458ef7551de5d6", size = 8217462, upload-time = "2026-04-24T00:12:35.226Z" }, + { url = "https://files.pythonhosted.org/packages/4b/d0/2269edb12aa30c13c8bcc9382892e39943ce1d28aab4ec296e0381798e81/matplotlib-3.10.9-cp312-cp312-win_arm64.whl", hash = "sha256:10cc5ce06d10231c36f40e875f3c7e8050362a4ee8f0ee5d29a6b3277d57bb42", size = 8136688, upload-time = "2026-04-24T00:12:37.442Z" }, + { url = 
"https://files.pythonhosted.org/packages/aa/d3/8d4f6afbecb49fc04e060a57c0fce39ea51cc163a6bd87303ccd698e4fa6/matplotlib-3.10.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b580440f1ff81a0e34122051a3dfabb7e4b7f9e380629929bde0eff9af72165f", size = 8320331, upload-time = "2026-04-24T00:12:39.688Z" }, + { url = "https://files.pythonhosted.org/packages/63/d9/9e14bc7564bf92d5ffa801ae5fac819ce74b925dfb55e3ebde61a3bbad3e/matplotlib-3.10.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b1b745c489cd1a77a0dc1120a05dc87af9798faebc913601feb8c73d89bf2d1e", size = 8216461, upload-time = "2026-04-24T00:12:42.494Z" }, + { url = "https://files.pythonhosted.org/packages/8a/17/4402d0d14ccf1dfc70932600b68097fbbf9c898a4871d2cbbe79c7801a32/matplotlib-3.10.9-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8f3bcac1ca5ed000a6f4337d47ba67dfddf37ed6a46c15fd7f014997f7bf865f", size = 8790091, upload-time = "2026-04-24T00:12:44.789Z" }, + { url = "https://files.pythonhosted.org/packages/3e/0b/322aeec06dd9b91411f92028b37d447342770a24392aa4813e317064dad5/matplotlib-3.10.9-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a8d66a55def891c33147ba3ba9bfcabf0b526a43764c818acbb4525e5ed0838", size = 9605027, upload-time = "2026-04-24T00:12:47.583Z" }, + { url = "https://files.pythonhosted.org/packages/74/88/5f13482f55e7b00bcfc09838b093c2456e1379978d2a146844aae05350ad/matplotlib-3.10.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d843374407c4017a6403b59c6c81606773d136f3259d5b6da3131bc814542cc2", size = 9671269, upload-time = "2026-04-24T00:12:50.878Z" }, + { url = "https://files.pythonhosted.org/packages/c5/e0/0840fd2f93da988ec660b8ad1984abe9f25d2aed22a5e394ff1c68c88307/matplotlib-3.10.9-cp313-cp313-win_amd64.whl", hash = "sha256:f4399f64b3e94cd500195490972ae1ee81170df1636fa15364d157d5bdd7b921", size = 8217588, upload-time = "2026-04-24T00:12:53.784Z" }, + { url = 
"https://files.pythonhosted.org/packages/47/b9/d706d06dd605c49b9f83a2aed8c13e3e5db70697d7a80b7e3d7915de6b17/matplotlib-3.10.9-cp313-cp313-win_arm64.whl", hash = "sha256:ba7b3b8ef09eab7df0e86e9ae086faa433efbfbdb46afcb3aa16aabf779469a8", size = 8136913, upload-time = "2026-04-24T00:12:56.501Z" }, + { url = "https://files.pythonhosted.org/packages/9b/45/6e32d96978264c8ca8c4b1010adb955a1a49cfaf314e212bbc8908f04a61/matplotlib-3.10.9-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:09218df8a93712bd6ea133e83a153c755448cf7868316c531cffcc43f69d1cc9", size = 8368019, upload-time = "2026-04-24T00:12:58.896Z" }, + { url = "https://files.pythonhosted.org/packages/86/0a/c8e3d3bba245f0f7fc424937f8ff7ef77291a36af3edb97ccd78aa93d84f/matplotlib-3.10.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:82368699727bfb7b0182e1aa13082e3c08e092fa1a25d3e1fd92405bff96f6d4", size = 8264645, upload-time = "2026-04-24T00:13:01.406Z" }, + { url = "https://files.pythonhosted.org/packages/3d/aa/5bf5a14fe4fed73a4209a155606f8096ff797aad89c6c35179026571133e/matplotlib-3.10.9-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3225f4e1edcb8c86c884ddf79ebe20ecd0a67d30188f279897554ccd8fded4dc", size = 8802194, upload-time = "2026-04-24T00:13:03.702Z" }, + { url = "https://files.pythonhosted.org/packages/dd/5e/b4be852d6bba6fd15893fadf91ff26ae49cb91aac789e95dde9d342e664f/matplotlib-3.10.9-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de2445a0c6690d21b7eb6ce071cebad6d40a2e9bdf10d039074a96ba19797b99", size = 9622684, upload-time = "2026-04-24T00:13:06.647Z" }, + { url = "https://files.pythonhosted.org/packages/4c/3d/ed428c971139112ef730f62770654d609467346d09d4b62617e1afd68a5a/matplotlib-3.10.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:b2b9516251cb89ff618d757daec0e2ed1bf21248013844a853d87ef85ab3081d", size = 9680790, upload-time = "2026-04-24T00:13:10.009Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/09/052e884aaf2b985c63cb79f715f1d5b6a3eaa7de78f6a52b9dbc077d5b53/matplotlib-3.10.9-cp313-cp313t-win_amd64.whl", hash = "sha256:e9fae004b941b23ff2edcf1567a857ed77bafc8086ffa258190462328434faf8", size = 8287571, upload-time = "2026-04-24T00:13:13.087Z" }, + { url = "https://files.pythonhosted.org/packages/f4/38/ae27288e788c35a4250491422f3db7750366fc8c97d6f36fbdecfc1f5518/matplotlib-3.10.9-cp313-cp313t-win_arm64.whl", hash = "sha256:6b63d9c7c769b88ab81e10dc86e4e0607cf56817b9f9e6cf24b2a5f1693b8e38", size = 8188292, upload-time = "2026-04-24T00:13:15.546Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e6/3bd8afd04949f02eabc1c17115ea5255e19cacd4d06fc5abdde4eeb0052c/matplotlib-3.10.9-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:172db52c9e683f5d12eaf57f0f54834190e12581fe1cc2a19595a8f5acb4e77d", size = 8321276, upload-time = "2026-04-24T00:13:18.318Z" }, + { url = "https://files.pythonhosted.org/packages/41/86/86231232fff41c9f8e4a1a7d7a597d349a02527109c3af7d618366122139/matplotlib-3.10.9-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:97e35e8d39ccc85859095e01a53847432ba9a53ddf7986f7a54a11b73d0e143f", size = 8218218, upload-time = "2026-04-24T00:13:20.974Z" }, + { url = "https://files.pythonhosted.org/packages/85/8f/becc9722cafc64f5d2eb0b7c1bf5f585271c618a45dbd8fabeb021f898b6/matplotlib-3.10.9-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aba1615dabe83188e19d4f75a253c6a08423e04c1425e64039f800050a69de6b", size = 9608145, upload-time = "2026-04-24T00:13:23.228Z" }, + { url = "https://files.pythonhosted.org/packages/32/5d/f7e914f7d9325abff4057cee62c0fa70263683189f774473cbfb534cd13b/matplotlib-3.10.9-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34cf8167e023ad956c15f36302911d5406bd99a9862c1a8499ea6f7c0e015dc2", size = 9885085, upload-time = "2026-04-24T00:13:25.849Z" }, + { url = 
"https://files.pythonhosted.org/packages/a5/fd/fa69f2221534e80cc5772ac2b7d222011a2acafc2ec7216d5dd174c864ae/matplotlib-3.10.9-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:59476c6d29d612b8e9bb6ce8c5b631be6ba8f9e3a2421f22a02b192c7dd28716", size = 9672358, upload-time = "2026-04-24T00:13:28.906Z" }, + { url = "https://files.pythonhosted.org/packages/ab/1a/5a4f747a8b271cbb024946d2dd3c913ab5032ba430626f8c3528ada96b4b/matplotlib-3.10.9-cp314-cp314-win_amd64.whl", hash = "sha256:336b9acc64d309063126edcdaca00db9373af3c476bb94388fe9c5a53ad13e6f", size = 8349970, upload-time = "2026-04-24T00:13:31.904Z" }, + { url = "https://files.pythonhosted.org/packages/64/dc/95d60ecaefe30680a154b52ea96ab4b0dab547f1fd6aa12f5fb655e89cae/matplotlib-3.10.9-cp314-cp314-win_arm64.whl", hash = "sha256:2dc9477819ffd78ad12a20df1d9d6a6bd4fec6aaa9072681465fddca052f1456", size = 8272785, upload-time = "2026-04-24T00:13:34.511Z" }, + { url = "https://files.pythonhosted.org/packages/70/a0/005d68bc8b8418300ce6591f18586910a8526806e2ab663933d9f20a41e9/matplotlib-3.10.9-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:da4e09638420548f31c354032a6250e473c68e5a4e96899b4844cf39ddea23fe", size = 8367999, upload-time = "2026-04-24T00:13:36.962Z" }, + { url = "https://files.pythonhosted.org/packages/22/05/1236cc9290be70b2498af20ca348add76e3fffe7f67b477db5133a84f3ea/matplotlib-3.10.9-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:345f6f68ecc8da0ca56fad2ea08fde1a115eda530079eca185d50a7bc3e146c6", size = 8264543, upload-time = "2026-04-24T00:13:39.851Z" }, + { url = "https://files.pythonhosted.org/packages/cd/c2/071f5a5ff6c5bd63aaaf2f45c811d9bf2ced94bde188d9e1a519e21d0cba/matplotlib-3.10.9-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4edcfbd8565339aa62f1cd4012f7180926fdbe71850f7b0d3c379c175cd6b66c", size = 9622800, upload-time = "2026-04-24T00:13:42.296Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/57/da7d1f10a85624b9e7db68e069dd94e58dc41dbf9463c5921632ecbe3661/matplotlib-3.10.9-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6be157fe17fc37cb95ac1d7374cf717ce9259616edec911a78d9d26dae8522d4", size = 9888561, upload-time = "2026-04-24T00:13:45.026Z" }, + { url = "https://files.pythonhosted.org/packages/67/b2/ef8d6bb59b0edb6c16c968b70f548aa13b54348972def5aa6ac85df67145/matplotlib-3.10.9-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:4e42042d54db34fda4e95a7bd3e5789c2a995d2dad3eb8850232ee534092fbbf", size = 9680884, upload-time = "2026-04-24T00:13:48.066Z" }, + { url = "https://files.pythonhosted.org/packages/61/1c/d21bfeb9931881ebe96bcfcff27c7ae4b160ae0ec291a714c42641a56d75/matplotlib-3.10.9-cp314-cp314t-win_amd64.whl", hash = "sha256:c27df8b3848f32a83d1767566595e43cfaa4460380974da06f4279a7ec143c39", size = 8432333, upload-time = "2026-04-24T00:13:51.008Z" }, + { url = "https://files.pythonhosted.org/packages/78/23/92493c3e6e1b635ccfff146f7b99e674808787915420373ac399283764c2/matplotlib-3.10.9-cp314-cp314t-win_arm64.whl", hash = "sha256:a49f1eadc84ca85fd72fa4e89e70e61bf86452df6f971af04b12c60761a0772c", size = 8324785, upload-time = "2026-04-24T00:13:53.633Z" }, ] [[package]] @@ -3082,6 +3484,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/29/e1/c758357e82c2deb2401a7214fbfbc6ddf09b5453371dee1b7a2da0aab274/metaworld-3.0.0-py3-none-any.whl", hash = "sha256:f1dd9f8a1bcceab34a5f3c20113724dd90d21984ae89df98c21c842eb3ece137", size = 36660341, upload-time = "2025-06-14T01:44:32.171Z" }, ] +[[package]] +name = "mistune" +version = "3.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9d/55/d01f0c4b45ade6536c51170b9043db8b2ec6ddf4a35c7ea3f5f559ac935b/mistune-3.2.0.tar.gz", hash = "sha256:708487c8a8cdd99c9d90eb3ed4c3ed961246ff78ac82f03418f5183ab70e398a", size = 95467, upload-time = "2025-12-23T11:36:34.994Z" } 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/f7/4a5e785ec9fbd65146a27b6b70b6cdc161a66f2024e4b04ac06a67f5578b/mistune-3.2.0-py3-none-any.whl", hash = "sha256:febdc629a3c78616b94393c6580551e0e34cc289987ec6c35ed3f4be42d0eee1", size = 53598, upload-time = "2025-12-23T11:36:33.211Z" }, +] + [[package]] name = "ml-dtypes" version = "0.5.4" @@ -3138,7 +3549,7 @@ wheels = [ [[package]] name = "mujoco" -version = "3.6.0" +version = "3.8.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "absl-py" }, @@ -3147,18 +3558,23 @@ dependencies = [ { name = "numpy" }, { name = "pyopengl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/42/82/f8f08dfe9123df4351b560f894f0e7166c1a45a0dd2f04145ed00b8f849b/mujoco-3.6.0.tar.gz", hash = "sha256:15c89f423e33bce0860ad7061763b72323426d6348d7b2e46ebdcc37b11e0905", size = 915041, upload-time = "2026-03-11T01:45:42.807Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e2/d8/9aae1a021b6e15ee69d805d893e01dda71cbaae1c75d5f8ec8e12916cb7c/mujoco-3.8.0.tar.gz", hash = "sha256:250afe57458d6881b2d7659fa0029a128cb57cbbb620268d95647fb9ad742183", size = 918250, upload-time = "2026-04-24T22:59:07.531Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/38/c4/f8959e3d5d98b282e081ce08d07cd71ae949cc0ad9f2c39c0a69fcb88c8c/mujoco-3.6.0-cp312-cp312-macosx_10_16_x86_64.whl", hash = "sha256:e7e60ee4c07f6fecd63c23e6f47b8d7cdacad75d311739d50d50b5107a630af2", size = 7159624, upload-time = "2026-03-11T01:45:16.893Z" }, - { url = "https://files.pythonhosted.org/packages/26/55/7407eced2c44fbea233302d2c11e778852ea0f2eb0e14610f13a7e0d6ac7/mujoco-3.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ea71750f8cbe24b02a091093592f08fb71c95692b43c25e87dabe496ace0bb55", size = 7093719, upload-time = "2026-03-11T01:45:19.191Z" }, - { url = 
"https://files.pythonhosted.org/packages/2c/cc/2aae89c3a83fed29ccb9057c05fb4a218b2a42c6dea136d9a78fea6b39f8/mujoco-3.6.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:094de585a2084508f1cfd76170b0dfe1d9c122b3bd4677e96ef2383100c9032f", size = 6982824, upload-time = "2026-03-11T01:45:22.078Z" }, - { url = "https://files.pythonhosted.org/packages/52/6c/5ec4e93676a65064a6591176772e00cfa02716156a1d0a7d646a8203348f/mujoco-3.6.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8714fab312c7ee58f45bda7ef8762da2184e3a6a1d780a5093e93a160d66bd3d", size = 7473873, upload-time = "2026-03-11T01:45:24.671Z" }, - { url = "https://files.pythonhosted.org/packages/92/22/38d82f0c34213af53afbbb248b3442943ef48ffbac1e4c909b321e02ac56/mujoco-3.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:3d4ec53e4e20fcc85843d607fa1648e0b12d2d2de81ee6f85926e95a7e84e8d8", size = 5764289, upload-time = "2026-03-11T01:45:27.014Z" }, - { url = "https://files.pythonhosted.org/packages/29/0e/f3ea1fc9d1a25f19b173b13c23797805480cdb0a1026d43cf6b37dc2de6e/mujoco-3.6.0-cp313-cp313-macosx_10_16_x86_64.whl", hash = "sha256:29c8c05061798fc3b80269ab3661fa915b890e9623bda4bc6bc9e237db81e885", size = 7159784, upload-time = "2026-03-11T01:45:30.037Z" }, - { url = "https://files.pythonhosted.org/packages/25/f2/2cbeabc6b69110e743100f550ec00ae8c60352b6975cf95470add299ed7a/mujoco-3.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d593e9373a61db82a506485f7f34533fb6d7e2bff7602f2310aa03e3a93b292f", size = 7093841, upload-time = "2026-03-11T01:45:33.204Z" }, - { url = "https://files.pythonhosted.org/packages/44/80/d7173c73ebfee73a9a3748851c1b5a5e2b5f70b13f4e7fc56dd9d54343d7/mujoco-3.6.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a1a1638dabd66f18d0c04c7fd9383439b0b47e9f91e07939b41e1e628de6357", size = 6983327, upload-time = "2026-03-11T01:45:35.658Z" }, - { url = 
"https://files.pythonhosted.org/packages/6e/48/c8cd52847d8a973fc606910a5467b8b7b68fa763afbe91f41d87123f957c/mujoco-3.6.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5dc7adceab3a7dbf8b4d52176d4aa629aca5f83dfce5ae06abc1a8c93980d67b", size = 7474334, upload-time = "2026-03-11T01:45:38.237Z" }, - { url = "https://files.pythonhosted.org/packages/c9/f4/17e3962681c141182616db9ec556ad902311ec154fffda7e9b35ed9677c2/mujoco-3.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:8068182e134ad8a7786a8d24e3198f485e2c531be1149d7793cfda1cc7fb7122", size = 5764100, upload-time = "2026-03-11T01:45:40.548Z" }, + { url = "https://files.pythonhosted.org/packages/b4/0d/35aad24bef1f36e9ebf63367938b16abec82407338d612c37624ff20b0e3/mujoco-3.8.0-cp312-cp312-macosx_10_16_x86_64.whl", hash = "sha256:a495da0cd01aff6ac94ec97f0a1d913e1afe071daf107e220f81814435227982", size = 7265096, upload-time = "2026-04-24T22:58:33.475Z" }, + { url = "https://files.pythonhosted.org/packages/60/d7/2ee5a123431eb50f234de2759e46f3d0c02876e0b1ffce1b26102ed388e7/mujoco-3.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cc1d25b0cd47248fd39681310950b2bea0f6098f57358c0c02730d365bb80ba1", size = 7204862, upload-time = "2026-04-24T22:58:35.978Z" }, + { url = "https://files.pythonhosted.org/packages/aa/62/a488e6e0963e0210b8262650d25e51c4c597ff7beed4fe01a7e88e3abfc5/mujoco-3.8.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:980ab5a2210777cf766e53eb574726f9360e2a87e47d83a6c8d801fb71f2fe52", size = 6743542, upload-time = "2026-04-24T22:58:38.137Z" }, + { url = "https://files.pythonhosted.org/packages/6f/de/bc2271210dad5c6ab73af294779226308e9cf4ed8bc2dbe59922eb8702ed/mujoco-3.8.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:323fedd14905b73cfe56ea8ff916716ccf8b57cff348a7aa6932c8983a465d64", size = 7226045, upload-time = "2026-04-24T22:58:41.117Z" }, + { url = 
"https://files.pythonhosted.org/packages/65/87/198a88747ff0c01e35070c0c80ae0c05ff8d1a61d6e6f379a4e5ff3e6185/mujoco-3.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:8db22dbc5a6c98241549c8161f20a2b0c2ccc5d08fa42595e7a4b594e35a70dd", size = 5813167, upload-time = "2026-04-24T22:58:43.469Z" }, + { url = "https://files.pythonhosted.org/packages/11/7d/41c73ebe93565ed196ec5ad012232138e3d10850e841ccd77d459afc4383/mujoco-3.8.0-cp313-cp313-macosx_10_16_x86_64.whl", hash = "sha256:d4a080aab0be4d02162e6fe3bcd7163c01cc751638f5a84ba05477b512d95cc0", size = 7265494, upload-time = "2026-04-24T22:58:45.119Z" }, + { url = "https://files.pythonhosted.org/packages/7c/05/d21b43c31c5d9179c2d33e0d38896775b262a9d78729b760717927a02e28/mujoco-3.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5aa987e70a6601ebf02d123d9842f2d1b8f8057163feec1f0a5a049de1cbe252", size = 7205049, upload-time = "2026-04-24T22:58:46.874Z" }, + { url = "https://files.pythonhosted.org/packages/6d/9c/af181776d0ffb70ad6a4365f0613529f268782850dedab0569c6cce83fcc/mujoco-3.8.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8bd33b7e2382605012dfe41c00a8d3bb358153e5b019d920a52c31664472ce20", size = 6743578, upload-time = "2026-04-24T22:58:48.862Z" }, + { url = "https://files.pythonhosted.org/packages/89/8a/c9b28784a7e51926d609b70842e16b85e286df87ad861dbbb26c4e49cacf/mujoco-3.8.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f2b3de0c9fed950c5080ea4b3ff1fb5c89f88e22798f1e1693ec8dbbd36de00b", size = 7226464, upload-time = "2026-04-24T22:58:51.039Z" }, + { url = "https://files.pythonhosted.org/packages/50/3f/0a72c74dd766524b9f1b79f0d6d327b9a797d87b44fe62b3068b44123b54/mujoco-3.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:de03d173f4d9c7341b5dcc10a8eddb36bb19989df68f24369dc7c782dc053f11", size = 5813265, upload-time = "2026-04-24T22:58:53.316Z" }, + { url = 
"https://files.pythonhosted.org/packages/81/f7/afdbcb4ad50786ed7500205f29ffb5c3a5ef9d42e6b3ad8f9636c4911687/mujoco-3.8.0-cp314-cp314-macosx_10_16_x86_64.whl", hash = "sha256:09c27fc6ce1560912e920789bc121290e4c84919ae30f7b54da5efed4cd2804a", size = 7320672, upload-time = "2026-04-24T22:58:55.469Z" }, + { url = "https://files.pythonhosted.org/packages/4a/8a/6299f6209084dda9469374461c77adad5d63041427b9b9bd4fadaf0c35b0/mujoco-3.8.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5fc4e930cd1414f965381ac97ec054a001539e7aa462836145f6a3201b0dfe88", size = 7249791, upload-time = "2026-04-24T22:58:57.126Z" }, + { url = "https://files.pythonhosted.org/packages/df/3c/a74d169b7725aee971962238d2aee767f64496ede1367cd558361c97d5ca/mujoco-3.8.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:14c32992267906d422ed2127e99aa9ad036a62324139da2a3bd25df1e928d0ee", size = 6754009, upload-time = "2026-04-24T22:59:00.375Z" }, + { url = "https://files.pythonhosted.org/packages/66/36/f3610724bb35f6cfb2ffebe9d8d315975e5fa9722146ad58298df442da7e/mujoco-3.8.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1373c4744a3424f1aa224d2ad5201497ea150c4698beb9aaeb8c3560efa60fdb", size = 7227764, upload-time = "2026-04-24T22:59:02.722Z" }, + { url = "https://files.pythonhosted.org/packages/92/5a/80f5347c322300e4402b08df74e7489756e9af060bb8b8d342086dd5b41c/mujoco-3.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:f8da8fc4a2861f9d1eef64d83adcd783bfe5c02bdc78af2d963d942d097dfdce", size = 6144239, upload-time = "2026-04-24T22:59:05.329Z" }, ] [[package]] @@ -3279,7 +3695,7 @@ wheels = [ [[package]] name = "mypy" -version = "1.20.0" +version = "1.20.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "librt", marker = "platform_python_implementation != 'PyPy'" }, @@ -3287,37 +3703,37 @@ dependencies = [ { name = "pathspec" }, { name = "typing-extensions" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/f8/5c/b0089fe7fef0a994ae5ee07029ced0526082c6cfaaa4c10d40a10e33b097/mypy-1.20.0.tar.gz", hash = "sha256:eb96c84efcc33f0b5e0e04beacf00129dd963b67226b01c00b9dfc8affb464c3", size = 3815028, upload-time = "2026-03-31T16:55:14.959Z" } +sdist = { url = "https://files.pythonhosted.org/packages/04/af/e3d4b3e9ec91a0ff9aabfdb38692952acf49bbb899c2e4c29acb3a6da3ae/mypy-1.20.2.tar.gz", hash = "sha256:e8222c26daaafd9e8626dec58ae36029f82585890589576f769a650dd20fd665", size = 3817349, upload-time = "2026-04-21T17:12:28.473Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/be/dd/3afa29b58c2e57c79116ed55d700721c3c3b15955e2b6251dd165d377c0e/mypy-1.20.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:002b613ae19f4ac7d18b7e168ffe1cb9013b37c57f7411984abbd3b817b0a214", size = 14509525, upload-time = "2026-03-31T16:55:01.824Z" }, - { url = "https://files.pythonhosted.org/packages/54/eb/227b516ab8cad9f2a13c5e7a98d28cd6aa75e9c83e82776ae6c1c4c046c7/mypy-1.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9336b5e6712f4adaf5afc3203a99a40b379049104349d747eb3e5a3aa23ac2e", size = 13326469, upload-time = "2026-03-31T16:51:41.23Z" }, - { url = "https://files.pythonhosted.org/packages/57/d4/1ddb799860c1b5ac6117ec307b965f65deeb47044395ff01ab793248a591/mypy-1.20.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f13b3e41bce9d257eded794c0f12878af3129d80aacd8a3ee0dee51f3a978651", size = 13705953, upload-time = "2026-03-31T16:48:55.69Z" }, - { url = "https://files.pythonhosted.org/packages/c5/b7/54a720f565a87b893182a2a393370289ae7149e4715859e10e1c05e49154/mypy-1.20.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9804c3ad27f78e54e58b32e7cb532d128b43dbfb9f3f9f06262b821a0f6bd3f5", size = 14710363, upload-time = "2026-03-31T16:53:26.948Z" }, - { url = 
"https://files.pythonhosted.org/packages/b2/2a/74810274848d061f8a8ea4ac23aaad43bd3d8c1882457999c2e568341c57/mypy-1.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:697f102c5c1d526bdd761a69f17c6070f9892eebcb94b1a5963d679288c09e78", size = 14947005, upload-time = "2026-03-31T16:50:17.591Z" }, - { url = "https://files.pythonhosted.org/packages/77/91/21b8ba75f958bcda75690951ce6fa6b7138b03471618959529d74b8544e2/mypy-1.20.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ecd63f75fdd30327e4ad8b5704bd6d91fc6c1b2e029f8ee14705e1207212489", size = 10880616, upload-time = "2026-03-31T16:52:19.986Z" }, - { url = "https://files.pythonhosted.org/packages/8a/15/3d8198ef97c1ca03aea010cce4f1d4f3bc5d9849e8c0140111ca2ead9fdd/mypy-1.20.0-cp312-cp312-win_arm64.whl", hash = "sha256:f194db59657c58593a3c47c6dfd7bad4ef4ac12dbc94d01b3a95521f78177e33", size = 9813091, upload-time = "2026-03-31T16:53:44.385Z" }, - { url = "https://files.pythonhosted.org/packages/d6/a7/f64ea7bd592fa431cb597418b6dec4a47f7d0c36325fec7ac67bc8402b94/mypy-1.20.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b20c8b0fd5877abdf402e79a3af987053de07e6fb208c18df6659f708b535134", size = 14485344, upload-time = "2026-03-31T16:49:16.78Z" }, - { url = "https://files.pythonhosted.org/packages/bb/72/8927d84cfc90c6abea6e96663576e2e417589347eb538749a464c4c218a0/mypy-1.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:367e5c993ba34d5054d11937d0485ad6dfc60ba760fa326c01090fc256adf15c", size = 13327400, upload-time = "2026-03-31T16:53:08.02Z" }, - { url = "https://files.pythonhosted.org/packages/ab/4a/11ab99f9afa41aa350178d24a7d2da17043228ea10f6456523f64b5a6cf6/mypy-1.20.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f799d9db89fc00446f03281f84a221e50018fc40113a3ba9864b132895619ebe", size = 13706384, upload-time = "2026-03-31T16:52:28.577Z" }, - { url = 
"https://files.pythonhosted.org/packages/42/79/694ca73979cfb3535ebfe78733844cd5aff2e63304f59bf90585110d975a/mypy-1.20.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:555658c611099455b2da507582ea20d2043dfdfe7f5ad0add472b1c6238b433f", size = 14700378, upload-time = "2026-03-31T16:48:45.527Z" }, - { url = "https://files.pythonhosted.org/packages/84/24/a022ccab3a46e3d2cdf2e0e260648633640eb396c7e75d5a42818a8d3971/mypy-1.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:efe8d70949c3023698c3fca1e94527e7e790a361ab8116f90d11221421cd8726", size = 14932170, upload-time = "2026-03-31T16:49:36.038Z" }, - { url = "https://files.pythonhosted.org/packages/d8/9b/549228d88f574d04117e736f55958bd4908f980f9f5700a07aeb85df005b/mypy-1.20.0-cp313-cp313-win_amd64.whl", hash = "sha256:f49590891d2c2f8a9de15614e32e459a794bcba84693c2394291a2038bbaaa69", size = 10888526, upload-time = "2026-03-31T16:50:59.827Z" }, - { url = "https://files.pythonhosted.org/packages/91/17/15095c0e54a8bc04d22d4ff06b2139d5f142c2e87520b4e39010c4862771/mypy-1.20.0-cp313-cp313-win_arm64.whl", hash = "sha256:76a70bf840495729be47510856b978f1b0ec7d08f257ca38c9d932720bf6b43e", size = 9816456, upload-time = "2026-03-31T16:49:59.537Z" }, - { url = "https://files.pythonhosted.org/packages/4e/0e/6ca4a84cbed9e62384bc0b2974c90395ece5ed672393e553996501625fc5/mypy-1.20.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:0f42dfaab7ec1baff3b383ad7af562ab0de573c5f6edb44b2dab016082b89948", size = 14483331, upload-time = "2026-03-31T16:52:57.999Z" }, - { url = "https://files.pythonhosted.org/packages/7d/c5/5fe9d8a729dd9605064691816243ae6c49fde0bd28f6e5e17f6a24203c43/mypy-1.20.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:31b5dbb55293c1bd27c0fc813a0d2bb5ceef9d65ac5afa2e58f829dab7921fd5", size = 13342047, upload-time = "2026-03-31T16:54:21.555Z" }, - { url = 
"https://files.pythonhosted.org/packages/4c/33/e18bcfa338ca4e6b2771c85d4c5203e627d0c69d9de5c1a2cf2ba13320ba/mypy-1.20.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49d11c6f573a5a08f77fad13faff2139f6d0730ebed2cfa9b3d2702671dd7188", size = 13719585, upload-time = "2026-03-31T16:51:53.89Z" }, - { url = "https://files.pythonhosted.org/packages/6b/8d/93491ff7b79419edc7eabf95cb3b3f7490e2e574b2855c7c7e7394ff933f/mypy-1.20.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7d3243c406773185144527f83be0e0aefc7bf4601b0b2b956665608bf7c98a83", size = 14685075, upload-time = "2026-03-31T16:54:04.464Z" }, - { url = "https://files.pythonhosted.org/packages/b5/9d/d924b38a4923f8d164bf2b4ec98bf13beaf6e10a5348b4b137eadae40a6e/mypy-1.20.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a79c1eba7ac4209f2d850f0edd0a2f8bba88cbfdfefe6fb76a19e9d4fe5e71a2", size = 14919141, upload-time = "2026-03-31T16:54:51.785Z" }, - { url = "https://files.pythonhosted.org/packages/59/98/1da9977016678c0b99d43afe52ed00bb3c1a0c4c995d3e6acca1a6ebb9b4/mypy-1.20.0-cp314-cp314-win_amd64.whl", hash = "sha256:00e047c74d3ec6e71a2eb88e9ea551a2edb90c21f993aefa9e0d2a898e0bb732", size = 11050925, upload-time = "2026-03-31T16:51:30.758Z" }, - { url = "https://files.pythonhosted.org/packages/5e/e3/ba0b7a3143e49a9c4f5967dde6ea4bf8e0b10ecbbcca69af84027160ee89/mypy-1.20.0-cp314-cp314-win_arm64.whl", hash = "sha256:931a7630bba591593dcf6e97224a21ff80fb357e7982628d25e3c618e7f598ef", size = 10001089, upload-time = "2026-03-31T16:49:43.632Z" }, - { url = "https://files.pythonhosted.org/packages/12/28/e617e67b3be9d213cda7277913269c874eb26472489f95d09d89765ce2d8/mypy-1.20.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:26c8b52627b6552f47ff11adb4e1509605f094e29815323e487fc0053ebe93d1", size = 15534710, upload-time = "2026-03-31T16:52:12.506Z" }, - { url = 
"https://files.pythonhosted.org/packages/6e/0c/3b5f2d3e45dc7169b811adce8451679d9430399d03b168f9b0489f43adaa/mypy-1.20.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:39362cdb4ba5f916e7976fccecaab1ba3a83e35f60fa68b64e9a70e221bb2436", size = 14393013, upload-time = "2026-03-31T16:54:41.186Z" }, - { url = "https://files.pythonhosted.org/packages/a3/49/edc8b0aa145cc09c1c74f7ce2858eead9329931dcbbb26e2ad40906daa4e/mypy-1.20.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:34506397dbf40c15dc567635d18a21d33827e9ab29014fb83d292a8f4f8953b6", size = 15047240, upload-time = "2026-03-31T16:54:31.955Z" }, - { url = "https://files.pythonhosted.org/packages/42/37/a946bb416e37a57fa752b3100fd5ede0e28df94f92366d1716555d47c454/mypy-1.20.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:555493c44a4f5a1b58d611a43333e71a9981c6dbe26270377b6f8174126a0526", size = 15858565, upload-time = "2026-03-31T16:53:36.997Z" }, - { url = "https://files.pythonhosted.org/packages/2f/99/7690b5b5b552db1bd4ff362e4c0eb3107b98d680835e65823fbe888c8b78/mypy-1.20.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:2721f0ce49cb74a38f00c50da67cb7d36317b5eda38877a49614dc018e91c787", size = 16087874, upload-time = "2026-03-31T16:52:48.313Z" }, - { url = "https://files.pythonhosted.org/packages/aa/76/53e893a498138066acd28192b77495c9357e5a58cc4be753182846b43315/mypy-1.20.0-cp314-cp314t-win_amd64.whl", hash = "sha256:47781555a7aa5fedcc2d16bcd72e0dc83eb272c10dd657f9fb3f9cc08e2e6abb", size = 12572380, upload-time = "2026-03-31T16:49:52.454Z" }, - { url = "https://files.pythonhosted.org/packages/76/9c/6dbdae21f01b7aacddc2c0bbf3c5557aa547827fdf271770fe1e521e7093/mypy-1.20.0-cp314-cp314t-win_arm64.whl", hash = "sha256:c70380fe5d64010f79fb863b9081c7004dd65225d2277333c219d93a10dad4dd", size = 10381174, upload-time = "2026-03-31T16:51:20.179Z" }, - { url = 
"https://files.pythonhosted.org/packages/21/66/4d734961ce167f0fd8380769b3b7c06dbdd6ff54c2190f3f2ecd22528158/mypy-1.20.0-py3-none-any.whl", hash = "sha256:a6e0641147cbfa7e4e94efdb95c2dab1aff8cfc159ded13e07f308ddccc8c48e", size = 2636365, upload-time = "2026-03-31T16:51:44.911Z" }, + { url = "https://files.pythonhosted.org/packages/71/4e/7560e4528db9e9b147e4c0f22660466bf30a0a1fe3d63d1b9d3b0fd354ee/mypy-1.20.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4dbfcf869f6b0517f70cf0030ba6ea1d6645e132337a7d5204a18d8d5636c02b", size = 14539393, upload-time = "2026-04-21T17:07:12.52Z" }, + { url = "https://files.pythonhosted.org/packages/32/d9/34a5efed8124f5a9234f55ac6a4ced4201e2c5b81e1109c49ad23190ec8c/mypy-1.20.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4b6481b228d072315b053210b01ac320e1be243dc17f9e5887ef167f23f5fae4", size = 13361642, upload-time = "2026-04-21T17:06:53.742Z" }, + { url = "https://files.pythonhosted.org/packages/d1/14/eb377acf78c03c92d566a1510cda8137348215b5335085ef662ab82ecd3a/mypy-1.20.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:34397cdced6b90b836e38182076049fdb41424322e0b0728c946b0939ebdf9f6", size = 13740347, upload-time = "2026-04-21T17:12:04.73Z" }, + { url = "https://files.pythonhosted.org/packages/b9/94/7e4634a32b641aa1c112422eed1bbece61ee16205f674190e8b536f884de/mypy-1.20.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a5da6976f20cae27059ea8d0c86e7cef3de720e04c4bb9ee18e3690fdb792066", size = 14734042, upload-time = "2026-04-21T17:07:43.16Z" }, + { url = "https://files.pythonhosted.org/packages/7a/f3/f7e62395cb7f434541b4491a01149a4439e28ace4c0c632bbf5431e92d1f/mypy-1.20.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:56908d7e08318d39f85b1f0c6cfd47b0cac1a130da677630dac0de3e0623e102", size = 14964958, upload-time = "2026-04-21T17:11:00.665Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/0d/47e3c3a0ec2a876e35aeac365df3cac7776c36bbd4ed18cc521e1b9d255b/mypy-1.20.2-cp312-cp312-win_amd64.whl", hash = "sha256:d52ad8d78522da1d308789df651ee5379088e77c76cb1994858d40a426b343b9", size = 10911340, upload-time = "2026-04-21T17:10:49.179Z" }, + { url = "https://files.pythonhosted.org/packages/d6/b2/6c852d72e0ea8b01f49da817fb52539993cde327e7d010e0103dc12d0dac/mypy-1.20.2-cp312-cp312-win_arm64.whl", hash = "sha256:785b08db19c9f214dc37d65f7c165d19a30fcecb48abfa30f31b01b5acaabb58", size = 9833947, upload-time = "2026-04-21T17:09:05.267Z" }, + { url = "https://files.pythonhosted.org/packages/5b/c4/b93812d3a192c9bcf5df405bd2f30277cd0e48106a14d1023c7f6ed6e39b/mypy-1.20.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:edfbfca868cdd6bd8d974a60f8a3682f5565d3f5c99b327640cedd24c4264026", size = 14524670, upload-time = "2026-04-21T17:10:30.737Z" }, + { url = "https://files.pythonhosted.org/packages/f3/47/42c122501bff18eaf1e8f457f5c017933452d8acdc52918a9f59f6812955/mypy-1.20.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e2877a02380adfcdbc69071a0f74d6e9dbbf593c0dc9d174e1f223ffd5281943", size = 13336218, upload-time = "2026-04-21T17:08:44.069Z" }, + { url = "https://files.pythonhosted.org/packages/92/8f/75bbc92f41725fbd585fb17b440b1119b576105df1013622983e18640a93/mypy-1.20.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7488448de6007cd5177c6cea0517ac33b4c0f5ee9b5e9f2be51ce75511a85517", size = 13724906, upload-time = "2026-04-21T17:08:01.02Z" }, + { url = "https://files.pythonhosted.org/packages/a1/32/4c49da27a606167391ff0c39aa955707a00edc500572e562f7c36c08a71f/mypy-1.20.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bb9c2fa06887e21d6a3a868762acb82aec34e2c6fd0174064f27c93ede68ad15", size = 14726046, upload-time = "2026-04-21T17:11:22.354Z" }, + { url = 
"https://files.pythonhosted.org/packages/7f/fc/4e354a1bd70216359deb0c9c54847ee6b32ef78dfb09f5131ff99b494078/mypy-1.20.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d56a78b646f2e3daa865bc70cd5ec5a46c50045801ca8ff17a0c43abc97e3ee", size = 14955587, upload-time = "2026-04-21T17:12:16.033Z" }, + { url = "https://files.pythonhosted.org/packages/62/b2/c0f2056e9eb8f08c62cafd9715e4584b89132bdc832fcf85d27d07b5f3e5/mypy-1.20.2-cp313-cp313-win_amd64.whl", hash = "sha256:2a4102b03bb7481d9a91a6da8d174740c9c8c4401024684b9ca3b7cc5e49852f", size = 10922681, upload-time = "2026-04-21T17:06:35.842Z" }, + { url = "https://files.pythonhosted.org/packages/e5/14/065e333721f05de8ef683d0aa804c23026bcc287446b61cac657b902ccac/mypy-1.20.2-cp313-cp313-win_arm64.whl", hash = "sha256:a95a9248b0c6fd933a442c03c3b113c3b61320086b88e2c444676d3fd1ca3330", size = 9830560, upload-time = "2026-04-21T17:07:51.023Z" }, + { url = "https://files.pythonhosted.org/packages/ae/d1/b4ec96b0ecc620a4443570c6e95c867903428cfcde4206518eafdd5880c3/mypy-1.20.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:419413398fe250aae057fd2fe50166b61077083c9b82754c341cf4fd73038f30", size = 14524561, upload-time = "2026-04-21T17:06:27.325Z" }, + { url = "https://files.pythonhosted.org/packages/3a/63/d2c2ff4fa66bc49477d32dfa26e8a167ba803ea6a69c5efb416036909d30/mypy-1.20.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:e73c07f23009962885c197ccb9b41356a30cc0e5a1d0c2ea8fd8fb1362d7f924", size = 13363883, upload-time = "2026-04-21T17:11:11.239Z" }, + { url = "https://files.pythonhosted.org/packages/2a/56/983916806bf4eddeaaa2c9230903c3669c6718552a921154e1c5182c701f/mypy-1.20.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c64e5973df366b747646fc98da921f9d6eba9716d57d1db94a83c026a08e0fb", size = 13742945, upload-time = "2026-04-21T17:08:34.181Z" }, + { url = 
"https://files.pythonhosted.org/packages/19/65/0cd9285ab010ee8214c83d67c6b49417c40d86ce46f1aa109457b5a9b8d7/mypy-1.20.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a65aa591af023864fd08a97da9974e919452cfe19cb146c8a5dc692626445dc", size = 14706163, upload-time = "2026-04-21T17:05:15.51Z" }, + { url = "https://files.pythonhosted.org/packages/94/97/48ff3b297cafcc94d185243a9190836fb1b01c1b0918fff64e941e973cc9/mypy-1.20.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4fef51b01e638974a6e69885687e9bd40c8d1e09a6cd291cca0619625cf1f558", size = 14938677, upload-time = "2026-04-21T17:05:39.562Z" }, + { url = "https://files.pythonhosted.org/packages/fd/a1/1b4233d255bdd0b38a1f284feeb1c143ca508c19184964e22f8d837ec851/mypy-1.20.2-cp314-cp314-win_amd64.whl", hash = "sha256:913485a03f1bcf5d279409a9d2b9ed565c151f61c09f29991e5faa14033da4c8", size = 11089322, upload-time = "2026-04-21T17:06:44.29Z" }, + { url = "https://files.pythonhosted.org/packages/78/c2/ce7ee2ba36aeb954ba50f18fa25d9c1188578654b97d02a66a15b6f09531/mypy-1.20.2-cp314-cp314-win_arm64.whl", hash = "sha256:c3bae4f855d965b5453784300c12ffc63a548304ac7f99e55d4dc7c898673aa3", size = 10017775, upload-time = "2026-04-21T17:07:20.732Z" }, + { url = "https://files.pythonhosted.org/packages/4e/a1/9d93a7d0b5859af0ead82b4888b46df6c8797e1bc5e1e262a08518c6d48e/mypy-1.20.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:2de3dcea53babc1c3237a19002bc3d228ce1833278f093b8d619e06e7cc79609", size = 15549002, upload-time = "2026-04-21T17:08:23.107Z" }, + { url = "https://files.pythonhosted.org/packages/00/d2/09a6a10ee1bf0008f6c144d9676f2ca6a12512151b4e0ad0ff6c4fac5337/mypy-1.20.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:52b176444e2e5054dfcbcb8c75b0b719865c96247b37407184bbfca5c353f2c2", size = 14401942, upload-time = "2026-04-21T17:07:31.837Z" }, + { url = 
"https://files.pythonhosted.org/packages/57/da/9594b75c3c019e805250bed3583bdf4443ff9e6ef08f97e39ae308cb06f2/mypy-1.20.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:688c3312e5dadb573a2c69c82af3a298d43ecf9e6d264e0f95df960b5f6ac19c", size = 15041649, upload-time = "2026-04-21T17:09:34.653Z" }, + { url = "https://files.pythonhosted.org/packages/97/77/f75a65c278e6e8eba2071f7f5a90481891053ecc39878cc444634d892abe/mypy-1.20.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:29752dbbf8cc53f89f6ac096d363314333045c257c9c75cbd189ca2de0455744", size = 15864588, upload-time = "2026-04-21T17:11:44.936Z" }, + { url = "https://files.pythonhosted.org/packages/d7/46/1a4e1c66e96c1a3246ddf5403d122ac9b0a8d2b7e65730b9d6533ba7a6d3/mypy-1.20.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:803203d2b6ea644982c644895c2f78b28d0e208bba7b27d9b921e0ec5eb207c6", size = 16093956, upload-time = "2026-04-21T17:10:17.683Z" }, + { url = "https://files.pythonhosted.org/packages/5a/2c/78a8851264dec38cd736ca5b8bc9380674df0dd0be7792f538916157716c/mypy-1.20.2-cp314-cp314t-win_amd64.whl", hash = "sha256:9bcb8aa397ff0093c824182fd76a935a9ba7ad097fcbef80ae89bf6c1731d8ec", size = 12568661, upload-time = "2026-04-21T17:11:54.473Z" }, + { url = "https://files.pythonhosted.org/packages/83/01/cd7318aa03493322ce275a0e14f4f52b8896335e4e79d4fb8153a7ad2b77/mypy-1.20.2-cp314-cp314t-win_arm64.whl", hash = "sha256:e061b58443f1736f8a37c48978d7ab581636d6ab03e3d4f99e3fa90463bb9382", size = 10389240, upload-time = "2026-04-21T17:09:42.719Z" }, + { url = "https://files.pythonhosted.org/packages/28/9a/f23c163e25b11074188251b0b5a0342625fc1cdb6af604757174fa9acc9b/mypy-1.20.2-py3-none-any.whl", hash = "sha256:a94c5a76ab46c5e6257c7972b6c8cff0574201ca7dc05647e33e795d78680563", size = 2637314, upload-time = "2026-04-21T17:05:54.5Z" }, ] [[package]] @@ -3329,21 +3745,70 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, ] +[[package]] +name = "nbclient" +version = "0.10.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "nbformat" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/56/91/1c1d5a4b9a9ebba2b4e32b8c852c2975c872aec1fe42ab5e516b2cecd193/nbclient-0.10.4.tar.gz", hash = "sha256:1e54091b16e6da39e297b0ece3e10f6f29f4ac4e8ee515d29f8a7099bd6553c9", size = 62554, upload-time = "2025-12-23T07:45:46.369Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/a0/5b0c2f11142ed1dddec842457d3f65eaf71a0080894eb6f018755b319c3a/nbclient-0.10.4-py3-none-any.whl", hash = "sha256:9162df5a7373d70d606527300a95a975a47c137776cd942e52d9c7e29ff83440", size = 25465, upload-time = "2025-12-23T07:45:44.51Z" }, +] + +[[package]] +name = "nbconvert" +version = "7.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "bleach", extra = ["css"] }, + { name = "defusedxml" }, + { name = "jinja2" }, + { name = "jupyter-core" }, + { name = "jupyterlab-pygments" }, + { name = "markupsafe" }, + { name = "mistune" }, + { name = "nbclient" }, + { name = "nbformat" }, + { name = "packaging" }, + { name = "pandocfilters" }, + { name = "pygments" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/01/b1/708e53fe2e429c103c6e6e159106bcf0357ac41aa4c28772bd8402339051/nbconvert-7.17.1.tar.gz", hash = "sha256:34d0d0a7e73ce3cbab6c5aae8f4f468797280b01fd8bd2ca746da8569eddd7d2", size = 865311, upload-time = "2026-04-08T00:44:14.914Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/67/f8/bb0a9d5f46819c821dc1f004aa2cc29b1d91453297dbf5ff20470f00f193/nbconvert-7.17.1-py3-none-any.whl", hash = "sha256:aa85c087b435e7bf1ffd03319f658e285f2b89eccab33bc1ba7025495ab3e7c8", size = 261927, upload-time = "2026-04-08T00:44:12.845Z" }, +] + [[package]] name = "nbformat" version = "5.10.4" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "fastjsonschema", marker = "sys_platform == 'linux'" }, - { name = "jsonschema", marker = "sys_platform == 'linux'" }, - { name = "jupyter-core", marker = "sys_platform == 'linux'" }, - { name = "traitlets", marker = "sys_platform == 'linux'" }, + { name = "fastjsonschema" }, + { name = "jsonschema" }, + { name = "jupyter-core" }, + { name = "traitlets" }, ] sdist = { url = "https://files.pythonhosted.org/packages/6d/fd/91545e604bc3dad7dca9ed03284086039b294c6b3d75c0d2fa45f9e9caf3/nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a", size = 142749, upload-time = "2024-04-04T11:20:37.371Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/a9/82/0340caa499416c78e5d8f5f05947ae4bc3cba53c9f038ab6e9ed964e22f1/nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b", size = 78454, upload-time = "2024-04-04T11:20:34.895Z" }, ] +[[package]] +name = "nest-asyncio" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418, upload-time = "2024-01-21T14:25:19.227Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = 
"sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, +] + [[package]] name = "networkx" version = "3.6.1" @@ -3388,6 +3853,34 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, ] +[[package]] +name = "notebook" +version = "7.5.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-server" }, + { name = "jupyterlab" }, + { name = "jupyterlab-server" }, + { name = "notebook-shim" }, + { name = "tornado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/6d/41052c48d6f6349ca0a7c4d1f6a78464de135e6d18f5829ba2510e62184c/notebook-7.5.5.tar.gz", hash = "sha256:dc0bfab0f2372c8278c457423d3256c34154ac2cc76bf20e9925260c461013c3", size = 14169167, upload-time = "2026-03-11T16:32:51.922Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f8/aa/cbd1deb9f07446241e88f8d5fecccd95b249bca0b4e5482214a4d1714c49/notebook-7.5.5-py3-none-any.whl", hash = "sha256:a7c14dbeefa6592e87f72290ca982e0c10f5bbf3786be2a600fda9da2764a2b8", size = 14578929, upload-time = "2026-03-11T16:32:48.021Z" }, +] + +[[package]] +name = "notebook-shim" +version = "0.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-server" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/54/d2/92fa3243712b9a3e8bafaf60aac366da1cada3639ca767ff4b5b3654ec28/notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb", size = 13167, upload-time = "2024-02-14T23:35:18.353Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/f9/33/bd5b9137445ea4b680023eb0469b2bb969d61303dedb2aac6560ff3d14a1/notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef", size = 13307, upload-time = "2024-02-14T23:35:16.286Z" }, +] + [[package]] name = "num2words" version = "0.5.14" @@ -3402,22 +3895,22 @@ wheels = [ [[package]] name = "numba" -version = "0.65.0" +version = "0.65.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llvmlite", marker = "sys_platform == 'linux'" }, { name = "numpy", marker = "sys_platform == 'linux'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/49/61/7299643b9c18d669e04be7c5bcb64d985070d07553274817b45b049e7bfe/numba-0.65.0.tar.gz", hash = "sha256:edad0d9f6682e93624c00125a471ae4df186175d71fd604c983c377cdc03e68b", size = 2764131, upload-time = "2026-04-01T03:52:01.946Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/c5/db2ac3685833d626c0dcae6bd2330cd68433e1fd248d15f70998160d3ad7/numba-0.65.1.tar.gz", hash = "sha256:19357146c32fe9ed25059ab915e8465fb13951cf6b0aace3826b76886373ab23", size = 2765600, upload-time = "2026-04-24T02:02:56.551Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/73/36/88406bd58600cc696417b8e5dd6a056478da808f3eaf48d18e2421e0c2d9/numba-0.65.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a52d92ffd297c10364bce60cd1fcb88f99284ab5df085f2c6bcd1cb33b529a6f", size = 3801411, upload-time = "2026-04-01T03:51:34.321Z" }, - { url = "https://files.pythonhosted.org/packages/0c/61/ce753a1d7646dd477e16d15e89473703faebb8995d2f71d7ad69a540b565/numba-0.65.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da8e371e328c06d0010c3d8b44b21858652831b85bcfba78cb22c042e22dbd8e", size = 3501622, upload-time = "2026-04-01T03:51:36.348Z" }, - { url = 
"https://files.pythonhosted.org/packages/1b/8f/3d116e4b8e92f6abace431afa4b2b944f4d65bdee83af886f5c4b263df95/numba-0.65.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b8a9008411615c69d083d1dcf477f75a5aa727b30beb16e139799e2be945cdfd", size = 3809537, upload-time = "2026-04-01T03:51:41.42Z" }, - { url = "https://files.pythonhosted.org/packages/b5/2c/6a3ca4128e253cb67affe06deb47688f51ce968f5111e2a06d010e6f1fa6/numba-0.65.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:af96c0cba53664efcb361528b8c75e011a6556c859c7e08424c2715201c6cf7a", size = 3508615, upload-time = "2026-04-01T03:51:43.444Z" }, - { url = "https://files.pythonhosted.org/packages/24/8d/e12d6ff4b9119db3cbf7b2db1ce257576441bd3c76388c786dea74f20b02/numba-0.65.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:05c0a9fdf75d85f57dee47b719e8d6415707b80aae45d75f63f9dc1b935c29f7", size = 3778456, upload-time = "2026-04-01T03:51:48.552Z" }, - { url = "https://files.pythonhosted.org/packages/17/89/abcd83e76f6a773276fe76244140671bcc5bf820f6e2ae1a15362ae4c8c9/numba-0.65.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:583680e0e8faf124d362df23b4b593f3221a8996341a63d1b664c122401bec2f", size = 3478464, upload-time = "2026-04-01T03:51:50.527Z" }, - { url = "https://files.pythonhosted.org/packages/ff/e5/8267b0adb0c01b52b553df5062fbbb42c30ed5362d08b85cc913a36f838f/numba-0.65.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c7fa502960f7a2f3f5cb025bc7bff888a3551277b92431bfdc5ba2f11a375749", size = 3816373, upload-time = "2026-04-01T03:51:56.18Z" }, - { url = "https://files.pythonhosted.org/packages/b0/f5/b8397ca360971669a93706b9274592b6864e4367a37d498fbbcb62aa2d48/numba-0.65.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5046c63f783ca3eb6195f826a50797465e7c4ce811daa17c9bea47e310c9b964", size = 3532782, upload-time = "2026-04-01T03:51:58.387Z" 
}, + { url = "https://files.pythonhosted.org/packages/69/47/a415af0283e4db0398104c6d1c11c9861a98dc67a7aa442a7769ed5d6196/numba-0.65.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:52bc6f3ceb8fcaff9b2ae26b4c6b1e9fee39db8d355534c0fe4f39a901246b84", size = 3802467, upload-time = "2026-04-24T02:02:27.712Z" }, + { url = "https://files.pythonhosted.org/packages/46/36/246f73ec99cfeab2f2cb2ce7d4218766cc36a2da418901223f4f4da9c813/numba-0.65.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90ca10b3463bae0bd70589726fe3c77d01d6b5fc86bee54bcdf9fb6b47c28977", size = 3502628, upload-time = "2026-04-24T02:02:29.763Z" }, + { url = "https://files.pythonhosted.org/packages/a0/22/b8d873f6466b20aa563fc9b33acd48dec89a07803ddaa2f1c8ca1cd33126/numba-0.65.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c09f49117ef255e1f1c6dad0c7a1ed39868243862a73be5706793241a3755f1b", size = 3810619, upload-time = "2026-04-24T02:02:36.041Z" }, + { url = "https://files.pythonhosted.org/packages/62/08/e16a8b5d9a018962ebb5c66be662317cde32b9f5dab08441f90bed5522fb/numba-0.65.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:594a8680b3fadac99e97e489b1fd89007177e5336713745c3b769528c635a464", size = 3509783, upload-time = "2026-04-24T02:02:38.245Z" }, + { url = "https://files.pythonhosted.org/packages/87/96/f3eb235fafa82a34e2ab5dd7dc9ffff998ebf5f0bbc23fa56a96aeb44da6/numba-0.65.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:71e73029bf53a62cc6afcf96be4bd942290d8b4c55f0a454fb536158115790f7", size = 3779602, upload-time = "2026-04-24T02:02:43.726Z" }, + { url = "https://files.pythonhosted.org/packages/09/90/b0f09b48752d23640b8284f22aa597737e8adaddc7fbfacc4708b7f73a4c/numba-0.65.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a07635e0be926b9bdbffb09137c230fb13f6ec0e564914ba937cee12ce3eb35", size = 3479532, upload-time = 
"2026-04-24T02:02:45.427Z" }, + { url = "https://files.pythonhosted.org/packages/03/36/98ddbcf3e4f04a6dd07e1c67249955920579ba4af6bb6868e3088f4ed282/numba-0.65.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:78abc28feff2c2ff8307fff3975b6438352759c9acb797ecd6b1fb6e7e39e31d", size = 3817198, upload-time = "2026-04-24T02:02:51.266Z" }, + { url = "https://files.pythonhosted.org/packages/a3/83/0dad21057ece5a835599f5d24099b091703995e23dbbf894f259e91c010b/numba-0.65.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee7676cb389555805f9b9a1840cbcd1ea6c8bd5376ab6918e3a29c5ea1dbda20", size = 3533862, upload-time = "2026-04-24T02:02:52.987Z" }, ] [[package]] @@ -3637,35 +4130,34 @@ wheels = [ [[package]] name = "onnxruntime" -version = "1.24.4" +version = "1.25.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "flatbuffers" }, { name = "numpy" }, { name = "packaging" }, { name = "protobuf" }, - { name = "sympy" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/38/31db1b232b4ba960065a90c1506ad7a56995cd8482033184e97fadca17cc/onnxruntime-1.24.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:cad1c2b3f455c55678ab2a8caa51fb420c25e6e3cf10f4c23653cdabedc8de78", size = 17341875, upload-time = "2026-03-17T22:05:51.669Z" }, - { url = "https://files.pythonhosted.org/packages/aa/60/c4d1c8043eb42f8a9aa9e931c8c293d289c48ff463267130eca97d13357f/onnxruntime-1.24.4-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1a5c5a544b22f90859c88617ecb30e161ee3349fcc73878854f43d77f00558b5", size = 15172485, upload-time = "2026-03-17T22:03:32.182Z" }, - { url = "https://files.pythonhosted.org/packages/6d/ab/5b68110e0460d73fad814d5bd11c7b1ddcce5c37b10177eb264d6a36e331/onnxruntime-1.24.4-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0d640eb9f3782689b55cfa715094474cd5662f2f137be6a6f847a594b6e9705c", size = 17244912, upload-time = 
"2026-03-17T22:04:37.251Z" }, - { url = "https://files.pythonhosted.org/packages/8b/f4/6b89e297b93704345f0f3f8c62229bee323ef25682a3f9b4f89a39324950/onnxruntime-1.24.4-cp312-cp312-win_amd64.whl", hash = "sha256:535b29475ca42b593c45fbb2152fbf1cdf3f287315bf650e6a724a0a1d065cdb", size = 12596856, upload-time = "2026-03-17T22:05:41.224Z" }, - { url = "https://files.pythonhosted.org/packages/43/06/8b8ec6e9e6a474fcd5d772453f627ad4549dfe3ab8c0bf70af5afcde551b/onnxruntime-1.24.4-cp312-cp312-win_arm64.whl", hash = "sha256:e6214096e14b7b52e3bee1903dc12dc7ca09cb65e26664668a4620cc5e6f9a90", size = 12270275, upload-time = "2026-03-17T22:05:31.132Z" }, - { url = "https://files.pythonhosted.org/packages/e9/f0/8a21ec0a97e40abb7d8da1e8b20fb9e1af509cc6d191f6faa75f73622fb2/onnxruntime-1.24.4-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e99a48078baaefa2b50fe5836c319499f71f13f76ed32d0211f39109147a49e0", size = 17341922, upload-time = "2026-03-17T22:03:56.364Z" }, - { url = "https://files.pythonhosted.org/packages/8b/25/d7908de8e08cee9abfa15b8aa82349b79733ae5865162a3609c11598805d/onnxruntime-1.24.4-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4aaed1e5e1aaacf2343c838a30a7c3ade78f13eeb16817411f929d04040a13", size = 15172290, upload-time = "2026-03-17T22:03:37.124Z" }, - { url = "https://files.pythonhosted.org/packages/7f/72/105ec27a78c5aa0154a7c0cd8c41c19a97799c3b12fc30392928997e3be3/onnxruntime-1.24.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e30c972bc02e072911aabb6891453ec73795386c0af2b761b65444b8a4c4745f", size = 17244738, upload-time = "2026-03-17T22:04:40.625Z" }, - { url = "https://files.pythonhosted.org/packages/05/fb/a592736d968c2f58e12de4d52088dda8e0e724b26ad5c0487263adb45875/onnxruntime-1.24.4-cp313-cp313-win_amd64.whl", hash = "sha256:3b6ba8b0181a3aa88edab00eb01424ffc06f42e71095a91186c2249415fcff93", size = 12597435, upload-time = "2026-03-17T22:05:43.826Z" }, - { url = 
"https://files.pythonhosted.org/packages/ad/04/ae2479e9841b64bd2eb44f8a64756c62593f896514369a11243b1b86ca5c/onnxruntime-1.24.4-cp313-cp313-win_arm64.whl", hash = "sha256:71d6a5c1821d6e8586a024000ece458db8f2fc0ecd050435d45794827ce81e19", size = 12269852, upload-time = "2026-03-17T22:05:33.353Z" }, - { url = "https://files.pythonhosted.org/packages/b4/af/a479a536c4398ffaf49fbbe755f45d5b8726bdb4335ab31b537f3d7149b8/onnxruntime-1.24.4-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1700f559c8086d06b2a4d5de51e62cb4ff5e2631822f71a36db8c72383db71ee", size = 15176861, upload-time = "2026-03-17T22:03:40.143Z" }, - { url = "https://files.pythonhosted.org/packages/be/13/19f5da70c346a76037da2c2851ecbf1266e61d7f0dcdb887c667210d4608/onnxruntime-1.24.4-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c74e268dc808e61e63784d43f9ddcdaf50a776c2819e8bd1d1b11ef64bf7e36", size = 17247454, upload-time = "2026-03-17T22:04:46.643Z" }, - { url = "https://files.pythonhosted.org/packages/89/db/b30dbbd6037847b205ab75d962bc349bf1e46d02a65b30d7047a6893ffd6/onnxruntime-1.24.4-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:fbff2a248940e3398ae78374c5a839e49a2f39079b488bc64439fa0ec327a3e4", size = 17343300, upload-time = "2026-03-17T22:03:59.223Z" }, - { url = "https://files.pythonhosted.org/packages/61/88/1746c0e7959961475b84c776d35601a21d445f463c93b1433a409ec3e188/onnxruntime-1.24.4-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2b7969e72d8cb53ffc88ab6d49dd5e75c1c663bda7be7eb0ece192f127343d1", size = 15175936, upload-time = "2026-03-17T22:03:43.671Z" }, - { url = "https://files.pythonhosted.org/packages/5f/ba/4699cde04a52cece66cbebc85bd8335a0d3b9ad485abc9a2e15946a1349d/onnxruntime-1.24.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14ed1f197fab812b695a5eaddb536c635e58a2fbbe50a517c78f082cc6ce9177", size = 17246432, upload-time = "2026-03-17T22:04:49.58Z" }, - { url = 
"https://files.pythonhosted.org/packages/ef/60/4590910841bb28bd3b4b388a9efbedf4e2d2cca99ddf0c863642b4e87814/onnxruntime-1.24.4-cp314-cp314-win_amd64.whl", hash = "sha256:311e309f573bf3c12aa5723e23823077f83d5e412a18499d4485c7eb41040858", size = 12903276, upload-time = "2026-03-17T22:05:46.349Z" }, - { url = "https://files.pythonhosted.org/packages/7f/6f/60e2c0acea1e1ac09b3e794b5a19c166eebf91c0b860b3e6db8e74983fda/onnxruntime-1.24.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f0b910e86b759a4732663ec61fd57ac42ee1b0066f68299de164220b660546d", size = 12594365, upload-time = "2026-03-17T22:05:35.795Z" }, - { url = "https://files.pythonhosted.org/packages/cf/68/0c05d10f8f6c40fe0912ebec0d5a33884aaa2af2053507e864dab0883208/onnxruntime-1.24.4-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa12ddc54c9c4594073abcaa265cd9681e95fb89dae982a6f508a794ca42e661", size = 15176889, upload-time = "2026-03-17T22:03:48.021Z" }, - { url = "https://files.pythonhosted.org/packages/6c/1d/1666dc64e78d8587d168fec4e3b7922b92eb286a2ddeebcf6acb55c7dc82/onnxruntime-1.24.4-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1cc6a518255f012134bc791975a6294806be9a3b20c4a54cca25194c90cf731", size = 17247021, upload-time = "2026-03-17T22:04:52.377Z" }, + { url = "https://files.pythonhosted.org/packages/7a/69/f98c6bda4c34ac382b70c36033a989ceffd1caf5afba47bd2ef26535850f/onnxruntime-1.25.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:8ecd3362de3fb496fb3e2d055a95d5acab611cf759a27609c6d99704c9d8f184", size = 17742518, upload-time = "2026-04-22T17:20:34.444Z" }, + { url = "https://files.pythonhosted.org/packages/5a/c6/19c5bfbc60396791e975652f982bcff9ff4b27947c8e2bf0064ac5d5727b/onnxruntime-1.25.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c99238d20bfa80ac68c7b03c2c936d389189ae40997f78a30d151570d7e18bf", size = 15841110, upload-time = "2026-04-22T17:19:31.284Z" }, + { url = 
"https://files.pythonhosted.org/packages/a9/1b/d681878f227513917d8620e4ea504af5eb3313fc01f8aea7b19a976c65db/onnxruntime-1.25.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be93baa694ef8e5831fcb7b542da21f502b122918b5b9612d9f02972e043ee01", size = 17996146, upload-time = "2026-04-22T17:19:53.792Z" }, + { url = "https://files.pythonhosted.org/packages/55/fe/ec98e416bd75063dea1e493661c7c939e18660ee41d6a63d7221e5657f48/onnxruntime-1.25.0-cp312-cp312-win_amd64.whl", hash = "sha256:9596040c1f7d247bbfab5d4db1e7651c790235e48e460c7d445ec81687d5a182", size = 12872370, upload-time = "2026-04-22T17:20:22.856Z" }, + { url = "https://files.pythonhosted.org/packages/f7/86/9a1ac7c8a8eba7967935d4c109fc956d8f9ba61cba61d9368315bb27d0bc/onnxruntime-1.25.0-cp312-cp312-win_arm64.whl", hash = "sha256:463aed7f5e4a3ca5a476db7e9bba9164fa26921ef34c37e59b28c4c61e55f266", size = 12600072, upload-time = "2026-04-22T17:20:11.523Z" }, + { url = "https://files.pythonhosted.org/packages/c1/5f/3b916a303f43e9c7eed3a705ea69f6867233c161dede30f4df21538c6693/onnxruntime-1.25.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:1b3d76cf770afba76859f270679c9ad0b017b9357eb5892e91926943e05ca82c", size = 17743247, upload-time = "2026-04-22T17:19:45.206Z" }, + { url = "https://files.pythonhosted.org/packages/d5/b3/9e45ba86ed39ab688578f21dd39ed4b575726205596891870a1a8b4d5ca9/onnxruntime-1.25.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cddb565dfd630550a8817b3d5493ffcfa0fec273b545b2816f2fce53384e1151", size = 15841442, upload-time = "2026-04-22T17:19:34.209Z" }, + { url = "https://files.pythonhosted.org/packages/d2/c4/810809e3b411fd66958bdd7285a63acf948988ab4189e1fd860a2f999db3/onnxruntime-1.25.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ade74e651e28b39e6bfd6f576cb9b8a4edfa0916234145154dc891bd55331c22", size = 17993660, upload-time = "2026-04-22T17:19:56.719Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/3d/b736cda9c71b3df022ca6bbcb991d14b7723c068dbebe826af9102e79777/onnxruntime-1.25.0-cp313-cp313-win_amd64.whl", hash = "sha256:9196c32c039c37ce8362cbee0aa3a704679be5f2b6fb3e849fea927c98fe1e5b", size = 12871906, upload-time = "2026-04-22T17:20:25.705Z" }, + { url = "https://files.pythonhosted.org/packages/0d/1f/d7bb87cdbb839b356133e9f9e3851fc0c3130dd1c360640c9ce948e3e083/onnxruntime-1.25.0-cp313-cp313-win_arm64.whl", hash = "sha256:b3e52dc2208dec6f61ef118dff04610927e9a18d99e019a828799b23cc9cdea4", size = 12599753, upload-time = "2026-04-22T17:20:14.661Z" }, + { url = "https://files.pythonhosted.org/packages/04/3c/edb0d825a65beed40a3de8a51521d49d433aa767f8d00e633cd2602024c0/onnxruntime-1.25.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de8548d8fe8fd58ca841178051d535d6f378efae14a4b4eb336617d80540fb41", size = 15852628, upload-time = "2026-04-22T17:19:36.886Z" }, + { url = "https://files.pythonhosted.org/packages/55/51/7a660b4d087f27b273ff725f744880e7664f64a9331bfb1eae91ed2a9f0a/onnxruntime-1.25.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4edec672d09e34b9e83ad09c44454ce97627388f32858b1d59fe01d091ff54b5", size = 17997241, upload-time = "2026-04-22T17:19:59.653Z" }, + { url = "https://files.pythonhosted.org/packages/78/be/5254acb849f414c8fb2643fe21f2c2ef8089fab18569f24775ccb8ee182d/onnxruntime-1.25.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:38f27febd2ff034a600a8bdbea34b1f7c961a2dab6bcb5351e70548fea456161", size = 17744932, upload-time = "2026-04-22T17:19:48.097Z" }, + { url = "https://files.pythonhosted.org/packages/49/98/c2593aaa392e278a41bec35a00298aa5f22bb382483ad02ca451a556b2a2/onnxruntime-1.25.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e0ae389ed1647f11c1b501ba1cef1e2c7453002f626136ace214c9c46153ee4", size = 15842603, upload-time = "2026-04-22T17:19:39.879Z" }, + { url = 
"https://files.pythonhosted.org/packages/08/b6/07e924b8a47adc9ce2f92a7ef71a6fb709981b1ebd08179f61cbce6ff9b3/onnxruntime-1.25.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7ca32d38173c0f58699ca9dc9e867de74d2c2ab7d1c2d969f862ee8633370b77", size = 17994808, upload-time = "2026-04-22T17:20:02.462Z" }, + { url = "https://files.pythonhosted.org/packages/56/31/e0147d87acfd06992a9bf45ffc070fd3ab49ff9a1f12de9fb403f2fc0b97/onnxruntime-1.25.0-cp314-cp314-win_amd64.whl", hash = "sha256:a2829e29621db7a4bcd457e6d0f3e4f541fb274c7127e7d2e1a5b46c70572672", size = 13183697, upload-time = "2026-04-22T17:20:28.658Z" }, + { url = "https://files.pythonhosted.org/packages/18/29/b1d5b91d04ae80768ed8e38639ab2fcc92750a67fddc30ad6b700f244113/onnxruntime-1.25.0-cp314-cp314-win_arm64.whl", hash = "sha256:2bed9b35568b3ecf8ab34dc832d37216e47947e86508a0fd6b75e4c19d7ba907", size = 12933438, upload-time = "2026-04-22T17:20:17.223Z" }, + { url = "https://files.pythonhosted.org/packages/56/f4/cfd47f88da545ea57c1f2a4b5886d455ec64f53b723b1a448fc44ed757e9/onnxruntime-1.25.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:00548a16e8f0d52cb1c67ef50177e5e2be848ccffc6db60010ee37faaccbbb6f", size = 15853591, upload-time = "2026-04-22T17:19:42.325Z" }, + { url = "https://files.pythonhosted.org/packages/89/de/8b406be6ea4f2c254f9bc850cbe8038064c7768a94cdf7785420b3652ea7/onnxruntime-1.25.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a071a0740388e0ffad081c583761f37837b113bde3d03dc70790ed6cf4f4de0b", size = 17996166, upload-time = "2026-04-22T17:20:05.873Z" }, ] [[package]] @@ -3769,6 +4261,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/70/44/5191d2e4026f86a2a109053e194d3ba7a31a2d10a9c2348368c63ed4e85a/pandas-2.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3869faf4bd07b3b66a9f462417d0ca3a9df29a9f6abd5d0d0dbab15dac7abe87", size = 13202175, upload-time = "2025-09-29T23:31:59.173Z" }, ] 
+[[package]] +name = "pandocfilters" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/70/6f/3dd4940bbe001c06a65f88e36bad298bc7a0de5036115639926b0c5c0458/pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e", size = 8454, upload-time = "2024-01-18T20:08:13.726Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/af/4fbc8cab944db5d21b7e2a5b8e9211a03a79852b1157e2c102fcc61ac440/pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc", size = 8663, upload-time = "2024-01-18T20:08:11.28Z" }, +] + [[package]] name = "parso" version = "0.8.6" @@ -3780,16 +4281,16 @@ wheels = [ [[package]] name = "pathspec" -version = "1.0.4" +version = "1.1.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fa/36/e27608899f9b8d4dff0617b2d9ab17ca5608956ca44461ac14ac48b44015/pathspec-1.0.4.tar.gz", hash = "sha256:0210e2ae8a21a9137c0d470578cb0e595af87edaa6ebf12ff176f14a02e0e645", size = 131200, upload-time = "2026-01-27T03:59:46.938Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/82/42f767fc1c1143d6fd36efb827202a2d997a375e160a71eb2888a925aac1/pathspec-1.1.1.tar.gz", hash = "sha256:17db5ecd524104a120e173814c90367a96a98d07c45b2e10c2f3919fff91bf5a", size = 135180, upload-time = "2026-04-27T01:46:08.907Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/3c/2c197d226f9ea224a9ab8d197933f9da0ae0aac5b6e0f884e2b8d9c8e9f7/pathspec-1.0.4-py3-none-any.whl", hash = "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723", size = 55206, upload-time = "2026-01-27T03:59:45.137Z" }, + { url = "https://files.pythonhosted.org/packages/f1/d9/7fb5aa316bc299258e68c73ba3bddbc499654a07f151cba08f6153988714/pathspec-1.1.1-py3-none-any.whl", hash = 
"sha256:a00ce642f577bf7f473932318056212bc4f8bfdf53128c78bbd5af0b9b20b189", size = 57328, upload-time = "2026-04-27T01:46:07.06Z" }, ] [[package]] name = "peft" -version = "0.18.1" +version = "0.19.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "accelerate" }, @@ -3803,9 +4304,9 @@ dependencies = [ { name = "tqdm" }, { name = "transformers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d8/48/147b3ea999560b40a34fd78724c7777aa9d18409c2250bdcaf9c4f2db7fc/peft-0.18.1.tar.gz", hash = "sha256:2dd0d6bfce936d1850e48aaddbd250941c5c02fc8ef3237cd8fd5aac35e0bae2", size = 635030, upload-time = "2026-01-09T13:08:01.136Z" } +sdist = { url = "https://files.pythonhosted.org/packages/86/cf/037f1e3d5186496c05513a6754639e2dab3038a05f384284d49a9bd06a2d/peft-0.19.1.tar.gz", hash = "sha256:0d97542fe96dcdaa20d3b81c06f26f988618f416a73544ab23c3618ccb674a40", size = 763738, upload-time = "2026-04-16T15:46:45.105Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b3/14/b4e3f574acf349ae6f61f9c000a77f97a3b315b4bb6ad03791e79ae4a568/peft-0.18.1-py3-none-any.whl", hash = "sha256:0bf06847a3551e3019fc58c440cffc9a6b73e6e2962c95b52e224f77bbdb50f1", size = 556960, upload-time = "2026-01-09T13:07:55.865Z" }, + { url = "https://files.pythonhosted.org/packages/e8/b6/f54d676ed93cc2dd2234c3b172ea9c8c3d7d29361e66b1b23dec57a67465/peft-0.19.1-py3-none-any.whl", hash = "sha256:2113f72a81621b5913ef28f9022204c742df111890c5f49d812716a4a301e356", size = 680692, upload-time = "2026-04-16T15:46:42.886Z" }, ] [[package]] @@ -3955,7 +4456,7 @@ wheels = [ [[package]] name = "pre-commit" -version = "4.5.1" +version = "4.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cfgv" }, @@ -3964,9 +4465,18 @@ dependencies = [ { name = "pyyaml" }, { name = "virtualenv" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/40/f1/6d86a29246dfd2e9b6237f0b5823717f60cad94d47ddc26afa916d21f525/pre_commit-4.5.1.tar.gz", hash = 
"sha256:eb545fcff725875197837263e977ea257a402056661f09dae08e4b149b030a61", size = 198232, upload-time = "2025-12-16T21:14:33.552Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/22/2de9408ac81acbb8a7d05d4cc064a152ccf33b3d480ebe0cd292153db239/pre_commit-4.6.0.tar.gz", hash = "sha256:718d2208cef53fdc38206e40524a6d4d9576d103eb16f0fec11c875e7716e9d9", size = 198525, upload-time = "2026-04-21T20:31:41.613Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5d/19/fd3ef348460c80af7bb4669ea7926651d1f95c23ff2df18b9d24bab4f3fa/pre_commit-4.5.1-py2.py3-none-any.whl", hash = "sha256:3b3afd891e97337708c1674210f8eba659b52a38ea5f822ff142d10786221f77", size = 226437, upload-time = "2025-12-16T21:14:32.409Z" }, + { url = "https://files.pythonhosted.org/packages/80/6e/4b28b62ecb6aae56769c34a8ff1d661473ec1e9519e2d5f8b2c150086b26/pre_commit-4.6.0-py2.py3-none-any.whl", hash = "sha256:e2cf246f7299edcabcf15f9b0571fdce06058527f0a06535068a86d38089f29b", size = 226472, upload-time = "2026-04-21T20:31:40.092Z" }, +] + +[[package]] +name = "prometheus-client" +version = "0.25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1b/fb/d9aa83ffe43ce1f19e557c0971d04b90561b0cfd50762aafb01968285553/prometheus_client-0.25.0.tar.gz", hash = "sha256:5e373b75c31afb3c86f1a52fa1ad470c9aace18082d39ec0d2f918d11cc9ba28", size = 86035, upload-time = "2026-04-09T19:53:42.359Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/9b/d4b1e644385499c8346fa9b622a3f030dce14cd6ef8a1871c221a17a67e7/prometheus_client-0.25.0-py3-none-any.whl", hash = "sha256:d5aec89e349a6ec230805d0df882f3807f74fd6c1a2fa86864e3c2279059fed1", size = 64154, upload-time = "2026-04-09T19:53:41.324Z" }, ] [[package]] @@ -4127,45 +4637,45 @@ wheels = [ [[package]] name = "pyarrow" -version = "23.0.1" +version = "24.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/88/22/134986a4cc224d593c1afde5494d18ff629393d74cc2eddb176669f234a4/pyarrow-23.0.1.tar.gz", hash = "sha256:b8c5873e33440b2bc2f4a79d2b47017a89c5a24116c055625e6f2ee50523f019", size = 1167336, upload-time = "2026-02-16T10:14:12.39Z" } +sdist = { url = "https://files.pythonhosted.org/packages/91/13/13e1069b351bdc3881266e11147ffccf687505dbb0ea74036237f5d454a5/pyarrow-24.0.0.tar.gz", hash = "sha256:85fe721a14dd823aca09127acbb06c3ca723efbd436c004f16bca601b04dcc83", size = 1180261, upload-time = "2026-04-21T10:51:25.837Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9a/4b/4166bb5abbfe6f750fc60ad337c43ecf61340fa52ab386da6e8dbf9e63c4/pyarrow-23.0.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:f4b0dbfa124c0bb161f8b5ebb40f1a680b70279aa0c9901d44a2b5a20806039f", size = 34214575, upload-time = "2026-02-16T10:09:56.225Z" }, - { url = "https://files.pythonhosted.org/packages/e1/da/3f941e3734ac8088ea588b53e860baeddac8323ea40ce22e3d0baa865cc9/pyarrow-23.0.1-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:7707d2b6673f7de054e2e83d59f9e805939038eebe1763fe811ee8fa5c0cd1a7", size = 35832540, upload-time = "2026-02-16T10:10:03.428Z" }, - { url = "https://files.pythonhosted.org/packages/88/7c/3d841c366620e906d54430817531b877ba646310296df42ef697308c2705/pyarrow-23.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:86ff03fb9f1a320266e0de855dee4b17da6794c595d207f89bba40d16b5c78b9", size = 44470940, upload-time = "2026-02-16T10:10:10.704Z" }, - { url = "https://files.pythonhosted.org/packages/2c/a5/da83046273d990f256cb79796a190bbf7ec999269705ddc609403f8c6b06/pyarrow-23.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:813d99f31275919c383aab17f0f455a04f5a429c261cc411b1e9a8f5e4aaaa05", size = 47586063, upload-time = "2026-02-16T10:10:17.95Z" }, - { url = 
"https://files.pythonhosted.org/packages/5b/3c/b7d2ebcff47a514f47f9da1e74b7949138c58cfeb108cdd4ee62f43f0cf3/pyarrow-23.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bf5842f960cddd2ef757d486041d57c96483efc295a8c4a0e20e704cbbf39c67", size = 48173045, upload-time = "2026-02-16T10:10:25.363Z" }, - { url = "https://files.pythonhosted.org/packages/43/b2/b40961262213beaba6acfc88698eb773dfce32ecdf34d19291db94c2bd73/pyarrow-23.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564baf97c858ecc03ec01a41062e8f4698abc3e6e2acd79c01c2e97880a19730", size = 50621741, upload-time = "2026-02-16T10:10:33.477Z" }, - { url = "https://files.pythonhosted.org/packages/f6/70/1fdda42d65b28b078e93d75d371b2185a61da89dda4def8ba6ba41ebdeb4/pyarrow-23.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:07deae7783782ac7250989a7b2ecde9b3c343a643f82e8a4df03d93b633006f0", size = 27620678, upload-time = "2026-02-16T10:10:39.31Z" }, - { url = "https://files.pythonhosted.org/packages/47/10/2cbe4c6f0fb83d2de37249567373d64327a5e4d8db72f486db42875b08f6/pyarrow-23.0.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:6b8fda694640b00e8af3c824f99f789e836720aa8c9379fb435d4c4953a756b8", size = 34210066, upload-time = "2026-02-16T10:10:45.487Z" }, - { url = "https://files.pythonhosted.org/packages/cb/4f/679fa7e84dadbaca7a65f7cdba8d6c83febbd93ca12fa4adf40ba3b6362b/pyarrow-23.0.1-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:8ff51b1addc469b9444b7c6f3548e19dc931b172ab234e995a60aea9f6e6025f", size = 35825526, upload-time = "2026-02-16T10:10:52.266Z" }, - { url = "https://files.pythonhosted.org/packages/f9/63/d2747d930882c9d661e9398eefc54f15696547b8983aaaf11d4a2e8b5426/pyarrow-23.0.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:71c5be5cbf1e1cb6169d2a0980850bccb558ddc9b747b6206435313c47c37677", size = 44473279, upload-time = "2026-02-16T10:11:01.557Z" }, - { url = 
"https://files.pythonhosted.org/packages/b3/93/10a48b5e238de6d562a411af6467e71e7aedbc9b87f8d3a35f1560ae30fb/pyarrow-23.0.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:9b6f4f17b43bc39d56fec96e53fe89d94bac3eb134137964371b45352d40d0c2", size = 47585798, upload-time = "2026-02-16T10:11:09.401Z" }, - { url = "https://files.pythonhosted.org/packages/5c/20/476943001c54ef078dbf9542280e22741219a184a0632862bca4feccd666/pyarrow-23.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fc13fc6c403d1337acab46a2c4346ca6c9dec5780c3c697cf8abfd5e19b6b37", size = 48179446, upload-time = "2026-02-16T10:11:17.781Z" }, - { url = "https://files.pythonhosted.org/packages/4b/b6/5dd0c47b335fcd8edba9bfab78ad961bd0fd55ebe53468cc393f45e0be60/pyarrow-23.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5c16ed4f53247fa3ffb12a14d236de4213a4415d127fe9cebed33d51671113e2", size = 50623972, upload-time = "2026-02-16T10:11:26.185Z" }, - { url = "https://files.pythonhosted.org/packages/d5/09/a532297c9591a727d67760e2e756b83905dd89adb365a7f6e9c72578bcc1/pyarrow-23.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:cecfb12ef629cf6be0b1887f9f86463b0dd3dc3195ae6224e74006be4736035a", size = 27540749, upload-time = "2026-02-16T10:12:23.297Z" }, - { url = "https://files.pythonhosted.org/packages/a5/8e/38749c4b1303e6ae76b3c80618f84861ae0c55dd3c2273842ea6f8258233/pyarrow-23.0.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:29f7f7419a0e30264ea261fdc0e5fe63ce5a6095003db2945d7cd78df391a7e1", size = 34471544, upload-time = "2026-02-16T10:11:32.535Z" }, - { url = "https://files.pythonhosted.org/packages/a3/73/f237b2bc8c669212f842bcfd842b04fc8d936bfc9d471630569132dc920d/pyarrow-23.0.1-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:33d648dc25b51fd8055c19e4261e813dfc4d2427f068bcecc8b53d01b81b0500", size = 35949911, upload-time = "2026-02-16T10:11:39.813Z" }, - { url = 
"https://files.pythonhosted.org/packages/0c/86/b912195eee0903b5611bf596833def7d146ab2d301afeb4b722c57ffc966/pyarrow-23.0.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:cd395abf8f91c673dd3589cadc8cc1ee4e8674fa61b2e923c8dd215d9c7d1f41", size = 44520337, upload-time = "2026-02-16T10:11:47.764Z" }, - { url = "https://files.pythonhosted.org/packages/69/c2/f2a717fb824f62d0be952ea724b4f6f9372a17eed6f704b5c9526f12f2f1/pyarrow-23.0.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:00be9576d970c31defb5c32eb72ef585bf600ef6d0a82d5eccaae96639cf9d07", size = 47548944, upload-time = "2026-02-16T10:11:56.607Z" }, - { url = "https://files.pythonhosted.org/packages/84/a7/90007d476b9f0dc308e3bc57b832d004f848fd6c0da601375d20d92d1519/pyarrow-23.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c2139549494445609f35a5cda4eb94e2c9e4d704ce60a095b342f82460c73a83", size = 48236269, upload-time = "2026-02-16T10:12:04.47Z" }, - { url = "https://files.pythonhosted.org/packages/b0/3f/b16fab3e77709856eb6ac328ce35f57a6d4a18462c7ca5186ef31b45e0e0/pyarrow-23.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:7044b442f184d84e2351e5084600f0d7343d6117aabcbc1ac78eb1ae11eb4125", size = 50604794, upload-time = "2026-02-16T10:12:11.797Z" }, - { url = "https://files.pythonhosted.org/packages/e9/a1/22df0620a9fac31d68397a75465c344e83c3dfe521f7612aea33e27ab6c0/pyarrow-23.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a35581e856a2fafa12f3f54fce4331862b1cfb0bef5758347a858a4aa9d6bae8", size = 27660642, upload-time = "2026-02-16T10:12:17.746Z" }, - { url = "https://files.pythonhosted.org/packages/8d/1b/6da9a89583ce7b23ac611f183ae4843cd3a6cf54f079549b0e8c14031e73/pyarrow-23.0.1-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:5df1161da23636a70838099d4aaa65142777185cc0cdba4037a18cee7d8db9ca", size = 34238755, upload-time = "2026-02-16T10:12:32.819Z" }, - { url = 
"https://files.pythonhosted.org/packages/ae/b5/d58a241fbe324dbaeb8df07be6af8752c846192d78d2272e551098f74e88/pyarrow-23.0.1-cp314-cp314-macosx_12_0_x86_64.whl", hash = "sha256:fa8e51cb04b9f8c9c5ace6bab63af9a1f88d35c0d6cbf53e8c17c098552285e1", size = 35847826, upload-time = "2026-02-16T10:12:38.949Z" }, - { url = "https://files.pythonhosted.org/packages/54/a5/8cbc83f04aba433ca7b331b38f39e000efd9f0c7ce47128670e737542996/pyarrow-23.0.1-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:0b95a3994f015be13c63148fef8832e8a23938128c185ee951c98908a696e0eb", size = 44536859, upload-time = "2026-02-16T10:12:45.467Z" }, - { url = "https://files.pythonhosted.org/packages/36/2e/c0f017c405fcdc252dbccafbe05e36b0d0eb1ea9a958f081e01c6972927f/pyarrow-23.0.1-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:4982d71350b1a6e5cfe1af742c53dfb759b11ce14141870d05d9e540d13bc5d1", size = 47614443, upload-time = "2026-02-16T10:12:55.525Z" }, - { url = "https://files.pythonhosted.org/packages/af/6b/2314a78057912f5627afa13ba43809d9d653e6630859618b0fd81a4e0759/pyarrow-23.0.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c250248f1fe266db627921c89b47b7c06fee0489ad95b04d50353537d74d6886", size = 48232991, upload-time = "2026-02-16T10:13:04.729Z" }, - { url = "https://files.pythonhosted.org/packages/40/f2/1bcb1d3be3460832ef3370d621142216e15a2c7c62602a4ea19ec240dd64/pyarrow-23.0.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5f4763b83c11c16e5f4c15601ba6dfa849e20723b46aa2617cb4bffe8768479f", size = 50645077, upload-time = "2026-02-16T10:13:14.147Z" }, - { url = "https://files.pythonhosted.org/packages/eb/3f/b1da7b61cd66566a4d4c8383d376c606d1c34a906c3f1cb35c479f59d1aa/pyarrow-23.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:3a4c85ef66c134161987c17b147d6bffdca4566f9a4c1d81a0a01cdf08414ea5", size = 28234271, upload-time = "2026-02-16T10:14:09.397Z" }, - { url = 
"https://files.pythonhosted.org/packages/b5/78/07f67434e910a0f7323269be7bfbf58699bd0c1d080b18a1ab49ba943fe8/pyarrow-23.0.1-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:17cd28e906c18af486a499422740298c52d7c6795344ea5002a7720b4eadf16d", size = 34488692, upload-time = "2026-02-16T10:13:21.541Z" }, - { url = "https://files.pythonhosted.org/packages/50/76/34cf7ae93ece1f740a04910d9f7e80ba166b9b4ab9596a953e9e62b90fe1/pyarrow-23.0.1-cp314-cp314t-macosx_12_0_x86_64.whl", hash = "sha256:76e823d0e86b4fb5e1cf4a58d293036e678b5a4b03539be933d3b31f9406859f", size = 35964383, upload-time = "2026-02-16T10:13:28.63Z" }, - { url = "https://files.pythonhosted.org/packages/46/90/459b827238936d4244214be7c684e1b366a63f8c78c380807ae25ed92199/pyarrow-23.0.1-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:a62e1899e3078bf65943078b3ad2a6ddcacf2373bc06379aac61b1e548a75814", size = 44538119, upload-time = "2026-02-16T10:13:35.506Z" }, - { url = "https://files.pythonhosted.org/packages/28/a1/93a71ae5881e99d1f9de1d4554a87be37da11cd6b152239fb5bd924fdc64/pyarrow-23.0.1-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:df088e8f640c9fae3b1f495b3c64755c4e719091caf250f3a74d095ddf3c836d", size = 47571199, upload-time = "2026-02-16T10:13:42.504Z" }, - { url = "https://files.pythonhosted.org/packages/88/a3/d2c462d4ef313521eaf2eff04d204ac60775263f1fb08c374b543f79f610/pyarrow-23.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:46718a220d64677c93bc243af1d44b55998255427588e400677d7192671845c7", size = 48259435, upload-time = "2026-02-16T10:13:49.226Z" }, - { url = "https://files.pythonhosted.org/packages/cc/f1/11a544b8c3d38a759eb3fbb022039117fd633e9a7b19e4841cc3da091915/pyarrow-23.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a09f3876e87f48bc2f13583ab551f0379e5dfb83210391e68ace404181a20690", size = 50629149, upload-time = "2026-02-16T10:13:57.238Z" }, - { url = 
"https://files.pythonhosted.org/packages/50/f2/c0e76a0b451ffdf0cf788932e182758eb7558953f4f27f1aff8e2518b653/pyarrow-23.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:527e8d899f14bd15b740cd5a54ad56b7f98044955373a17179d5956ddb93d9ce", size = 28365807, upload-time = "2026-02-16T10:14:03.892Z" }, + { url = "https://files.pythonhosted.org/packages/b4/a9/9686d9f07837f91f775e8932659192e02c74f9d8920524b480b85212cc68/pyarrow-24.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:6233c9ed9ab9d1db47de57d9753256d9dcffbf42db341576099f0fd9f6bf4810", size = 34981559, upload-time = "2026-04-21T10:47:22.17Z" }, + { url = "https://files.pythonhosted.org/packages/80/b6/0ddf0e9b6ead3474ab087ae598c76b031fc45532bf6a63f3a553440fb258/pyarrow-24.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:f7616236ec1bc2b15bfdec22a71ab38851c86f8f05ff64f379e1278cf20c634a", size = 36663654, upload-time = "2026-04-21T10:47:28.315Z" }, + { url = "https://files.pythonhosted.org/packages/7c/3b/926382efe8ce27ba729071d3566ade6dfb86bdf112f366000196b2f5780a/pyarrow-24.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:1617043b99bd33e5318ae18eb2919af09c71322ef1ca46566cdafc6e6712fb66", size = 45679394, upload-time = "2026-04-21T10:47:34.821Z" }, + { url = "https://files.pythonhosted.org/packages/b3/7a/829f7d9dfd37c207206081d6dad474d81dde29952401f07f2ba507814818/pyarrow-24.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6165461f55ef6314f026de6638d661188e3455d3ec49834556a0ebbdbace18bb", size = 48863122, upload-time = "2026-04-21T10:47:42.056Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e8/f88ce625fe8babaae64e8db2d417c7653adb3019b08aae85c5ed787dc816/pyarrow-24.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3b13dedfe76a0ad2d1d859b0811b53827a4e9d93a0bcb05cf59333ab4980cc7e", size = 49376032, upload-time = "2026-04-21T10:47:48.967Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/7a/82c363caa145fff88fb475da50d3bf52bb024f61917be5424c3392eaf878/pyarrow-24.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:25ea65d868eb04015cd18e6df2fbe98f07e5bda2abefabcb88fce39a947716f6", size = 51929490, upload-time = "2026-04-21T10:47:55.981Z" }, + { url = "https://files.pythonhosted.org/packages/66/1c/e3e72c8014ad2743ca64a701652c733cc5cbcee15c0463a32a8c55518d9e/pyarrow-24.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:295f0a7f2e242dabd513737cf076007dc5b2d59237e3eca37b05c0c6446f3826", size = 27355660, upload-time = "2026-04-21T10:48:01.718Z" }, + { url = "https://files.pythonhosted.org/packages/6f/d3/a1abf004482026ddc17f4503db227787fa3cfe41ec5091ff20e4fea55e57/pyarrow-24.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:02b001b3ed4723caa44f6cd1af2d5c86aa2cf9971dacc2ffa55b21237713dfba", size = 34976759, upload-time = "2026-04-21T10:48:07.258Z" }, + { url = "https://files.pythonhosted.org/packages/4f/4a/34f0a36d28a2dd32225301b79daad44e243dc1a2bb77d43b60749be255c4/pyarrow-24.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:04920d6a71aabd08a0417709efce97d45ea8e6fb733d9ca9ecffb13c67839f68", size = 36658471, upload-time = "2026-04-21T10:48:13.347Z" }, + { url = "https://files.pythonhosted.org/packages/1f/78/543b94712ae8bb1a6023bcc1acf1a740fbff8286747c289cd9468fced2a5/pyarrow-24.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:a964266397740257f16f7bb2e4f08a0c81454004beab8ff59dd531b73610e9f2", size = 45675981, upload-time = "2026-04-21T10:48:20.201Z" }, + { url = "https://files.pythonhosted.org/packages/84/9f/8fb7c222b100d314137fa40ec050de56cd8c6d957d1cfff685ce72f15b17/pyarrow-24.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:6f066b179d68c413374294bc1735f68475457c933258df594443bb9d88ddc2a0", size = 48859172, upload-time = "2026-04-21T10:48:27.541Z" }, + { url = 
"https://files.pythonhosted.org/packages/a7/d3/1ea72538e6c8b3b475ed78d1049a2c518e655761ea50fe1171fc855fcab7/pyarrow-24.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1183baeb14c5f587b1ec52831e665718ce632caab84b7cd6b85fd44f96114495", size = 49385733, upload-time = "2026-04-21T10:48:34.7Z" }, + { url = "https://files.pythonhosted.org/packages/c3/be/c3d8b06a1ba35f2260f8e1f771abbee7d5e345c0937aab90675706b1690a/pyarrow-24.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:806f24b4085453c197a5078218d1ee08783ebbba271badd153d1ae22a3ee804f", size = 51934335, upload-time = "2026-04-21T10:48:42.099Z" }, + { url = "https://files.pythonhosted.org/packages/9c/62/89e07a1e7329d2cde3e3c6994ba0839a24977a2beda8be6005ea3d860b99/pyarrow-24.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:e4505fc6583f7b05ab854934896bcac8253b04ac1171a77dfb73efef92076d91", size = 27271748, upload-time = "2026-04-21T10:49:42.532Z" }, + { url = "https://files.pythonhosted.org/packages/17/1a/cff3a59f80b5b1658549d46611b67163f65e0664431c076ad728bf9d5af4/pyarrow-24.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:1a4e45017efbf115032e4475ee876d525e0e36c742214fbe405332480ecd6275", size = 35238554, upload-time = "2026-04-21T10:48:48.526Z" }, + { url = "https://files.pythonhosted.org/packages/a8/99/cce0f42a327bfef2c420fb6078a3eb834826e5d6697bf3009fe11d2ad051/pyarrow-24.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:7986f1fa71cee060ad00758bcc79d3a93bab8559bf978fab9e53472a2e25a17b", size = 36782301, upload-time = "2026-04-21T10:48:55.181Z" }, + { url = "https://files.pythonhosted.org/packages/2a/66/8e560d5ff6793ca29aca213c53eec0dd482dd46cb93b2819e5aab52e4252/pyarrow-24.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:d3e0b61e8efb24ed38898e5cdc5fffa9124be480008d401a1f8071500494ae42", size = 45721929, upload-time = "2026-04-21T10:49:03.676Z" }, + { url = 
"https://files.pythonhosted.org/packages/27/0c/a26e25505d030716e078d9f16eb74973cbf0b33b672884e9f9da1c83b871/pyarrow-24.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:55a3bc1e3df3b5567b7d27ef551b2283f0c68a5e86f1cd56abc569da4f31335b", size = 48825365, upload-time = "2026-04-21T10:49:11.714Z" }, + { url = "https://files.pythonhosted.org/packages/5f/eb/771f9ecb0c65e73fe9dccdd1717901b9594f08c4515d000c7c62df573811/pyarrow-24.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:641f795b361874ac9da5294f8f443dfdbee355cf2bd9e3b8d97aaac2306b9b37", size = 49451819, upload-time = "2026-04-21T10:49:21.474Z" }, + { url = "https://files.pythonhosted.org/packages/48/da/61ae89a88732f5a785646f3ec6125dbb640fa98a540eb2b9889caa561403/pyarrow-24.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8adc8e6ce5fccf5dc707046ae4914fd537def529709cc0d285d37a7f9cd442ca", size = 51909252, upload-time = "2026-04-21T10:49:31.164Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1a/8dd5cafab7b66573fa91c03d06d213356ad4edd71813aa75e08ce2b3a844/pyarrow-24.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:9b18371ad2f44044b81a8d23bc2d8a9b6a6226dca775e8e16cfee640473d6c5d", size = 27388127, upload-time = "2026-04-21T10:49:37.334Z" }, + { url = "https://files.pythonhosted.org/packages/ad/80/d022a34ff05d2cbedd8ccf841fc1f532ecfa9eb5ed1711b56d0e0ea71fc9/pyarrow-24.0.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:1cc9057f0319e26333b357e17f3c2c022f1a83739b48a88b25bfd5fa2dc18838", size = 35007997, upload-time = "2026-04-21T10:49:48.796Z" }, + { url = "https://files.pythonhosted.org/packages/1a/ff/f01485fda6f4e5d441afb8dd5e7681e4db18826c1e271852f5d3957d6a80/pyarrow-24.0.0-cp314-cp314-macosx_12_0_x86_64.whl", hash = "sha256:e6f1278ee4785b6db21229374a1c9e54ec7c549de5d1efc9630b6207de7e170b", size = 36678720, upload-time = "2026-04-21T10:49:55.858Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/c2/2d2d5fea814237923f71b36495211f20b43a1576f9a4d6da7e751a64ec6f/pyarrow-24.0.0-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:adbbedc55506cbdabb830890444fb856bfb0060c46c6f8026c6c2f2cf86ae795", size = 45741852, upload-time = "2026-04-21T10:50:04.624Z" }, + { url = "https://files.pythonhosted.org/packages/8e/3a/28ba9c1c1ebdbb5f1b94dfebb46f207e52e6a554b7fe4132540fde29a3a0/pyarrow-24.0.0-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:ae8a1145af31d903fa9bb166824d7abe9b4681a000b0159c9fb99c11bc11ad26", size = 48889852, upload-time = "2026-04-21T10:50:12.293Z" }, + { url = "https://files.pythonhosted.org/packages/df/51/4a389acfd31dca009f8fb82d7f510bb4130f2b3a8e18cf00194d0687d8ac/pyarrow-24.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d7027eba1df3b2069e2e8d80f644fa0918b68c46432af3d088ddd390d063ecde", size = 49445207, upload-time = "2026-04-21T10:50:20.677Z" }, + { url = "https://files.pythonhosted.org/packages/19/4b/0bab2b23d2ae901b1b9a03c0efd4b2d070256f8ce3fc43f6e58c167b2081/pyarrow-24.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e56a1ffe9bf7b727432b89104cc0849c21582949dd7bdcb34f17b2001a351a76", size = 51954117, upload-time = "2026-04-21T10:50:29.14Z" }, + { url = "https://files.pythonhosted.org/packages/29/88/f4e9145da0417b3d2c12035a8492b35ff4a3dbc653e614fcfb51d9dedb38/pyarrow-24.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:38be1808cdd068605b787e6ca9119b27eb275a0234e50212c3492331680c3b1e", size = 28001155, upload-time = "2026-04-21T10:51:22.337Z" }, + { url = "https://files.pythonhosted.org/packages/79/4f/46a49a63f43526da895b1a45bbb51d5baf8e4d77159f8528fc3e5490007f/pyarrow-24.0.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:418e48ce50a45a6a6c73c454677203a9c75c966cb1e92ca3370959185f197a05", size = 35250387, upload-time = "2026-04-21T10:50:35.552Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/da/d5e0cd5ef00796922404806d5f00325cdadc3441ce2c13fe7115f2df9a64/pyarrow-24.0.0-cp314-cp314t-macosx_12_0_x86_64.whl", hash = "sha256:2f16197705a230a78270cdd4ea8a1d57e86b2fdcbc34a1f6aebc72e65c986f9a", size = 36797102, upload-time = "2026-04-21T10:50:42.417Z" }, + { url = "https://files.pythonhosted.org/packages/34/c7/5904145b0a593a05236c882933d439b5720f0a145381179063722fbfc123/pyarrow-24.0.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:fb24ac194bfc5e86839d7dcd52092ee31e5fe6733fe11f5e3b06ef0812b20072", size = 45745118, upload-time = "2026-04-21T10:50:49.324Z" }, + { url = "https://files.pythonhosted.org/packages/13/d3/cca42fe166d1c6e4d5b80e530b7949104d10e17508a90ae202dac205ce2a/pyarrow-24.0.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:9700ebd9a51f5895ce75ff4ac4b3c47a7d4b42bc618be8e713e5d56bacf5f931", size = 48844765, upload-time = "2026-04-21T10:50:55.579Z" }, + { url = "https://files.pythonhosted.org/packages/b0/49/942c3b79878ba928324d1e17c274ed84581db8c0a749b24bcf4cbdf15bd3/pyarrow-24.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d8ddd2768da81d3ee08cfea9b597f4abb4e8e1dc8ae7e204b608d23a0d3ab699", size = 49471890, upload-time = "2026-04-21T10:51:02.439Z" }, + { url = "https://files.pythonhosted.org/packages/76/97/ff71431000a75d84135a1ace5ca4ba11726a231a8007bbb320a4c54075d5/pyarrow-24.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:61a3d7eaa97a14768b542f3d284dc6400dd2470d9f080708b13cd46b6ae18136", size = 51932250, upload-time = "2026-04-21T10:51:10.576Z" }, + { url = "https://files.pythonhosted.org/packages/51/be/6f79d55816d5c22557cf27533543d5d70dfe692adfbee4b99f2760674f38/pyarrow-24.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:c91d00057f23b8d353039520dc3a6c09d8608164c692e9f59a175a42b2ae0c19", size = 28131282, upload-time = "2026-04-21T10:51:16.815Z" }, ] [[package]] @@ -4179,7 +4689,7 @@ wheels = [ [[package]] name = "pydantic" -version = "2.12.5" +version = "2.13.3" 
source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-types" }, @@ -4187,80 +4697,84 @@ dependencies = [ { name = "typing-extensions" }, { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d9/e4/40d09941a2cebcb20609b86a559817d5b9291c49dd6f8c87e5feffbe703a/pydantic-2.13.3.tar.gz", hash = "sha256:af09e9d1d09f4e7fe37145c1f577e1d61ceb9a41924bf0094a36506285d0a84d", size = 844068, upload-time = "2026-04-20T14:46:43.632Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, + { url = "https://files.pythonhosted.org/packages/f3/0a/fd7d723f8f8153418fb40cf9c940e82004fce7e987026b08a68a36dd3fe7/pydantic-2.13.3-py3-none-any.whl", hash = "sha256:6db14ac8dfc9a1e57f87ea2c0de670c251240f43cb0c30a5130e9720dc612927", size = 471981, upload-time = "2026-04-20T14:46:41.402Z" }, ] [[package]] name = "pydantic-core" -version = "2.41.5" +version = "2.46.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/2a/ef/f7abb56c49382a246fd2ce9c799691e3c3e7175ec74b14d99e798bcddb1a/pydantic_core-2.46.3.tar.gz", hash = "sha256:41c178f65b8c29807239d47e6050262eb6bf84eb695e41101e62e38df4a5bc2c", size = 471412, upload-time = "2026-04-20T14:40:56.672Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, - { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, - { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, - { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, - { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, - { url = 
"https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, - { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, - { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, - { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, - { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, - { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, - { url = 
"https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, - { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, - { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, - { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, - { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, - { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, - { url = 
"https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, - { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, - { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, - { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, - { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, - { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = 
"2025-11-04T13:40:44.752Z" }, - { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, - { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, - { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, - { url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, - { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, - { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, - { url = 
"https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, - { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, - { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, - { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, - { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, - { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = 
"2025-11-04T13:41:09.827Z" }, - { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, - { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, - { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, - { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, - { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, - { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, - { url = 
"https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, - { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, - { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, - { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, - { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, - { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" }, - { url = 
"https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, - { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, - { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, - { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, - { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, - { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, - { url = 
"https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, - { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, - { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, - { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, - { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, - { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, - { url = 
"https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/4b/cb/5b47425556ecc1f3fe18ed2a0083188aa46e1dd812b06e406475b3a5d536/pydantic_core-2.46.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b11b59b3eee90a80a36701ddb4576d9ae31f93f05cb9e277ceaa09e6bf074a67", size = 2101946, upload-time = "2026-04-20T14:40:52.581Z" }, + { url = "https://files.pythonhosted.org/packages/a1/4f/2fb62c2267cae99b815bbf4a7b9283812c88ca3153ef29f7707200f1d4e5/pydantic_core-2.46.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:af8653713055ea18a3abc1537fe2ebc42f5b0bbb768d1eb79fd74eb47c0ac089", size = 1951612, upload-time = "2026-04-20T14:42:42.996Z" }, + { url = "https://files.pythonhosted.org/packages/50/6e/b7348fd30d6556d132cddd5bd79f37f96f2601fe0608afac4f5fb01ec0b3/pydantic_core-2.46.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75a519dab6d63c514f3a81053e5266c549679e4aa88f6ec57f2b7b854aceb1b0", size = 1977027, upload-time = "2026-04-20T14:42:02.001Z" }, + { url = "https://files.pythonhosted.org/packages/82/11/31d60ee2b45540d3fb0b29302a393dbc01cd771c473f5b5147bcd353e593/pydantic_core-2.46.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a6cd87cb1575b1ad05ba98894c5b5c96411ef678fa2f6ed2576607095b8d9789", size = 2063008, upload-time = "2026-04-20T14:44:17.952Z" }, + { url = "https://files.pythonhosted.org/packages/8a/db/3a9d1957181b59258f44a2300ab0f0be9d1e12d662a4f57bb31250455c52/pydantic_core-2.46.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f80a55484b8d843c8ada81ebf70a682f3f00a3d40e378c06cf17ecb44d280d7d", size = 2233082, upload-time = 
"2026-04-20T14:40:57.934Z" }, + { url = "https://files.pythonhosted.org/packages/9c/e1/3277c38792aeb5cfb18c2f0c5785a221d9ff4e149abbe1184d53d5f72273/pydantic_core-2.46.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3861f1731b90c50a3266316b9044f5c9b405eecb8e299b0a7120596334e4fe9c", size = 2304615, upload-time = "2026-04-20T14:42:12.584Z" }, + { url = "https://files.pythonhosted.org/packages/5e/d5/e3d9717c9eba10855325650afd2a9cba8e607321697f18953af9d562da2f/pydantic_core-2.46.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb528e295ed31570ac3dcc9bfdd6e0150bc11ce6168ac87a8082055cf1a67395", size = 2094380, upload-time = "2026-04-20T14:43:05.522Z" }, + { url = "https://files.pythonhosted.org/packages/a1/20/abac35dedcbfd66c6f0b03e4e3564511771d6c9b7ede10a362d03e110d9b/pydantic_core-2.46.3-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:367508faa4973b992b271ba1494acaab36eb7e8739d1e47be5035fb1ea225396", size = 2135429, upload-time = "2026-04-20T14:41:55.549Z" }, + { url = "https://files.pythonhosted.org/packages/6c/a5/41bfd1df69afad71b5cf0535055bccc73022715ad362edbc124bc1e021d7/pydantic_core-2.46.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ad3c826fe523e4becf4fe39baa44286cff85ef137c729a2c5e269afbfd0905d", size = 2174582, upload-time = "2026-04-20T14:41:45.96Z" }, + { url = "https://files.pythonhosted.org/packages/79/65/38d86ea056b29b2b10734eb23329b7a7672ca604df4f2b6e9c02d4ee22fe/pydantic_core-2.46.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ec638c5d194ef8af27db69f16c954a09797c0dc25015ad6123eb2c73a4d271ca", size = 2187533, upload-time = "2026-04-20T14:40:55.367Z" }, + { url = "https://files.pythonhosted.org/packages/b6/55/a1129141678a2026badc539ad1dee0a71d06f54c2f06a4bd68c030ac781b/pydantic_core-2.46.3-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:28ed528c45446062ee66edb1d33df5d88828ae167de76e773a3c7f64bd14e976", size = 2332985, upload-time = 
"2026-04-20T14:44:13.05Z" }, + { url = "https://files.pythonhosted.org/packages/d7/60/cb26f4077719f709e54819f4e8e1d43f4091f94e285eb6bd21e1190a7b7c/pydantic_core-2.46.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aed19d0c783886d5bd86d80ae5030006b45e28464218747dcf83dabfdd092c7b", size = 2373670, upload-time = "2026-04-20T14:41:53.421Z" }, + { url = "https://files.pythonhosted.org/packages/6b/7e/c3f21882bdf1d8d086876f81b5e296206c69c6082551d776895de7801fa0/pydantic_core-2.46.3-cp312-cp312-win32.whl", hash = "sha256:06d5d8820cbbdb4147578c1fe7ffcd5b83f34508cb9f9ab76e807be7db6ff0a4", size = 1966722, upload-time = "2026-04-20T14:44:30.588Z" }, + { url = "https://files.pythonhosted.org/packages/57/be/6b5e757b859013ebfbd7adba02f23b428f37c86dcbf78b5bb0b4ffd36e99/pydantic_core-2.46.3-cp312-cp312-win_amd64.whl", hash = "sha256:c3212fda0ee959c1dd04c60b601ec31097aaa893573a3a1abd0a47bcac2968c1", size = 2072970, upload-time = "2026-04-20T14:42:54.248Z" }, + { url = "https://files.pythonhosted.org/packages/bf/f8/a989b21cc75e9a32d24192ef700eea606521221a89faa40c919ce884f2b1/pydantic_core-2.46.3-cp312-cp312-win_arm64.whl", hash = "sha256:f1f8338dd7a7f31761f1f1a3c47503a9a3b34eea3c8b01fa6ee96408affb5e72", size = 2035963, upload-time = "2026-04-20T14:44:20.4Z" }, + { url = "https://files.pythonhosted.org/packages/9b/3c/9b5e8eb9821936d065439c3b0fb1490ffa64163bfe7e1595985a47896073/pydantic_core-2.46.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:12bc98de041458b80c86c56b24df1d23832f3e166cbaff011f25d187f5c62c37", size = 2102109, upload-time = "2026-04-20T14:41:24.219Z" }, + { url = "https://files.pythonhosted.org/packages/91/97/1c41d1f5a19f241d8069f1e249853bcce378cdb76eec8ab636d7bc426280/pydantic_core-2.46.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:85348b8f89d2c3508b65b16c3c33a4da22b8215138d8b996912bb1532868885f", size = 1951820, upload-time = "2026-04-20T14:42:14.236Z" }, + { url = 
"https://files.pythonhosted.org/packages/30/b4/d03a7ae14571bc2b6b3c7b122441154720619afe9a336fa3a95434df5e2f/pydantic_core-2.46.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1105677a6df914b1fb71a81b96c8cce7726857e1717d86001f29be06a25ee6f8", size = 1977785, upload-time = "2026-04-20T14:42:31.648Z" }, + { url = "https://files.pythonhosted.org/packages/ae/0c/4086f808834b59e3c8f1aa26df8f4b6d998cdcf354a143d18ef41529d1fe/pydantic_core-2.46.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:87082cd65669a33adeba5470769e9704c7cf026cc30afb9cc77fd865578ebaad", size = 2062761, upload-time = "2026-04-20T14:40:37.093Z" }, + { url = "https://files.pythonhosted.org/packages/fa/71/a649be5a5064c2df0db06e0a512c2281134ed2fcc981f52a657936a7527c/pydantic_core-2.46.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60e5f66e12c4f5212d08522963380eaaeac5ebd795826cfd19b2dfb0c7a52b9c", size = 2232989, upload-time = "2026-04-20T14:42:59.254Z" }, + { url = "https://files.pythonhosted.org/packages/a2/84/7756e75763e810b3a710f4724441d1ecc5883b94aacb07ca71c5fb5cfb69/pydantic_core-2.46.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b6cdf19bf84128d5e7c37e8a73a0c5c10d51103a650ac585d42dd6ae233f2b7f", size = 2303975, upload-time = "2026-04-20T14:41:32.287Z" }, + { url = "https://files.pythonhosted.org/packages/6c/35/68a762e0c1e31f35fa0dac733cbd9f5b118042853698de9509c8e5bf128b/pydantic_core-2.46.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:031bb17f4885a43773c8c763089499f242aee2ea85cf17154168775dccdecf35", size = 2095325, upload-time = "2026-04-20T14:42:47.685Z" }, + { url = "https://files.pythonhosted.org/packages/77/bf/1bf8c9a8e91836c926eae5e3e51dce009bf495a60ca56060689d3df3f340/pydantic_core-2.46.3-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:bcf2a8b2982a6673693eae7348ef3d8cf3979c1d63b54fca7c397a635cc68687", size = 2133368, upload-time = 
"2026-04-20T14:41:22.766Z" }, + { url = "https://files.pythonhosted.org/packages/e5/50/87d818d6bab915984995157ceb2380f5aac4e563dddbed6b56f0ed057aba/pydantic_core-2.46.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28e8cf2f52d72ced402a137145923a762cbb5081e48b34312f7a0c8f55928ec3", size = 2173908, upload-time = "2026-04-20T14:42:52.044Z" }, + { url = "https://files.pythonhosted.org/packages/91/88/a311fb306d0bd6185db41fa14ae888fb81d0baf648a761ae760d30819d33/pydantic_core-2.46.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:17eaface65d9fc5abb940003020309c1bf7a211f5f608d7870297c367e6f9022", size = 2186422, upload-time = "2026-04-20T14:43:29.55Z" }, + { url = "https://files.pythonhosted.org/packages/8f/79/28fd0d81508525ab2054fef7c77a638c8b5b0afcbbaeee493cf7c3fef7e1/pydantic_core-2.46.3-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:93fd339f23408a07e98950a89644f92c54d8729719a40b30c0a30bb9ebc55d23", size = 2332709, upload-time = "2026-04-20T14:42:16.134Z" }, + { url = "https://files.pythonhosted.org/packages/b3/21/795bf5fe5c0f379308b8ef19c50dedab2e7711dbc8d0c2acf08f1c7daa05/pydantic_core-2.46.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:23cbdb3aaa74dfe0837975dbf69b469753bbde8eacace524519ffdb6b6e89eb7", size = 2372428, upload-time = "2026-04-20T14:41:10.974Z" }, + { url = "https://files.pythonhosted.org/packages/45/b3/ed14c659cbe7605e3ef063077680a64680aec81eb1a04763a05190d49b7f/pydantic_core-2.46.3-cp313-cp313-win32.whl", hash = "sha256:610eda2e3838f401105e6326ca304f5da1e15393ae25dacae5c5c63f2c275b13", size = 1965601, upload-time = "2026-04-20T14:41:42.128Z" }, + { url = "https://files.pythonhosted.org/packages/ef/bb/adb70d9a762ddd002d723fbf1bd492244d37da41e3af7b74ad212609027e/pydantic_core-2.46.3-cp313-cp313-win_amd64.whl", hash = "sha256:68cc7866ed863db34351294187f9b729964c371ba33e31c26f478471c52e1ed0", size = 2071517, upload-time = "2026-04-20T14:43:36.096Z" }, + { url = 
"https://files.pythonhosted.org/packages/52/eb/66faefabebfe68bd7788339c9c9127231e680b11906368c67ce112fdb47f/pydantic_core-2.46.3-cp313-cp313-win_arm64.whl", hash = "sha256:f64b5537ac62b231572879cd08ec05600308636a5d63bcbdb15063a466977bec", size = 2035802, upload-time = "2026-04-20T14:43:38.507Z" }, + { url = "https://files.pythonhosted.org/packages/7f/db/a7bcb4940183fda36022cd18ba8dd12f2dff40740ec7b58ce7457befa416/pydantic_core-2.46.3-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:afa3aa644f74e290cdede48a7b0bee37d1c35e71b05105f6b340d484af536d9b", size = 2097614, upload-time = "2026-04-20T14:44:38.374Z" }, + { url = "https://files.pythonhosted.org/packages/24/35/e4066358a22e3e99519db370494c7528f5a2aa1367370e80e27e20283543/pydantic_core-2.46.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ced3310e51aa425f7f77da8bbbb5212616655bedbe82c70944320bc1dbe5e018", size = 1951896, upload-time = "2026-04-20T14:40:53.996Z" }, + { url = "https://files.pythonhosted.org/packages/87/92/37cf4049d1636996e4b888c05a501f40a43ff218983a551d57f9d5e14f0d/pydantic_core-2.46.3-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e29908922ce9da1a30b4da490bd1d3d82c01dcfdf864d2a74aacee674d0bfa34", size = 1979314, upload-time = "2026-04-20T14:41:49.446Z" }, + { url = "https://files.pythonhosted.org/packages/d8/36/9ff4d676dfbdfb2d591cf43f3d90ded01e15b1404fd101180ed2d62a2fd3/pydantic_core-2.46.3-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c9ff69140423eea8ed2d5477df3ba037f671f5e897d206d921bc9fdc39613e7", size = 2056133, upload-time = "2026-04-20T14:42:23.574Z" }, + { url = "https://files.pythonhosted.org/packages/bc/f0/405b442a4d7ba855b06eec8b2bf9c617d43b8432d099dfdc7bf999293495/pydantic_core-2.46.3-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b675ab0a0d5b1c8fdb81195dc5bcefea3f3c240871cdd7ff9a2de8aa50772eb2", size = 2228726, upload-time = "2026-04-20T14:44:22.816Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/f8/65cd92dd5a0bd89ba277a98ecbfaf6fc36bbd3300973c7a4b826d6ab1391/pydantic_core-2.46.3-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0087084960f209a9a4af50ecd1fb063d9ad3658c07bb81a7a53f452dacbfb2ba", size = 2301214, upload-time = "2026-04-20T14:44:48.792Z" }, + { url = "https://files.pythonhosted.org/packages/fd/86/ef96a4c6e79e7a2d0410826a68fbc0eccc0fd44aa733be199d5fcac3bb87/pydantic_core-2.46.3-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed42e6cc8e1b0e2b9b96e2276bad70ae625d10d6d524aed0c93de974ae029f9f", size = 2099927, upload-time = "2026-04-20T14:41:40.196Z" }, + { url = "https://files.pythonhosted.org/packages/6d/53/269caf30e0096e0a8a8f929d1982a27b3879872cca2d917d17c2f9fdf4fe/pydantic_core-2.46.3-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:f1771ce258afb3e4201e67d154edbbae712a76a6081079fe247c2f53c6322c22", size = 2128789, upload-time = "2026-04-20T14:41:15.868Z" }, + { url = "https://files.pythonhosted.org/packages/00/b0/1a6d9b6a587e118482910c244a1c5acf4d192604174132efd12bf0ac486f/pydantic_core-2.46.3-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a7610b6a5242a6c736d8ad47fd5fff87fcfe8f833b281b1c409c3d6835d9227f", size = 2173815, upload-time = "2026-04-20T14:44:25.152Z" }, + { url = "https://files.pythonhosted.org/packages/87/56/e7e00d4041a7e62b5a40815590114db3b535bf3ca0bf4dca9f16cef25246/pydantic_core-2.46.3-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:ff5e7783bcc5476e1db448bf268f11cb257b1c276d3e89f00b5727be86dd0127", size = 2181608, upload-time = "2026-04-20T14:41:28.933Z" }, + { url = "https://files.pythonhosted.org/packages/e8/22/4bd23c3d41f7c185d60808a1de83c76cf5aeabf792f6c636a55c3b1ec7f9/pydantic_core-2.46.3-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:9d2e32edcc143bc01e95300671915d9ca052d4f745aa0a49c48d4803f8a85f2c", size = 2326968, upload-time = "2026-04-20T14:42:03.962Z" }, + { url = 
"https://files.pythonhosted.org/packages/24/ac/66cd45129e3915e5ade3b292cb3bc7fd537f58f8f8dbdaba6170f7cabb74/pydantic_core-2.46.3-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6e42d83d1c6b87fa56b521479cff237e626a292f3b31b6345c15a99121b454c1", size = 2369842, upload-time = "2026-04-20T14:41:35.52Z" }, + { url = "https://files.pythonhosted.org/packages/a2/51/dd4248abb84113615473aa20d5545b7c4cd73c8644003b5259686f93996c/pydantic_core-2.46.3-cp314-cp314-win32.whl", hash = "sha256:07bc6d2a28c3adb4f7c6ae46aa4f2d2929af127f587ed44057af50bf1ce0f505", size = 1959661, upload-time = "2026-04-20T14:41:00.042Z" }, + { url = "https://files.pythonhosted.org/packages/20/eb/59980e5f1ae54a3b86372bd9f0fa373ea2d402e8cdcd3459334430f91e91/pydantic_core-2.46.3-cp314-cp314-win_amd64.whl", hash = "sha256:8940562319bc621da30714617e6a7eaa6b98c84e8c685bcdc02d7ed5e7c7c44e", size = 2071686, upload-time = "2026-04-20T14:43:16.471Z" }, + { url = "https://files.pythonhosted.org/packages/8c/db/1cf77e5247047dfee34bc01fa9bca134854f528c8eb053e144298893d370/pydantic_core-2.46.3-cp314-cp314-win_arm64.whl", hash = "sha256:5dcbbcf4d22210ced8f837c96db941bdb078f419543472aca5d9a0bb7cddc7df", size = 2026907, upload-time = "2026-04-20T14:43:31.732Z" }, + { url = "https://files.pythonhosted.org/packages/57/c0/b3df9f6a543276eadba0a48487b082ca1f201745329d97dbfa287034a230/pydantic_core-2.46.3-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:d0fe3dce1e836e418f912c1ad91c73357d03e556a4d286f441bf34fed2dbeecf", size = 2095047, upload-time = "2026-04-20T14:42:37.982Z" }, + { url = "https://files.pythonhosted.org/packages/66/57/886a938073b97556c168fd99e1a7305bb363cd30a6d2c76086bf0587b32a/pydantic_core-2.46.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9ce92e58abc722dac1bf835a6798a60b294e48eb0e625ec9fd994b932ac5feee", size = 1934329, upload-time = "2026-04-20T14:43:49.655Z" }, + { url = 
"https://files.pythonhosted.org/packages/0b/7c/b42eaa5c34b13b07ecb51da21761297a9b8eb43044c864a035999998f328/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a03e6467f0f5ab796a486146d1b887b2dc5e5f9b3288898c1b1c3ad974e53e4a", size = 1974847, upload-time = "2026-04-20T14:42:10.737Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9b/92b42db6543e7de4f99ae977101a2967b63122d4b6cf7773812da2d7d5b5/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2798b6ba041b9d70acfb9071a2ea13c8456dd1e6a5555798e41ba7b0790e329c", size = 2041742, upload-time = "2026-04-20T14:40:44.262Z" }, + { url = "https://files.pythonhosted.org/packages/0f/19/46fbe1efabb5aa2834b43b9454e70f9a83ad9c338c1291e48bdc4fecf167/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9be3e221bdc6d69abf294dcf7aff6af19c31a5cdcc8f0aa3b14be29df4bd03b1", size = 2236235, upload-time = "2026-04-20T14:41:27.307Z" }, + { url = "https://files.pythonhosted.org/packages/77/da/b3f95bc009ad60ec53120f5d16c6faa8cabdbe8a20d83849a1f2b8728148/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f13936129ce841f2a5ddf6f126fea3c43cd128807b5a59588c37cf10178c2e64", size = 2282633, upload-time = "2026-04-20T14:44:33.271Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6e/401336117722e28f32fb8220df676769d28ebdf08f2f4469646d404c43a3/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28b5f2ef03416facccb1c6ef744c69793175fd27e44ef15669201601cf423acb", size = 2109679, upload-time = "2026-04-20T14:44:41.065Z" }, + { url = "https://files.pythonhosted.org/packages/fc/53/b289f9bc8756a32fe718c46f55afaeaf8d489ee18d1a1e7be1db73f42cc4/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:830d1247d77ad23852314f069e9d7ddafeec5f684baf9d7e7065ed46a049c4e6", size = 2108342, upload-time = 
"2026-04-20T14:42:50.144Z" }, + { url = "https://files.pythonhosted.org/packages/10/5b/8292fc7c1f9111f1b2b7c1b0dcf1179edcd014fc3ea4517499f50b829d71/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0793c90c1a3c74966e7975eaef3ed30ebdff3260a0f815a62a22adc17e4c01c", size = 2157208, upload-time = "2026-04-20T14:42:08.133Z" }, + { url = "https://files.pythonhosted.org/packages/2b/9e/f80044e9ec07580f057a89fc131f78dda7a58751ddf52bbe05eaf31db50f/pydantic_core-2.46.3-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:d2d0aead851b66f5245ec0c4fb2612ef457f8bbafefdf65a2bf9d6bac6140f47", size = 2167237, upload-time = "2026-04-20T14:42:25.412Z" }, + { url = "https://files.pythonhosted.org/packages/f8/84/6781a1b037f3b96be9227edbd1101f6d3946746056231bf4ac48cdff1a8d/pydantic_core-2.46.3-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:2f40e4246676beb31c5ce77c38a55ca4e465c6b38d11ea1bd935420568e0b1ab", size = 2312540, upload-time = "2026-04-20T14:40:40.313Z" }, + { url = "https://files.pythonhosted.org/packages/3e/db/19c0839feeb728e7df03255581f198dfdf1c2aeb1e174a8420b63c5252e5/pydantic_core-2.46.3-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:cf489cf8986c543939aeee17a09c04d6ffb43bfef8ca16fcbcc5cfdcbed24dba", size = 2369556, upload-time = "2026-04-20T14:41:09.427Z" }, + { url = "https://files.pythonhosted.org/packages/e0/15/3228774cb7cd45f5f721ddf1b2242747f4eb834d0c491f0c02d606f09fed/pydantic_core-2.46.3-cp314-cp314t-win32.whl", hash = "sha256:ffe0883b56cfc05798bf994164d2b2ff03efe2d22022a2bb080f3b626176dd56", size = 1949756, upload-time = "2026-04-20T14:41:25.717Z" }, + { url = "https://files.pythonhosted.org/packages/b8/2a/c79cf53fd91e5a87e30d481809f52f9a60dd221e39de66455cf04deaad37/pydantic_core-2.46.3-cp314-cp314t-win_amd64.whl", hash = "sha256:706d9d0ce9cf4593d07270d8e9f53b161f90c57d315aeec4fb4fd7a8b10240d8", size = 2051305, upload-time = "2026-04-20T14:43:18.627Z" }, + { url = 
"https://files.pythonhosted.org/packages/0b/db/d8182a7f1d9343a032265aae186eb063fe26ca4c40f256b21e8da4498e89/pydantic_core-2.46.3-cp314-cp314t-win_arm64.whl", hash = "sha256:77706aeb41df6a76568434701e0917da10692da28cb69d5fb6919ce5fdb07374", size = 2026310, upload-time = "2026-04-20T14:41:01.778Z" }, + { url = "https://files.pythonhosted.org/packages/34/42/f426db557e8ab2791bc7562052299944a118655496fbff99914e564c0a94/pydantic_core-2.46.3-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:b12dd51f1187c2eb489af8e20f880362db98e954b54ab792fa5d92e8bcc6b803", size = 2091877, upload-time = "2026-04-20T14:43:27.091Z" }, + { url = "https://files.pythonhosted.org/packages/5c/4f/86a832a9d14df58e663bfdf4627dc00d3317c2bd583c4fb23390b0f04b8e/pydantic_core-2.46.3-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:f00a0961b125f1a47af7bcc17f00782e12f4cd056f83416006b30111d941dfa3", size = 1932428, upload-time = "2026-04-20T14:40:45.781Z" }, + { url = "https://files.pythonhosted.org/packages/11/1a/fe857968954d93fb78e0d4b6df5c988c74c4aaa67181c60be7cfe327c0ca/pydantic_core-2.46.3-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57697d7c056aca4bbb680200f96563e841a6386ac1129370a0102592f4dddff5", size = 1997550, upload-time = "2026-04-20T14:44:02.425Z" }, + { url = "https://files.pythonhosted.org/packages/17/eb/9d89ad2d9b0ba8cd65393d434471621b98912abb10fbe1df08e480ba57b5/pydantic_core-2.46.3-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd35aa21299def8db7ef4fe5c4ff862941a9a158ca7b63d61e66fe67d30416b4", size = 2137657, upload-time = "2026-04-20T14:42:45.149Z" }, ] [[package]] @@ -4323,14 +4837,14 @@ wheels = [ [[package]] name = "pyngrok" -version = "8.0.0" +version = "8.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pyyaml" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/ec/7a/6f9b08044be356d228533f6a8454377e2d31dd1309adf13209906019762d/pyngrok-8.0.0.tar.gz", hash = "sha256:6e7aaf90b43086ad25508a1122423608003f712d9988319ddf7be50431028101", size = 42082, upload-time = "2026-04-01T15:43:43.333Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/4b/f1372b66985d76177ea492a5f82643b628a958ebdabc99350bb24c643d5b/pyngrok-8.1.0.tar.gz", hash = "sha256:18ea24460f5d74bf5c80feabd55ccf40e9552235ed103f967ec2ef99b57940c6", size = 45000, upload-time = "2026-04-27T02:54:44.771Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/53/ae/d8cb883f717627bd8e2fe2a65ab6544cd23789a63d61411c037a65246982/pyngrok-8.0.0-py3-none-any.whl", hash = "sha256:bdc34c7a0a7e9bad60e44b9dcc7661c055c0a79dc029c7100ed15669c626d5c1", size = 24467, upload-time = "2026-04-01T15:43:42.3Z" }, + { url = "https://files.pythonhosted.org/packages/2f/50/d6f73708d6d358184c1227bbd5cf12dfa78af229fe81553bbd0e9a1ad073/pyngrok-8.1.0-py3-none-any.whl", hash = "sha256:8ba8497a2c7ac6b2f41a66f8102b815da30bfbb63321a70a0a4fa3a51b03e79b", size = 25882, upload-time = "2026-04-27T02:54:43.298Z" }, ] [[package]] @@ -4584,6 +5098,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0b/d7/1959b9648791274998a9c3526f6d0ec8fd2233e4d4acce81bbae76b44b2a/python_dotenv-1.2.2-py3-none-any.whl", hash = "sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a", size = 22101, upload-time = "2026-03-01T16:00:25.09Z" }, ] +[[package]] +name = "python-json-logger" +version = "4.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f7/ff/3cc9165fd44106973cd7ac9facb674a65ed853494592541d339bdc9a30eb/python_json_logger-4.1.0.tar.gz", hash = "sha256:b396b9e3ed782b09ff9d6e4f1683d46c83ad0d35d2e407c09a9ebbf038f88195", size = 17573, upload-time = "2026-03-29T04:39:56.805Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/27/be/0631a861af4d1c875f096c07d34e9a63639560a717130e7a87cbc82b7e3f/python_json_logger-4.1.0-py3-none-any.whl", hash = "sha256:132994765cf75bf44554be9aa49b06ef2345d23661a96720262716438141b6b2", size = 15021, upload-time = "2026-03-29T04:39:55.266Z" }, +] + [[package]] name = "python-xlib" version = "0.33" @@ -4605,6 +5128,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/10/99/781fe0c827be2742bcc775efefccb3b048a3a9c6ce9aec0cbf4a101677e5/pytz-2026.1.post1-py2.py3-none-any.whl", hash = "sha256:f2fd16142fda348286a75e1a524be810bb05d444e5a081f37f7affc635035f7a", size = 510489, upload-time = "2026-03-03T07:47:49.167Z" }, ] +[[package]] +name = "pywinpty" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f7/54/37c7370ba91f579235049dc26cd2c5e657d2a943e01820844ffc81f32176/pywinpty-3.0.3.tar.gz", hash = "sha256:523441dc34d231fb361b4b00f8c99d3f16de02f5005fd544a0183112bcc22412", size = 31309, upload-time = "2026-02-04T21:51:09.524Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/d4/aeb5e1784d2c5bff6e189138a9ca91a090117459cea0c30378e1f2db3d54/pywinpty-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:c9081df0e49ffa86d15db4a6ba61530630e48707f987df42c9d3313537e81fc0", size = 2113098, upload-time = "2026-02-04T21:54:37.711Z" }, + { url = "https://files.pythonhosted.org/packages/b9/53/7278223c493ccfe4883239cf06c823c56460a8010e0fc778eef67858dc14/pywinpty-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:15e79d870e18b678fb8a5a6105fd38496b55697c66e6fc0378236026bc4d59e9", size = 234901, upload-time = "2026-02-04T21:53:31.35Z" }, + { url = "https://files.pythonhosted.org/packages/e5/cb/58d6ed3fd429c96a90ef01ac9a617af10a6d41469219c25e7dc162abbb71/pywinpty-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9c91dbb026050c77bdcef964e63a4f10f01a639113c4d3658332614544c467ab", size = 2112686, upload-time = "2026-02-04T21:52:03.035Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/50/724ed5c38c504d4e58a88a072776a1e880d970789deaeb2b9f7bd9a5141a/pywinpty-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:fe1f7911805127c94cf51f89ab14096c6f91ffdcacf993d2da6082b2142a2523", size = 234591, upload-time = "2026-02-04T21:52:29.821Z" }, + { url = "https://files.pythonhosted.org/packages/f7/ad/90a110538696b12b39fd8758a06d70ded899308198ad2305ac68e361126e/pywinpty-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:3f07a6cf1c1d470d284e614733c3d0f726d2c85e78508ea10a403140c3c0c18a", size = 2112360, upload-time = "2026-02-04T21:55:33.397Z" }, + { url = "https://files.pythonhosted.org/packages/44/0f/7ffa221757a220402bc79fda44044c3f2cc57338d878ab7d622add6f4581/pywinpty-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:15c7c0b6f8e9d87aabbaff76468dabf6e6121332c40fc1d83548d02a9d6a3759", size = 233107, upload-time = "2026-02-04T21:51:45.455Z" }, + { url = "https://files.pythonhosted.org/packages/28/88/2ff917caff61e55f38bcdb27de06ee30597881b2cae44fbba7627be015c4/pywinpty-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:d4b6b7b0fe0cdcd02e956bd57cfe9f4e5a06514eecf3b5ae174da4f951b58be9", size = 2113282, upload-time = "2026-02-04T21:52:08.188Z" }, + { url = "https://files.pythonhosted.org/packages/63/32/40a775343ace542cc43ece3f1d1fce454021521ecac41c4c4573081c2336/pywinpty-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:34789d685fc0d547ce0c8a65e5a70e56f77d732fa6e03c8f74fefb8cbb252019", size = 234207, upload-time = "2026-02-04T21:51:58.687Z" }, + { url = "https://files.pythonhosted.org/packages/8d/54/5d5e52f4cb75028104ca6faf36c10f9692389b1986d34471663b4ebebd6d/pywinpty-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:0c37e224a47a971d1a6e08649a1714dac4f63c11920780977829ed5c8cadead1", size = 2112910, upload-time = "2026-02-04T21:52:30.976Z" }, + { url = "https://files.pythonhosted.org/packages/0a/44/dcd184824e21d4620b06c7db9fbb15c3ad0a0f1fa2e6de79969fb82647ec/pywinpty-3.0.3-cp314-cp314t-win_arm64.whl", hash = 
"sha256:c4e9c3dff7d86ba81937438d5819f19f385a39d8f592d4e8af67148ceb4f6ab5", size = 233425, upload-time = "2026-02-04T21:51:56.754Z" }, +] + [[package]] name = "pyyaml" version = "6.0.3" @@ -4758,9 +5299,9 @@ name = "referencing" version = "0.37.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "attrs", marker = "sys_platform == 'linux'" }, - { name = "rpds-py", marker = "sys_platform == 'linux'" }, - { name = "typing-extensions", marker = "python_full_version < '3.13' and sys_platform == 'linux'" }, + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } wheels = [ @@ -4889,6 +5430,39 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/be/e7/99fc91c0f99f69d7d43e1db0a6f6cb8273ffc02111539bfc1fee43749bad/rerun_sdk-0.26.2-cp39-abi3-win_amd64.whl", hash = "sha256:a493ad6c8357022cba2ca6f8954a81d0faf984b0b22154eb1d976bfc7649df63", size = 84267089, upload-time = "2025-10-27T11:34:32.023Z" }, ] +[[package]] +name = "rfc3339-validator" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/28/ea/a9387748e2d111c3c2b275ba970b735e04e15cdb1eb30693b6b5708c4dbd/rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b", size = 5513, upload-time = "2021-05-12T16:37:54.178Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/44/4e421b96b67b2daff264473f7465db72fbdf36a07e05494f50300cc7b0c6/rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = 
"sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa", size = 3490, upload-time = "2021-05-12T16:37:52.536Z" }, +] + +[[package]] +name = "rfc3986-validator" +version = "0.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/da/88/f270de456dd7d11dcc808abfa291ecdd3f45ff44e3b549ffa01b126464d0/rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055", size = 6760, upload-time = "2019-10-28T16:00:19.144Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/51/17023c0f8f1869d8806b979a2bffa3f861f26a3f1a66b094288323fba52f/rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9", size = 4242, upload-time = "2019-10-28T16:00:13.976Z" }, +] + +[[package]] +name = "rfc3987-syntax" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "lark" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2c/06/37c1a5557acf449e8e406a830a05bf885ac47d33270aec454ef78675008d/rfc3987_syntax-1.1.0.tar.gz", hash = "sha256:717a62cbf33cffdd16dfa3a497d81ce48a660ea691b1ddd7be710c22f00b4a0d", size = 14239, upload-time = "2025-07-18T01:05:05.015Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/71/44ce230e1b7fadd372515a97e32a83011f906ddded8d03e3c6aafbdedbb7/rfc3987_syntax-1.1.0-py3-none-any.whl", hash = "sha256:6c3d97604e4c5ce9f714898e05401a0445a641cfa276432b0a648c80856f6a3f", size = 8046, upload-time = "2025-07-18T01:05:03.843Z" }, +] + [[package]] name = "rhoban-cmeel-jsoncpp" version = "1.9.4.9" @@ -4972,6 +5546,8 @@ version = "0.30.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/20/af/3f2f423103f1113b36230496629986e0ef7e199d2aa8392452b484b38ced/rpds_py-0.30.0.tar.gz", hash = 
"sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84", size = 69469, upload-time = "2025-11-30T20:24:38.837Z" } wheels = [ + { url = "https://files.pythonhosted.org/packages/03/e7/98a2f4ac921d82f33e03f3835f5bf3a4a40aa1bfdc57975e74a97b2b4bdd/rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad", size = 375086, upload-time = "2025-11-30T20:22:17.93Z" }, + { url = "https://files.pythonhosted.org/packages/4d/a1/bca7fd3d452b272e13335db8d6b0b3ecde0f90ad6f16f3328c6fb150c889/rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05", size = 359053, upload-time = "2025-11-30T20:22:19.297Z" }, { url = "https://files.pythonhosted.org/packages/65/1c/ae157e83a6357eceff62ba7e52113e3ec4834a84cfe07fa4b0757a7d105f/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28", size = 390763, upload-time = "2025-11-30T20:22:21.661Z" }, { url = "https://files.pythonhosted.org/packages/d4/36/eb2eb8515e2ad24c0bd43c3ee9cd74c33f7ca6430755ccdb240fd3144c44/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd", size = 408951, upload-time = "2025-11-30T20:22:23.408Z" }, { url = "https://files.pythonhosted.org/packages/d6/65/ad8dc1784a331fabbd740ef6f71ce2198c7ed0890dab595adb9ea2d775a1/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f", size = 514622, upload-time = "2025-11-30T20:22:25.16Z" }, @@ -4982,6 +5558,11 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/00/2b/e59e58c544dc9bd8bd8384ecdb8ea91f6727f0e37a7131baeff8d6f51661/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5", size = 573289, upload-time = "2025-11-30T20:22:32.997Z" }, { url = "https://files.pythonhosted.org/packages/da/3e/a18e6f5b460893172a7d6a680e86d3b6bc87a54c1f0b03446a3c8c7b588f/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e", size = 599737, upload-time = "2025-11-30T20:22:34.419Z" }, { url = "https://files.pythonhosted.org/packages/5c/e2/714694e4b87b85a18e2c243614974413c60aa107fd815b8cbc42b873d1d7/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394", size = 563120, upload-time = "2025-11-30T20:22:35.903Z" }, + { url = "https://files.pythonhosted.org/packages/6f/ab/d5d5e3bcedb0a77f4f613706b750e50a5a3ba1c15ccd3665ecc636c968fd/rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf", size = 223782, upload-time = "2025-11-30T20:22:37.271Z" }, + { url = "https://files.pythonhosted.org/packages/39/3b/f786af9957306fdc38a74cef405b7b93180f481fb48453a114bb6465744a/rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b", size = 240463, upload-time = "2025-11-30T20:22:39.021Z" }, + { url = "https://files.pythonhosted.org/packages/f3/d2/b91dc748126c1559042cfe41990deb92c4ee3e2b415f6b5234969ffaf0cc/rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e", size = 230868, upload-time = "2025-11-30T20:22:40.493Z" }, + { url = "https://files.pythonhosted.org/packages/ed/dc/d61221eb88ff410de3c49143407f6f3147acf2538c86f2ab7ce65ae7d5f9/rpds_py-0.30.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f83424d738204d9770830d35290ff3273fbb02b41f919870479fab14b9d303b2", size = 374887, upload-time = "2025-11-30T20:22:41.812Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/32/55fb50ae104061dbc564ef15cc43c013dc4a9f4527a1f4d99baddf56fe5f/rpds_py-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7536cd91353c5273434b4e003cbda89034d67e7710eab8761fd918ec6c69cf8", size = 358904, upload-time = "2025-11-30T20:22:43.479Z" }, { url = "https://files.pythonhosted.org/packages/58/70/faed8186300e3b9bdd138d0273109784eea2396c68458ed580f885dfe7ad/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2771c6c15973347f50fece41fc447c054b7ac2ae0502388ce3b6738cd366e3d4", size = 389945, upload-time = "2025-11-30T20:22:44.819Z" }, { url = "https://files.pythonhosted.org/packages/bd/a8/073cac3ed2c6387df38f71296d002ab43496a96b92c823e76f46b8af0543/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0a59119fc6e3f460315fe9d08149f8102aa322299deaa5cab5b40092345c2136", size = 407783, upload-time = "2025-11-30T20:22:46.103Z" }, { url = "https://files.pythonhosted.org/packages/77/57/5999eb8c58671f1c11eba084115e77a8899d6e694d2a18f69f0ba471ec8b/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76fec018282b4ead0364022e3c54b60bf368b9d926877957a8624b58419169b7", size = 515021, upload-time = "2025-11-30T20:22:47.458Z" }, @@ -4992,6 +5573,11 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/06/c1/3088fc04b6624eb12a57eb814f0d4997a44b0d208d6cace713033ff1a6ba/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d4c2aa7c50ad4728a094ebd5eb46c452e9cb7edbfdb18f9e1221f597a73e1e7", size = 572731, upload-time = "2025-11-30T20:22:54.778Z" }, { url = "https://files.pythonhosted.org/packages/d8/42/c612a833183b39774e8ac8fecae81263a68b9583ee343db33ab571a7ce55/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ba81a9203d07805435eb06f536d95a266c21e5b2dfbf6517748ca40c98d19e31", size = 599027, upload-time = "2025-11-30T20:22:56.212Z" }, { url = 
"https://files.pythonhosted.org/packages/5f/60/525a50f45b01d70005403ae0e25f43c0384369ad24ffe46e8d9068b50086/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:945dccface01af02675628334f7cf49c2af4c1c904748efc5cf7bbdf0b579f95", size = 563020, upload-time = "2025-11-30T20:22:58.2Z" }, + { url = "https://files.pythonhosted.org/packages/0b/5d/47c4655e9bcd5ca907148535c10e7d489044243cc9941c16ed7cd53be91d/rpds_py-0.30.0-cp313-cp313-win32.whl", hash = "sha256:b40fb160a2db369a194cb27943582b38f79fc4887291417685f3ad693c5a1d5d", size = 223139, upload-time = "2025-11-30T20:23:00.209Z" }, + { url = "https://files.pythonhosted.org/packages/f2/e1/485132437d20aa4d3e1d8b3fb5a5e65aa8139f1e097080c2a8443201742c/rpds_py-0.30.0-cp313-cp313-win_amd64.whl", hash = "sha256:806f36b1b605e2d6a72716f321f20036b9489d29c51c91f4dd29a3e3afb73b15", size = 240224, upload-time = "2025-11-30T20:23:02.008Z" }, + { url = "https://files.pythonhosted.org/packages/24/95/ffd128ed1146a153d928617b0ef673960130be0009c77d8fbf0abe306713/rpds_py-0.30.0-cp313-cp313-win_arm64.whl", hash = "sha256:d96c2086587c7c30d44f31f42eae4eac89b60dabbac18c7669be3700f13c3ce1", size = 230645, upload-time = "2025-11-30T20:23:03.43Z" }, + { url = "https://files.pythonhosted.org/packages/ff/1b/b10de890a0def2a319a2626334a7f0ae388215eb60914dbac8a3bae54435/rpds_py-0.30.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:eb0b93f2e5c2189ee831ee43f156ed34e2a89a78a66b98cadad955972548be5a", size = 364443, upload-time = "2025-11-30T20:23:04.878Z" }, + { url = "https://files.pythonhosted.org/packages/0d/bf/27e39f5971dc4f305a4fb9c672ca06f290f7c4e261c568f3dea16a410d47/rpds_py-0.30.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:922e10f31f303c7c920da8981051ff6d8c1a56207dbdf330d9047f6d30b70e5e", size = 353375, upload-time = "2025-11-30T20:23:06.342Z" }, { url = 
"https://files.pythonhosted.org/packages/40/58/442ada3bba6e8e6615fc00483135c14a7538d2ffac30e2d933ccf6852232/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdc62c8286ba9bf7f47befdcea13ea0e26bf294bda99758fd90535cbaf408000", size = 383850, upload-time = "2025-11-30T20:23:07.825Z" }, { url = "https://files.pythonhosted.org/packages/14/14/f59b0127409a33c6ef6f5c1ebd5ad8e32d7861c9c7adfa9a624fc3889f6c/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47f9a91efc418b54fb8190a6b4aa7813a23fb79c51f4bb84e418f5476c38b8db", size = 392812, upload-time = "2025-11-30T20:23:09.228Z" }, { url = "https://files.pythonhosted.org/packages/b3/66/e0be3e162ac299b3a22527e8913767d869e6cc75c46bd844aa43fb81ab62/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3587eb9b17f3789ad50824084fa6f81921bbf9a795826570bda82cb3ed91f2", size = 517841, upload-time = "2025-11-30T20:23:11.186Z" }, @@ -5002,6 +5588,10 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a3/31/622a86cdc0c45d6df0e9ccb6becdba5074735e7033c20e401a6d9d0e2ca0/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c77afbd5f5250bf27bf516c7c4a016813eb2d3e116139aed0096940c5982da94", size = 565790, upload-time = "2025-11-30T20:23:19.029Z" }, { url = "https://files.pythonhosted.org/packages/1c/5d/15bbf0fb4a3f58a3b1c67855ec1efcc4ceaef4e86644665fff03e1b66d8d/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:61046904275472a76c8c90c9ccee9013d70a6d0f73eecefd38c1ae7c39045a08", size = 590217, upload-time = "2025-11-30T20:23:20.885Z" }, { url = "https://files.pythonhosted.org/packages/6d/61/21b8c41f68e60c8cc3b2e25644f0e3681926020f11d06ab0b78e3c6bbff1/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c5f36a861bc4b7da6516dbdf302c55313afa09b81931e8280361a4f6c9a2d27", size = 555806, upload-time = "2025-11-30T20:23:22.488Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/39/7e067bb06c31de48de3eb200f9fc7c58982a4d3db44b07e73963e10d3be9/rpds_py-0.30.0-cp313-cp313t-win32.whl", hash = "sha256:3d4a69de7a3e50ffc214ae16d79d8fbb0922972da0356dcf4d0fdca2878559c6", size = 211341, upload-time = "2025-11-30T20:23:24.449Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4d/222ef0b46443cf4cf46764d9c630f3fe4abaa7245be9417e56e9f52b8f65/rpds_py-0.30.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f14fc5df50a716f7ece6a80b6c78bb35ea2ca47c499e422aa4463455dd96d56d", size = 225768, upload-time = "2025-11-30T20:23:25.908Z" }, + { url = "https://files.pythonhosted.org/packages/86/81/dad16382ebbd3d0e0328776d8fd7ca94220e4fa0798d1dc5e7da48cb3201/rpds_py-0.30.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:68f19c879420aa08f61203801423f6cd5ac5f0ac4ac82a2368a9fcd6a9a075e0", size = 362099, upload-time = "2025-11-30T20:23:27.316Z" }, + { url = "https://files.pythonhosted.org/packages/2b/60/19f7884db5d5603edf3c6bce35408f45ad3e97e10007df0e17dd57af18f8/rpds_py-0.30.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ec7c4490c672c1a0389d319b3a9cfcd098dcdc4783991553c332a15acf7249be", size = 353192, upload-time = "2025-11-30T20:23:29.151Z" }, { url = "https://files.pythonhosted.org/packages/bf/c4/76eb0e1e72d1a9c4703c69607cec123c29028bff28ce41588792417098ac/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f251c812357a3fed308d684a5079ddfb9d933860fc6de89f2b7ab00da481e65f", size = 384080, upload-time = "2025-11-30T20:23:30.785Z" }, { url = "https://files.pythonhosted.org/packages/72/87/87ea665e92f3298d1b26d78814721dc39ed8d2c74b86e83348d6b48a6f31/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac98b175585ecf4c0348fd7b29c3864bda53b805c773cbf7bfdaffc8070c976f", size = 394841, upload-time = "2025-11-30T20:23:32.209Z" }, { url = 
"https://files.pythonhosted.org/packages/77/ad/7783a89ca0587c15dcbf139b4a8364a872a25f861bdb88ed99f9b0dec985/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3e62880792319dbeb7eb866547f2e35973289e7d5696c6e295476448f5b63c87", size = 516670, upload-time = "2025-11-30T20:23:33.742Z" }, @@ -5012,6 +5602,11 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/70/ea/caa143cf6b772f823bc7929a45da1fa83569ee49b11d18d0ada7f5ee6fd6/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ed177ed9bded28f8deb6ab40c183cd1192aa0de40c12f38be4d59cd33cb5c65", size = 565606, upload-time = "2025-11-30T20:23:42.186Z" }, { url = "https://files.pythonhosted.org/packages/64/91/ac20ba2d69303f961ad8cf55bf7dbdb4763f627291ba3d0d7d67333cced9/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ad1fa8db769b76ea911cb4e10f049d80bf518c104f15b3edb2371cc65375c46f", size = 591126, upload-time = "2025-11-30T20:23:44.086Z" }, { url = "https://files.pythonhosted.org/packages/21/20/7ff5f3c8b00c8a95f75985128c26ba44503fb35b8e0259d812766ea966c7/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:46e83c697b1f1c72b50e5ee5adb4353eef7406fb3f2043d64c33f20ad1c2fc53", size = 553371, upload-time = "2025-11-30T20:23:46.004Z" }, + { url = "https://files.pythonhosted.org/packages/72/c7/81dadd7b27c8ee391c132a6b192111ca58d866577ce2d9b0ca157552cce0/rpds_py-0.30.0-cp314-cp314-win32.whl", hash = "sha256:ee454b2a007d57363c2dfd5b6ca4a5d7e2c518938f8ed3b706e37e5d470801ed", size = 215298, upload-time = "2025-11-30T20:23:47.696Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d2/1aaac33287e8cfb07aab2e6b8ac1deca62f6f65411344f1433c55e6f3eb8/rpds_py-0.30.0-cp314-cp314-win_amd64.whl", hash = "sha256:95f0802447ac2d10bcc69f6dc28fe95fdf17940367b21d34e34c737870758950", size = 228604, upload-time = "2025-11-30T20:23:49.501Z" }, + { url = 
"https://files.pythonhosted.org/packages/e8/95/ab005315818cc519ad074cb7784dae60d939163108bd2b394e60dc7b5461/rpds_py-0.30.0-cp314-cp314-win_arm64.whl", hash = "sha256:613aa4771c99f03346e54c3f038e4cc574ac09a3ddfb0e8878487335e96dead6", size = 222391, upload-time = "2025-11-30T20:23:50.96Z" }, + { url = "https://files.pythonhosted.org/packages/9e/68/154fe0194d83b973cdedcdcc88947a2752411165930182ae41d983dcefa6/rpds_py-0.30.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7e6ecfcb62edfd632e56983964e6884851786443739dbfe3582947e87274f7cb", size = 364868, upload-time = "2025-11-30T20:23:52.494Z" }, + { url = "https://files.pythonhosted.org/packages/83/69/8bbc8b07ec854d92a8b75668c24d2abcb1719ebf890f5604c61c9369a16f/rpds_py-0.30.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a1d0bc22a7cdc173fedebb73ef81e07faef93692b8c1ad3733b67e31e1b6e1b8", size = 353747, upload-time = "2025-11-30T20:23:54.036Z" }, { url = "https://files.pythonhosted.org/packages/ab/00/ba2e50183dbd9abcce9497fa5149c62b4ff3e22d338a30d690f9af970561/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d08f00679177226c4cb8c5265012eea897c8ca3b93f429e546600c971bcbae7", size = 383795, upload-time = "2025-11-30T20:23:55.556Z" }, { url = "https://files.pythonhosted.org/packages/05/6f/86f0272b84926bcb0e4c972262f54223e8ecc556b3224d281e6598fc9268/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5965af57d5848192c13534f90f9dd16464f3c37aaf166cc1da1cae1fd5a34898", size = 393330, upload-time = "2025-11-30T20:23:57.033Z" }, { url = "https://files.pythonhosted.org/packages/cb/e9/0e02bb2e6dc63d212641da45df2b0bf29699d01715913e0d0f017ee29438/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a4e86e34e9ab6b667c27f3211ca48f73dba7cd3d90f8d5b11be56e5dbc3fb4e", size = 518194, upload-time = "2025-11-30T20:23:58.637Z" }, @@ -5022,31 +5617,33 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/8c/28/882e72b5b3e6f718d5453bd4d0d9cf8df36fddeb4ddbbab17869d5868616/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:74a3243a411126362712ee1524dfc90c650a503502f135d54d1b352bd01f2404", size = 565630, upload-time = "2025-11-30T20:24:06.878Z" }, { url = "https://files.pythonhosted.org/packages/3b/97/04a65539c17692de5b85c6e293520fd01317fd878ea1995f0367d4532fb1/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3e8eeb0544f2eb0d2581774be4c3410356eba189529a6b3e36bbbf9696175856", size = 591148, upload-time = "2025-11-30T20:24:08.445Z" }, { url = "https://files.pythonhosted.org/packages/85/70/92482ccffb96f5441aab93e26c4d66489eb599efdcf96fad90c14bbfb976/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:dbd936cde57abfee19ab3213cf9c26be06d60750e60a8e4dd85d1ab12c8b1f40", size = 556030, upload-time = "2025-11-30T20:24:10.956Z" }, + { url = "https://files.pythonhosted.org/packages/20/53/7c7e784abfa500a2b6b583b147ee4bb5a2b3747a9166bab52fec4b5b5e7d/rpds_py-0.30.0-cp314-cp314t-win32.whl", hash = "sha256:dc824125c72246d924f7f796b4f63c1e9dc810c7d9e2355864b3c3a73d59ade0", size = 211570, upload-time = "2025-11-30T20:24:12.735Z" }, + { url = "https://files.pythonhosted.org/packages/d0/02/fa464cdfbe6b26e0600b62c528b72d8608f5cc49f96b8d6e38c95d60c676/rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3", size = 226532, upload-time = "2025-11-30T20:24:14.634Z" }, ] [[package]] name = "ruff" -version = "0.15.10" +version = "0.15.12" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/d9/aa3f7d59a10ef6b14fe3431706f854dbf03c5976be614a9796d36326810c/ruff-0.15.10.tar.gz", hash = "sha256:d1f86e67ebfdef88e00faefa1552b5e510e1d35f3be7d423dc7e84e63788c94e", size = 4631728, upload-time = "2026-04-09T14:06:09.884Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/99/43/3291f1cc9106f4c63bdce7a8d0df5047fe8422a75b091c16b5e9355e0b11/ruff-0.15.12.tar.gz", hash = "sha256:ecea26adb26b4232c0c2ca19ccbc0083a68344180bba2a600605538ce51a40a6", size = 4643852, upload-time = "2026-04-24T18:17:14.305Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/eb/00/a1c2fdc9939b2c03691edbda290afcd297f1f389196172826b03d6b6a595/ruff-0.15.10-py3-none-linux_armv6l.whl", hash = "sha256:0744e31482f8f7d0d10a11fcbf897af272fefdfcb10f5af907b18c2813ff4d5f", size = 10563362, upload-time = "2026-04-09T14:06:21.189Z" }, - { url = "https://files.pythonhosted.org/packages/5c/15/006990029aea0bebe9d33c73c3e28c80c391ebdba408d1b08496f00d422d/ruff-0.15.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b1e7c16ea0ff5a53b7c2df52d947e685973049be1cdfe2b59a9c43601897b22e", size = 10951122, upload-time = "2026-04-09T14:06:02.236Z" }, - { url = "https://files.pythonhosted.org/packages/f2/c0/4ac978fe874d0618c7da647862afe697b281c2806f13ce904ad652fa87e4/ruff-0.15.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:93cc06a19e5155b4441dd72808fdf84290d84ad8a39ca3b0f994363ade4cebb1", size = 10314005, upload-time = "2026-04-09T14:06:00.026Z" }, - { url = "https://files.pythonhosted.org/packages/da/73/c209138a5c98c0d321266372fc4e33ad43d506d7e5dd817dd89b60a8548f/ruff-0.15.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83e1dd04312997c99ea6965df66a14fb4f03ba978564574ffc68b0d61fd3989e", size = 10643450, upload-time = "2026-04-09T14:05:42.137Z" }, - { url = "https://files.pythonhosted.org/packages/ec/76/0deec355d8ec10709653635b1f90856735302cb8e149acfdf6f82a5feb70/ruff-0.15.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8154d43684e4333360fedd11aaa40b1b08a4e37d8ffa9d95fee6fa5b37b6fab1", size = 10379597, upload-time = "2026-04-09T14:05:49.984Z" }, - { url = 
"https://files.pythonhosted.org/packages/dc/be/86bba8fc8798c081e28a4b3bb6d143ccad3fd5f6f024f02002b8f08a9fa3/ruff-0.15.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ab88715f3a6deb6bde6c227f3a123410bec7b855c3ae331b4c006189e895cef", size = 11146645, upload-time = "2026-04-09T14:06:12.246Z" }, - { url = "https://files.pythonhosted.org/packages/a8/89/140025e65911b281c57be1d385ba1d932c2366ca88ae6663685aed8d4881/ruff-0.15.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a768ff5969b4f44c349d48edf4ab4f91eddb27fd9d77799598e130fb628aa158", size = 12030289, upload-time = "2026-04-09T14:06:04.776Z" }, - { url = "https://files.pythonhosted.org/packages/88/de/ddacca9545a5e01332567db01d44bd8cf725f2db3b3d61a80550b48308ea/ruff-0.15.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ee3ef42dab7078bda5ff6a1bcba8539e9857deb447132ad5566a038674540d0", size = 11496266, upload-time = "2026-04-09T14:05:55.485Z" }, - { url = "https://files.pythonhosted.org/packages/bc/bb/7ddb00a83760ff4a83c4e2fc231fd63937cc7317c10c82f583302e0f6586/ruff-0.15.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51cb8cc943e891ba99989dd92d61e29b1d231e14811db9be6440ecf25d5c1609", size = 11256418, upload-time = "2026-04-09T14:05:57.69Z" }, - { url = "https://files.pythonhosted.org/packages/dc/8d/55de0d35aacf6cd50b6ee91ee0f291672080021896543776f4170fc5c454/ruff-0.15.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:e59c9bdc056a320fb9ea1700a8d591718b8faf78af065484e801258d3a76bc3f", size = 11288416, upload-time = "2026-04-09T14:05:44.695Z" }, - { url = "https://files.pythonhosted.org/packages/68/cf/9438b1a27426ec46a80e0a718093c7f958ef72f43eb3111862949ead3cc1/ruff-0.15.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:136c00ca2f47b0018b073f28cb5c1506642a830ea941a60354b0e8bc8076b151", size = 10621053, upload-time = "2026-04-09T14:05:52.782Z" }, - { url = 
"https://files.pythonhosted.org/packages/4c/50/e29be6e2c135e9cd4cb15fbade49d6a2717e009dff3766dd080fcb82e251/ruff-0.15.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8b80a2f3c9c8a950d6237f2ca12b206bccff626139be9fa005f14feb881a1ae8", size = 10378302, upload-time = "2026-04-09T14:06:14.361Z" }, - { url = "https://files.pythonhosted.org/packages/18/2f/e0b36a6f99c51bb89f3a30239bc7bf97e87a37ae80aa2d6542d6e5150364/ruff-0.15.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:e3e53c588164dc025b671c9df2462429d60357ea91af7e92e9d56c565a9f1b07", size = 10850074, upload-time = "2026-04-09T14:06:16.581Z" }, - { url = "https://files.pythonhosted.org/packages/11/08/874da392558ce087a0f9b709dc6ec0d60cbc694c1c772dab8d5f31efe8cb/ruff-0.15.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b0c52744cf9f143a393e284125d2576140b68264a93c6716464e129a3e9adb48", size = 11358051, upload-time = "2026-04-09T14:06:18.948Z" }, - { url = "https://files.pythonhosted.org/packages/e4/46/602938f030adfa043e67112b73821024dc79f3ab4df5474c25fa4c1d2d14/ruff-0.15.10-py3-none-win32.whl", hash = "sha256:d4272e87e801e9a27a2e8df7b21011c909d9ddd82f4f3281d269b6ba19789ca5", size = 10588964, upload-time = "2026-04-09T14:06:07.14Z" }, - { url = "https://files.pythonhosted.org/packages/25/b6/261225b875d7a13b33a6d02508c39c28450b2041bb01d0f7f1a83d569512/ruff-0.15.10-py3-none-win_amd64.whl", hash = "sha256:28cb32d53203242d403d819fd6983152489b12e4a3ae44993543d6fe62ab42ed", size = 11745044, upload-time = "2026-04-09T14:05:39.473Z" }, - { url = "https://files.pythonhosted.org/packages/58/ed/dea90a65b7d9e69888890fb14c90d7f51bf0c1e82ad800aeb0160e4bacfd/ruff-0.15.10-py3-none-win_arm64.whl", hash = "sha256:601d1610a9e1f1c2165a4f561eeaa2e2ea1e97f3287c5aa258d3dab8b57c6188", size = 11035607, upload-time = "2026-04-09T14:05:47.593Z" }, + { url = "https://files.pythonhosted.org/packages/c3/6e/e78ffb61d4686f3d96ba3df2c801161843746dcbcbb17a1e927d4829312b/ruff-0.15.12-py3-none-linux_armv6l.whl", hash = 
"sha256:f86f176e188e94d6bdbc09f09bfd9dc729059ad93d0e7390b5a73efe19f8861c", size = 10640713, upload-time = "2026-04-24T18:17:22.841Z" }, + { url = "https://files.pythonhosted.org/packages/ae/08/a317bc231fb9e7b93e4ef3089501e51922ff88d6936ce5cf870c4fe55419/ruff-0.15.12-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:e3bcd123364c3770b8e1b7baaf343cc99a35f197c5c6e8af79015c666c423a6c", size = 11069267, upload-time = "2026-04-24T18:17:30.105Z" }, + { url = "https://files.pythonhosted.org/packages/aa/a4/f828e9718d3dce1f5f11c39c4f65afd32783c8b2aebb2e3d259e492c47bd/ruff-0.15.12-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fe87510d000220aa1ed530d4448a7c696a0cae1213e5ec30e5874287b66557b5", size = 10397182, upload-time = "2026-04-24T18:17:07.177Z" }, + { url = "https://files.pythonhosted.org/packages/71/e0/3310fc6d1b5e1fdea22bf3b1b807c7e187b581021b0d7d4514cccdb5fb71/ruff-0.15.12-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84a1630093121375a3e2a95b4a6dc7b59e2b4ee76216e32d81aae550a832d002", size = 10758012, upload-time = "2026-04-24T18:16:55.759Z" }, + { url = "https://files.pythonhosted.org/packages/11/c1/a606911aee04c324ddaa883ae418f3569792fd3c4a10c50e0dd0a2311e1e/ruff-0.15.12-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fb129f40f114f089ebe0ca56c0d251cf2061b17651d464bb6478dc01e69f11f5", size = 10447479, upload-time = "2026-04-24T18:16:51.677Z" }, + { url = "https://files.pythonhosted.org/packages/9d/68/4201e8444f0894f21ab4aeeaee68aa4f10b51613514a20d80bd628d57e88/ruff-0.15.12-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0c862b172d695db7598426b8af465e7e9ac00a3ea2a3630ee67eb82e366aaa6", size = 11234040, upload-time = "2026-04-24T18:17:16.529Z" }, + { url = "https://files.pythonhosted.org/packages/34/ff/8a6d6cf4ccc23fd67060874e832c18919d1557a0611ebef03fdb01fff11e/ruff-0.15.12-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:2849ea9f3484c3aca43a82f484210370319e7170df4dfe4843395ddf6c57bc33", size = 12087377, upload-time = "2026-04-24T18:17:04.944Z" }, + { url = "https://files.pythonhosted.org/packages/85/f6/c669cf73f5152f623d34e69866a46d5e6185816b19fcd5b6dd8a2d299922/ruff-0.15.12-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9e77c7e51c07fe396826d5969a5b846d9cd4c402535835fb6e21ce8b28fef847", size = 11367784, upload-time = "2026-04-24T18:17:25.409Z" }, + { url = "https://files.pythonhosted.org/packages/e8/39/c61d193b8a1daaa8977f7dea9e8d8ba866e02ea7b65d32f6861693aa4c12/ruff-0.15.12-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b2f4f2f3b1026b5fb449b467d9264bf22067b600f7b6f41fc5958909f449d0", size = 11344088, upload-time = "2026-04-24T18:17:12.258Z" }, + { url = "https://files.pythonhosted.org/packages/c2/8d/49afab3645e31e12c590acb6d3b5b69d7aab5b81926dbaf7461f9441f37a/ruff-0.15.12-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:9ba3b8f1afd7e2e43d8943e55f249e13f9682fde09711644a6e7290eb4f3e339", size = 11271770, upload-time = "2026-04-24T18:17:02.457Z" }, + { url = "https://files.pythonhosted.org/packages/46/06/33f41fe94403e2b755481cdfb9b7ef3e4e0ed031c4581124658d935d52b4/ruff-0.15.12-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e852ba9fdc890655e1d78f2df1499efbe0e54126bd405362154a75e2bde159c5", size = 10719355, upload-time = "2026-04-24T18:17:27.648Z" }, + { url = "https://files.pythonhosted.org/packages/0d/59/18aa4e014debbf559670e4048e39260a85c7fcee84acfd761ac01e7b8d35/ruff-0.15.12-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:dd8aed930da53780d22fc70bdf84452c843cf64f8cb4eb38984319c24c5cd5fd", size = 10462758, upload-time = "2026-04-24T18:17:32.347Z" }, + { url = "https://files.pythonhosted.org/packages/25/e7/cc9f16fd0f3b5fddcbd7ec3d6ae30c8f3fde1047f32a4093a98d633c6570/ruff-0.15.12-py3-none-musllinux_1_2_i686.whl", hash = "sha256:01da3988d225628b709493d7dc67c3b9b12c0210016b08690ef9bd27970b262b", size = 10953498, 
upload-time = "2026-04-24T18:17:20.674Z" }, + { url = "https://files.pythonhosted.org/packages/72/7a/a9ba7f98c7a575978698f4230c5e8cc54bbc761af34f560818f933dafa0c/ruff-0.15.12-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:9cae0f92bd5700d1213188b31cd3bdd2b315361296d10b96b8e2337d3d11f53e", size = 11447765, upload-time = "2026-04-24T18:17:09.755Z" }, + { url = "https://files.pythonhosted.org/packages/ea/f9/0ae446942c846b8266059ad8a30702a35afae55f5cdc54c5adf8d7afdc27/ruff-0.15.12-py3-none-win32.whl", hash = "sha256:d0185894e038d7043ba8fd6aee7499ece6462dc0ea9f1e260c7451807c714c20", size = 10657277, upload-time = "2026-04-24T18:17:18.591Z" }, + { url = "https://files.pythonhosted.org/packages/33/f1/9614e03e1cdcbf9437570b5400ced8a720b5db22b28d8e0f1bda429f660d/ruff-0.15.12-py3-none-win_amd64.whl", hash = "sha256:c87a162d61ab3adca47c03f7f717c68672edec7d1b5499e652331780fe74950d", size = 11837758, upload-time = "2026-04-24T18:17:00.113Z" }, + { url = "https://files.pythonhosted.org/packages/c0/98/6beb4b351e472e5f4c4613f7c35a5290b8be2497e183825310c4c3a3984b/ruff-0.15.12-py3-none-win_arm64.whl", hash = "sha256:a538f7a82d061cee7be55542aca1d86d1393d55d81d4fcc314370f4340930d4f", size = 11120821, upload-time = "2026-04-24T18:16:57.979Z" }, ] [[package]] @@ -5161,17 +5758,26 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/07/39/338d9219c4e87f3e708f18857ecd24d22a0c3094752393319553096b98af/scipy-1.17.1-cp314-cp314t-win_arm64.whl", hash = "sha256:200e1050faffacc162be6a486a984a0497866ec54149a01270adc8a59b7c7d21", size = 25489165, upload-time = "2026-02-23T00:22:29.563Z" }, ] +[[package]] +name = "send2trash" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c5/f0/184b4b5f8d00f2a92cf96eec8967a3d550b52cf94362dad1100df9e48d57/send2trash-2.1.0.tar.gz", hash = "sha256:1c72b39f09457db3c05ce1d19158c2cbef4c32b8bedd02c155e49282b7ea7459", size = 17255, upload-time = "2026-01-14T06:27:36.056Z" } 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/78/504fdd027da3b84ff1aecd9f6957e65f35134534ccc6da8628eb71e76d3f/send2trash-2.1.0-py3-none-any.whl", hash = "sha256:0da2f112e6d6bb22de6aa6daa7e144831a4febf2a87261451c4ad849fe9a873c", size = 17610, upload-time = "2026-01-14T06:27:35.218Z" }, +] + [[package]] name = "sentry-sdk" -version = "2.57.0" +version = "2.58.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/4f/87/46c0406d8b5ddd026f73adaf5ab75ce144219c41a4830b52df4b9ab55f7f/sentry_sdk-2.57.0.tar.gz", hash = "sha256:4be8d1e71c32fb27f79c577a337ac8912137bba4bcbc64a4ec1da4d6d8dc5199", size = 435288, upload-time = "2026-03-31T09:39:29.264Z" } +sdist = { url = "https://files.pythonhosted.org/packages/26/b3/fb8291170d0e844173164709fc0fa0c221ed75a5da740c8746f2a83b4eb1/sentry_sdk-2.58.0.tar.gz", hash = "sha256:c1144d947352d54e5b7daa63596d9f848adf684989c06c4f5a659f0c85a18f6f", size = 438764, upload-time = "2026-04-13T17:23:26.265Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c9/64/982e07b93219cb52e1cca5d272cb579e2f3eb001956c9e7a9a6d106c9473/sentry_sdk-2.57.0-py2.py3-none-any.whl", hash = "sha256:812c8bf5ff3d2f0e89c82f5ce80ab3a6423e102729c4706af7413fd1eb480585", size = 456489, upload-time = "2026-03-31T09:39:27.524Z" }, + { url = "https://files.pythonhosted.org/packages/fa/eb/d875669993b762556ae8b2efd86219943b4c0864d22204d622a9aee3052b/sentry_sdk-2.58.0-py2.py3-none-any.whl", hash = "sha256:688d1c704ddecf382ea3326f21a67453d4caa95592d722b7c780a36a9d23109e", size = 460919, upload-time = "2026-04-13T17:23:24.675Z" }, ] [[package]] @@ -5261,6 +5867,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c1/d4/59e74daffcb57a07668852eeeb6035af9f32cbfd7a1d2511f17d2fe6a738/smmap-5.0.3-py3-none-any.whl", hash = "sha256:c106e05d5a61449cf6ba9a1e650227ecfb141590d2a98412103ff35d89fc7b2f", size = 24390, upload-time = 
"2026-03-09T03:43:24.361Z" }, ] +[[package]] +name = "soupsieve" +version = "2.8.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7b/ae/2d9c981590ed9999a0d91755b47fc74f74de286b0f5cee14c9269041e6c4/soupsieve-2.8.3.tar.gz", hash = "sha256:3267f1eeea4251fb42728b6dfb746edc9acaffc4a45b27e19450b676586e8349", size = 118627, upload-time = "2026-01-20T04:27:02.457Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/2c/1462b1d0a634697ae9e55b3cecdcb64788e8b7d63f54d923fcd0bb140aed/soupsieve-2.8.3-py3-none-any.whl", hash = "sha256:ed64f2ba4eebeab06cc4962affce381647455978ffc1e36bb79a545b91f45a95", size = 37016, upload-time = "2026-01-20T04:27:01.012Z" }, +] + [[package]] name = "stack-data" version = "0.6.3" @@ -5302,7 +5917,7 @@ wheels = [ [[package]] name = "teleop" -version = "0.1.4" +version = "0.1.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "fastapi" }, @@ -5313,9 +5928,9 @@ dependencies = [ { name = "uvicorn", extra = ["standard"] }, { name = "websocket-client" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ec/8c/902ef4c0fa148325e6b19a5af63c3aac5927c67551efabcd5732fc446c6d/teleop-0.1.4.tar.gz", hash = "sha256:b5cedcff336c612a3f7e6f93e379e24979ed42070903b722f5fefe07c8fca3ce", size = 44051, upload-time = "2025-12-08T10:49:45.823Z" } +sdist = { url = "https://files.pythonhosted.org/packages/87/dc/312c19122c8e64fcff16dc8a74659b84ba8a7bcd3ef7b3c330cfc65a2a29/teleop-0.1.5.tar.gz", hash = "sha256:9f5367b167e0f67abe818f346c467671bd2c1ad653df604bdfb2fa69b2937da9", size = 44173, upload-time = "2026-04-19T21:17:42.795Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b0/9c/217176617df23f634b0388111adbeb17ccb0409072639a97512e6c1c818d/teleop-0.1.4-py3-none-any.whl", hash = "sha256:6b8013947b27b89dbce50f9231a57d29f2e59ea864807b1ce6611ea3ad1694f4", size = 42332, upload-time = "2025-12-08T10:49:44.531Z" }, + { url = 
"https://files.pythonhosted.org/packages/f2/d1/45c79fcbf2551f2035c375e81d560c4ac46a5bbdb1622583b559eedcfc4e/teleop-0.1.5-py3-none-any.whl", hash = "sha256:75c3e63bb9eed1ea8ca32b48086cea45fa5ae3eb022dd0dcf0d615cf0b0d58dc", size = 42380, upload-time = "2026-04-19T21:17:41.386Z" }, ] [[package]] @@ -5370,6 +5985,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/33/d1/8bb87d21e9aeb323cc03034f5eaf2c8f69841e40e4853c2627edf8111ed3/termcolor-3.3.0-py3-none-any.whl", hash = "sha256:cf642efadaf0a8ebbbf4bc7a31cec2f9b5f21a9f726f4ccbb08192c9c26f43a5", size = 7734, upload-time = "2025-12-29T12:55:20.718Z" }, ] +[[package]] +name = "terminado" +version = "0.18.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess", marker = "os_name != 'nt'" }, + { name = "pywinpty", marker = "os_name == 'nt' and sys_platform != 'linux'" }, + { name = "tornado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/11/965c6fd8e5cc254f1fe142d547387da17a8ebfd75a3455f637c663fb38a0/terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e", size = 32701, upload-time = "2024-03-12T14:34:39.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/9e/2064975477fdc887e47ad42157e214526dcad8f317a948dee17e1659a62f/terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0", size = 14154, upload-time = "2024-03-12T14:34:36.569Z" }, +] + [[package]] name = "thop" version = "0.1.1.post2209072238" @@ -5409,6 +6038,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6f/e9/bebf3d50e3fc847378988235f87c37ad3ac26d386041ab915d15e92025cd/timm-1.0.26-py3-none-any.whl", hash = "sha256:985c330de5ccc3a2aa0224eb7272e6a336084702390bb7e3801f3c91603d3683", size = 2568766, upload-time = "2026-03-23T18:12:08.062Z" }, ] +[[package]] +name = "tinycss2" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "webencodings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7a/fd/7a5ee21fd08ff70d3d33a5781c255cbe779659bd03278feb98b19ee550f4/tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7", size = 87085, upload-time = "2024-10-24T14:58:29.895Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/34/ebdc18bae6aa14fbee1a08b63c015c72b64868ff7dae68808ab500c492e2/tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289", size = 26610, upload-time = "2024-10-24T14:58:28.029Z" }, +] + [[package]] name = "tokenizers" version = "0.22.2" @@ -5646,7 +6287,7 @@ wheels = [ [[package]] name = "typer" -version = "0.24.1" +version = "0.25.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-doc" }, @@ -5654,9 +6295,9 @@ dependencies = [ { name = "rich" }, { name = "shellingham" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f5/24/cb09efec5cc954f7f9b930bf8279447d24618bb6758d4f6adf2574c41780/typer-0.24.1.tar.gz", hash = "sha256:e39b4732d65fbdcde189ae76cf7cd48aeae72919dea1fdfc16593be016256b45", size = 118613, upload-time = "2026-02-21T16:54:40.609Z" } +sdist = { url = "https://files.pythonhosted.org/packages/7b/27/ede8cec7596e0041ba7e7b80b47d132562f56ff454313a16f6084e555c9f/typer-0.25.0.tar.gz", hash = "sha256:123eaf9f19bb40fd268310e12a542c0c6b4fab9c98d9d23342a01ff95e3ce930", size = 120150, upload-time = "2026-04-26T08:46:14.767Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4a/91/48db081e7a63bb37284f9fbcefda7c44c277b18b0e13fbc36ea2335b71e6/typer-0.24.1-py3-none-any.whl", hash = "sha256:112c1f0ce578bfb4cab9ffdabc68f031416ebcc216536611ba21f04e9aa84c9e", size = 56085, upload-time = "2026-02-21T16:54:41.616Z" }, + { url = 
"https://files.pythonhosted.org/packages/9a/72/193d4e586ec5a4db834a36bbeb47641a62f951f114ffd0fe5b1b46e8d56f/typer-0.25.0-py3-none-any.whl", hash = "sha256:ac01b48823d3db9a83c9e164338057eadbb1c9957a2a6b4eeb486669c560b5dc", size = 55993, upload-time = "2026-04-26T08:46:15.889Z" }, ] [[package]] @@ -5695,11 +6336,11 @@ wheels = [ [[package]] name = "tzdata" -version = "2026.1" +version = "2026.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/19/f5/cd531b2d15a671a40c0f66cf06bc3570a12cd56eef98960068ebbad1bf5a/tzdata-2026.1.tar.gz", hash = "sha256:67658a1903c75917309e753fdc349ac0efd8c27db7a0cb406a25be4840f87f98", size = 197639, upload-time = "2026-04-03T11:25:22.002Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/19/1b9b0e29f30c6d35cb345486df41110984ea67ae69dddbc0e8a100999493/tzdata-2026.2.tar.gz", hash = "sha256:9173fde7d80d9018e02a662e168e5a2d04f87c41ea174b139fbef642eda62d10", size = 198254, upload-time = "2026-04-24T15:22:08.651Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b0/70/d460bd685a170790ec89317e9bd33047988e4bce507b831f5db771e142de/tzdata-2026.1-py2.py3-none-any.whl", hash = "sha256:4b1d2be7ac37ceafd7327b961aa3a54e467efbdb563a23655fbfe0d39cfc42a9", size = 348952, upload-time = "2026-04-03T11:25:20.313Z" }, + { url = "https://files.pythonhosted.org/packages/ce/e4/dccd7f47c4b64213ac01ef921a1337ee6e30e8c6466046018326977efd95/tzdata-2026.2-py2.py3-none-any.whl", hash = "sha256:bbe9af844f658da81a5f95019480da3a89415801f6cc966806612cc7169bffe7", size = 349321, upload-time = "2026-04-24T15:22:05.876Z" }, ] [[package]] @@ -5711,6 +6352,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b1/5e/512aeb40fd819f4660d00f96f5c7371ee36fc8c6b605128c5ee59e0b28c6/u_msgpack_python-2.8.0-py2.py3-none-any.whl", hash = "sha256:1d853d33e78b72c4228a2025b4db28cda81214076e5b0422ed0ae1b1b2bb586a", size = 10590, upload-time = "2023-05-18T09:28:10.323Z" }, ] +[[package]] +name = 
"uri-template" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/31/c7/0336f2bd0bcbada6ccef7aaa25e443c118a704f828a0620c6fa0207c1b64/uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7", size = 21678, upload-time = "2023-06-21T01:49:05.374Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/00/3fca040d7cf8a32776d3d81a00c8ee7457e00f80c649f1e4a863c8321ae9/uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363", size = 11140, upload-time = "2023-06-21T01:49:03.467Z" }, +] + [[package]] name = "urllib3" version = "2.6.3" @@ -5722,15 +6372,15 @@ wheels = [ [[package]] name = "uvicorn" -version = "0.44.0" +version = "0.46.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "h11" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5e/da/6eee1ff8b6cbeed47eeb5229749168e81eb4b7b999a1a15a7176e51410c9/uvicorn-0.44.0.tar.gz", hash = "sha256:6c942071b68f07e178264b9152f1f16dfac5da85880c4ce06366a96d70d4f31e", size = 86947, upload-time = "2026-04-06T09:23:22.826Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1f/93/041fca8274050e40e6791f267d82e0e2e27dd165627bd640d3e0e378d877/uvicorn-0.46.0.tar.gz", hash = "sha256:fb9da0926999cc6cb22dc7cd71a94a632f078e6ae47ff683c5c420750fb7413d", size = 88758, upload-time = "2026-04-23T07:16:00.151Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/23/a5bbd9600dd607411fa644c06ff4951bec3a4d82c4b852374024359c19c0/uvicorn-0.44.0-py3-none-any.whl", hash = "sha256:ce937c99a2cc70279556967274414c087888e8cec9f9c94644dfca11bd3ced89", size = 69425, upload-time = "2026-04-06T09:23:21.524Z" }, + { url = "https://files.pythonhosted.org/packages/31/a3/5b1562db76a5a488274b2332a97199b32d0442aca0ed193697fd47786316/uvicorn-0.46.0-py3-none-any.whl", hash = 
"sha256:bbebbcbed972d162afca128605223022bedd345b7bc7855ce66deb31487a9048", size = 70926, upload-time = "2026-04-23T07:15:58.355Z" }, ] [package.optional-dependencies] @@ -5778,7 +6428,7 @@ wheels = [ [[package]] name = "virtualenv" -version = "21.2.1" +version = "21.2.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "distlib" }, @@ -5786,9 +6436,9 @@ dependencies = [ { name = "platformdirs" }, { name = "python-discovery" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/97/c5/aff062c66b42e2183201a7ace10c6b2e959a9a16525c8e8ca8e59410d27a/virtualenv-21.2.1.tar.gz", hash = "sha256:b66ffe81301766c0d5e2208fc3576652c59d44e7b731fc5f5ed701c9b537fa78", size = 5844770, upload-time = "2026-04-09T18:47:11.482Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0c/98/3a7e644e19cb26133488caff231be390579860bbbb3da35913c49a1d0a46/virtualenv-21.2.4.tar.gz", hash = "sha256:b294ef68192638004d72524ce7ef303e9d0cf5a44c95ce2e54a7500a6381cada", size = 5850742, upload-time = "2026-04-14T22:15:31.438Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/20/0e/f083a76cb590e60dff3868779558eefefb8dfb7c9ed020babc7aa014ccbf/virtualenv-21.2.1-py3-none-any.whl", hash = "sha256:bd16b49c53562b28cf1a3ad2f36edb805ad71301dee70ddc449e5c88a9f919a2", size = 5828326, upload-time = "2026-04-09T18:47:09.331Z" }, + { url = "https://files.pythonhosted.org/packages/27/8d/edd0bd910ff803c308ee9a6b7778621af0d10252219ad9f19ef4d4982a61/virtualenv-21.2.4-py3-none-any.whl", hash = "sha256:29d21e941795206138d0f22f4e45ff7050e5da6c6472299fb7103318763861ac", size = 5831232, upload-time = "2026-04-14T22:15:29.342Z" }, ] [[package]] @@ -5899,6 +6549,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/68/5a/199c59e0a824a3db2b89c5d2dade7ab5f9624dbf6448dc291b46d5ec94d3/wcwidth-0.6.0-py3-none-any.whl", hash = "sha256:1a3a1e510b553315f8e146c54764f4fb6264ffad731b3d78088cdb1478ffbdad", size = 94189, upload-time = "2026-02-06T19:19:39.646Z" }, ] 
+[[package]] +name = "webcolors" +version = "25.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/7a/eb316761ec35664ea5174709a68bbd3389de60d4a1ebab8808bfc264ed67/webcolors-25.10.0.tar.gz", hash = "sha256:62abae86504f66d0f6364c2a8520de4a0c47b80c03fc3a5f1815fedbef7c19bf", size = 53491, upload-time = "2025-10-31T07:51:03.977Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/cc/e097523dd85c9cf5d354f78310927f1656c422bd7b2613b2db3e3f9a0f2c/webcolors-25.10.0-py3-none-any.whl", hash = "sha256:032c727334856fc0b968f63daa252a1ac93d33db2f5267756623c210e57a4f1d", size = 14905, upload-time = "2025-10-31T07:51:01.778Z" }, +] + +[[package]] +name = "webencodings" +version = "0.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/02/ae6ceac1baeda530866a85075641cec12989bd8d31af6d5ab4a3e8c92f47/webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923", size = 9721, upload-time = "2017-04-05T20:21:34.189Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", size = 11774, upload-time = "2017-04-05T20:21:32.581Z" }, +] + [[package]] name = "websocket-client" version = "1.9.0" @@ -5965,6 +6633,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/93/8c/2e650f2afeb7ee576912636c23ddb621c91ac6a98e66dc8d29c3c69446e1/werkzeug-3.1.8-py3-none-any.whl", hash = "sha256:63a77fb8892bf28ebc3178683445222aa500e48ebad5ec77b0ad80f8726b1f50", size = 226459, upload-time = "2026-04-02T18:49:12.72Z" }, ] +[[package]] +name = "widgetsnbextension" +version = "4.0.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/bd/f4/c67440c7fb409a71b7404b7aefcd7569a9c0d6bd071299bf4198ae7a5d95/widgetsnbextension-4.0.15.tar.gz", hash = "sha256:de8610639996f1567952d763a5a41af8af37f2575a41f9852a38f947eb82a3b9", size = 1097402, upload-time = "2025-11-01T21:15:55.178Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/0e/fa3b193432cfc60c93b42f3be03365f5f909d2b3ea410295cf36df739e31/widgetsnbextension-4.0.15-py3-none-any.whl", hash = "sha256:8156704e4346a571d9ce73b84bee86a29906c9abfd7223b7228a28899ccf3366", size = 2196503, upload-time = "2025-11-01T21:15:53.565Z" }, +] + [[package]] name = "wrapt" version = "1.17.3" @@ -6016,85 +6693,115 @@ wheels = [ [[package]] name = "xxhash" -version = "3.6.0" +version = "3.7.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/02/84/30869e01909fb37a6cc7e18688ee8bf1e42d57e7e0777636bd47524c43c7/xxhash-3.6.0.tar.gz", hash = "sha256:f0162a78b13a0d7617b2845b90c763339d1f1d82bb04a4b07f4ab535cc5e05d6", size = 85160, upload-time = "2025-10-02T14:37:08.097Z" } +sdist = { url = "https://files.pythonhosted.org/packages/24/2f/e183a1b407002f5af81822bee18b61cdb94b8670208ef34734d8d2b8ebe9/xxhash-3.7.0.tar.gz", hash = "sha256:6cc4eefbb542a5d6ffd6d70ea9c502957c925e800f998c5630ecc809d6702bae", size = 82022, upload-time = "2026-04-25T11:10:32.553Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9a/07/d9412f3d7d462347e4511181dea65e47e0d0e16e26fbee2ea86a2aefb657/xxhash-3.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:01362c4331775398e7bb34e3ab403bc9ee9f7c497bc7dee6272114055277dd3c", size = 32744, upload-time = "2025-10-02T14:34:34.622Z" }, - { url = "https://files.pythonhosted.org/packages/79/35/0429ee11d035fc33abe32dca1b2b69e8c18d236547b9a9b72c1929189b9a/xxhash-3.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b7b2df81a23f8cb99656378e72501b2cb41b1827c0f5a86f87d6b06b69f9f204", size = 30816, upload-time = "2025-10-02T14:34:36.043Z" 
}, - { url = "https://files.pythonhosted.org/packages/b7/f2/57eb99aa0f7d98624c0932c5b9a170e1806406cdbcdb510546634a1359e0/xxhash-3.6.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dc94790144e66b14f67b10ac8ed75b39ca47536bf8800eb7c24b50271ea0c490", size = 194035, upload-time = "2025-10-02T14:34:37.354Z" }, - { url = "https://files.pythonhosted.org/packages/4c/ed/6224ba353690d73af7a3f1c7cdb1fc1b002e38f783cb991ae338e1eb3d79/xxhash-3.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93f107c673bccf0d592cdba077dedaf52fe7f42dcd7676eba1f6d6f0c3efffd2", size = 212914, upload-time = "2025-10-02T14:34:38.6Z" }, - { url = "https://files.pythonhosted.org/packages/38/86/fb6b6130d8dd6b8942cc17ab4d90e223653a89aa32ad2776f8af7064ed13/xxhash-3.6.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aa5ee3444c25b69813663c9f8067dcfaa2e126dc55e8dddf40f4d1c25d7effa", size = 212163, upload-time = "2025-10-02T14:34:39.872Z" }, - { url = "https://files.pythonhosted.org/packages/ee/dc/e84875682b0593e884ad73b2d40767b5790d417bde603cceb6878901d647/xxhash-3.6.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7f99123f0e1194fa59cc69ad46dbae2e07becec5df50a0509a808f90a0f03f0", size = 445411, upload-time = "2025-10-02T14:34:41.569Z" }, - { url = "https://files.pythonhosted.org/packages/11/4f/426f91b96701ec2f37bb2b8cec664eff4f658a11f3fa9d94f0a887ea6d2b/xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49e03e6fe2cac4a1bc64952dd250cf0dbc5ef4ebb7b8d96bce82e2de163c82a2", size = 193883, upload-time = "2025-10-02T14:34:43.249Z" }, - { url = "https://files.pythonhosted.org/packages/53/5a/ddbb83eee8e28b778eacfc5a85c969673e4023cdeedcfcef61f36731610b/xxhash-3.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:bd17fede52a17a4f9a7bc4472a5867cb0b160deeb431795c0e4abe158bc784e9", size = 210392, upload-time = "2025-10-02T14:34:45.042Z" }, - { url = "https://files.pythonhosted.org/packages/1e/c2/ff69efd07c8c074ccdf0a4f36fcdd3d27363665bcdf4ba399abebe643465/xxhash-3.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6fb5f5476bef678f69db04f2bd1efbed3030d2aba305b0fc1773645f187d6a4e", size = 197898, upload-time = "2025-10-02T14:34:46.302Z" }, - { url = "https://files.pythonhosted.org/packages/58/ca/faa05ac19b3b622c7c9317ac3e23954187516298a091eb02c976d0d3dd45/xxhash-3.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:843b52f6d88071f87eba1631b684fcb4b2068cd2180a0224122fe4ef011a9374", size = 210655, upload-time = "2025-10-02T14:34:47.571Z" }, - { url = "https://files.pythonhosted.org/packages/d4/7a/06aa7482345480cc0cb597f5c875b11a82c3953f534394f620b0be2f700c/xxhash-3.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7d14a6cfaf03b1b6f5f9790f76880601ccc7896aff7ab9cd8978a939c1eb7e0d", size = 414001, upload-time = "2025-10-02T14:34:49.273Z" }, - { url = "https://files.pythonhosted.org/packages/23/07/63ffb386cd47029aa2916b3d2f454e6cc5b9f5c5ada3790377d5430084e7/xxhash-3.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:418daf3db71e1413cfe211c2f9a528456936645c17f46b5204705581a45390ae", size = 191431, upload-time = "2025-10-02T14:34:50.798Z" }, - { url = "https://files.pythonhosted.org/packages/0f/93/14fde614cadb4ddf5e7cebf8918b7e8fac5ae7861c1875964f17e678205c/xxhash-3.6.0-cp312-cp312-win32.whl", hash = "sha256:50fc255f39428a27299c20e280d6193d8b63b8ef8028995323bf834a026b4fbb", size = 30617, upload-time = "2025-10-02T14:34:51.954Z" }, - { url = "https://files.pythonhosted.org/packages/13/5d/0d125536cbe7565a83d06e43783389ecae0c0f2ed037b48ede185de477c0/xxhash-3.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:c0f2ab8c715630565ab8991b536ecded9416d615538be8ecddce43ccf26cbc7c", size = 31534, upload-time = "2025-10-02T14:34:53.276Z" }, - { url = 
"https://files.pythonhosted.org/packages/54/85/6ec269b0952ec7e36ba019125982cf11d91256a778c7c3f98a4c5043d283/xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829", size = 27876, upload-time = "2025-10-02T14:34:54.371Z" }, - { url = "https://files.pythonhosted.org/packages/33/76/35d05267ac82f53ae9b0e554da7c5e281ee61f3cad44c743f0fcd354f211/xxhash-3.6.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:599e64ba7f67472481ceb6ee80fa3bd828fd61ba59fb11475572cc5ee52b89ec", size = 32738, upload-time = "2025-10-02T14:34:55.839Z" }, - { url = "https://files.pythonhosted.org/packages/31/a8/3fbce1cd96534a95e35d5120637bf29b0d7f5d8fa2f6374e31b4156dd419/xxhash-3.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7d8b8aaa30fca4f16f0c84a5c8d7ddee0e25250ec2796c973775373257dde8f1", size = 30821, upload-time = "2025-10-02T14:34:57.219Z" }, - { url = "https://files.pythonhosted.org/packages/0c/ea/d387530ca7ecfa183cb358027f1833297c6ac6098223fd14f9782cd0015c/xxhash-3.6.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d597acf8506d6e7101a4a44a5e428977a51c0fadbbfd3c39650cca9253f6e5a6", size = 194127, upload-time = "2025-10-02T14:34:59.21Z" }, - { url = "https://files.pythonhosted.org/packages/ba/0c/71435dcb99874b09a43b8d7c54071e600a7481e42b3e3ce1eb5226a5711a/xxhash-3.6.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:858dc935963a33bc33490128edc1c12b0c14d9c7ebaa4e387a7869ecc4f3e263", size = 212975, upload-time = "2025-10-02T14:35:00.816Z" }, - { url = "https://files.pythonhosted.org/packages/84/7a/c2b3d071e4bb4a90b7057228a99b10d51744878f4a8a6dd643c8bd897620/xxhash-3.6.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba284920194615cb8edf73bf52236ce2e1664ccd4a38fdb543506413529cc546", size = 212241, upload-time = "2025-10-02T14:35:02.207Z" }, - { url = 
"https://files.pythonhosted.org/packages/81/5f/640b6eac0128e215f177df99eadcd0f1b7c42c274ab6a394a05059694c5a/xxhash-3.6.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b54219177f6c6674d5378bd862c6aedf64725f70dd29c472eaae154df1a2e89", size = 445471, upload-time = "2025-10-02T14:35:03.61Z" }, - { url = "https://files.pythonhosted.org/packages/5e/1e/3c3d3ef071b051cc3abbe3721ffb8365033a172613c04af2da89d5548a87/xxhash-3.6.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:42c36dd7dbad2f5238950c377fcbf6811b1cdb1c444fab447960030cea60504d", size = 193936, upload-time = "2025-10-02T14:35:05.013Z" }, - { url = "https://files.pythonhosted.org/packages/2c/bd/4a5f68381939219abfe1c22a9e3a5854a4f6f6f3c4983a87d255f21f2e5d/xxhash-3.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f22927652cba98c44639ffdc7aaf35828dccf679b10b31c4ad72a5b530a18eb7", size = 210440, upload-time = "2025-10-02T14:35:06.239Z" }, - { url = "https://files.pythonhosted.org/packages/eb/37/b80fe3d5cfb9faff01a02121a0f4d565eb7237e9e5fc66e73017e74dcd36/xxhash-3.6.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b45fad44d9c5c119e9c6fbf2e1c656a46dc68e280275007bbfd3d572b21426db", size = 197990, upload-time = "2025-10-02T14:35:07.735Z" }, - { url = "https://files.pythonhosted.org/packages/d7/fd/2c0a00c97b9e18f72e1f240ad4e8f8a90fd9d408289ba9c7c495ed7dc05c/xxhash-3.6.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6f2580ffab1a8b68ef2b901cde7e55fa8da5e4be0977c68f78fc80f3c143de42", size = 210689, upload-time = "2025-10-02T14:35:09.438Z" }, - { url = "https://files.pythonhosted.org/packages/93/86/5dd8076a926b9a95db3206aba20d89a7fc14dd5aac16e5c4de4b56033140/xxhash-3.6.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:40c391dd3cd041ebc3ffe6f2c862f402e306eb571422e0aa918d8070ba31da11", size = 414068, upload-time = "2025-10-02T14:35:11.162Z" }, - { url = 
"https://files.pythonhosted.org/packages/af/3c/0bb129170ee8f3650f08e993baee550a09593462a5cddd8e44d0011102b1/xxhash-3.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f205badabde7aafd1a31e8ca2a3e5a763107a71c397c4481d6a804eb5063d8bd", size = 191495, upload-time = "2025-10-02T14:35:12.971Z" }, - { url = "https://files.pythonhosted.org/packages/e9/3a/6797e0114c21d1725e2577508e24006fd7ff1d8c0c502d3b52e45c1771d8/xxhash-3.6.0-cp313-cp313-win32.whl", hash = "sha256:2577b276e060b73b73a53042ea5bd5203d3e6347ce0d09f98500f418a9fcf799", size = 30620, upload-time = "2025-10-02T14:35:14.129Z" }, - { url = "https://files.pythonhosted.org/packages/86/15/9bc32671e9a38b413a76d24722a2bf8784a132c043063a8f5152d390b0f9/xxhash-3.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:757320d45d2fbcce8f30c42a6b2f47862967aea7bf458b9625b4bbe7ee390392", size = 31542, upload-time = "2025-10-02T14:35:15.21Z" }, - { url = "https://files.pythonhosted.org/packages/39/c5/cc01e4f6188656e56112d6a8e0dfe298a16934b8c47a247236549a3f7695/xxhash-3.6.0-cp313-cp313-win_arm64.whl", hash = "sha256:457b8f85dec5825eed7b69c11ae86834a018b8e3df5e77783c999663da2f96d6", size = 27880, upload-time = "2025-10-02T14:35:16.315Z" }, - { url = "https://files.pythonhosted.org/packages/f3/30/25e5321c8732759e930c555176d37e24ab84365482d257c3b16362235212/xxhash-3.6.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a42e633d75cdad6d625434e3468126c73f13f7584545a9cf34e883aa1710e702", size = 32956, upload-time = "2025-10-02T14:35:17.413Z" }, - { url = "https://files.pythonhosted.org/packages/9f/3c/0573299560d7d9f8ab1838f1efc021a280b5ae5ae2e849034ef3dee18810/xxhash-3.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:568a6d743219e717b07b4e03b0a828ce593833e498c3b64752e0f5df6bfe84db", size = 31072, upload-time = "2025-10-02T14:35:18.844Z" }, - { url = 
"https://files.pythonhosted.org/packages/7a/1c/52d83a06e417cd9d4137722693424885cc9878249beb3a7c829e74bf7ce9/xxhash-3.6.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bec91b562d8012dae276af8025a55811b875baace6af510412a5e58e3121bc54", size = 196409, upload-time = "2025-10-02T14:35:20.31Z" }, - { url = "https://files.pythonhosted.org/packages/e3/8e/c6d158d12a79bbd0b878f8355432075fc82759e356ab5a111463422a239b/xxhash-3.6.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78e7f2f4c521c30ad5e786fdd6bae89d47a32672a80195467b5de0480aa97b1f", size = 215736, upload-time = "2025-10-02T14:35:21.616Z" }, - { url = "https://files.pythonhosted.org/packages/bc/68/c4c80614716345d55071a396cf03d06e34b5f4917a467faf43083c995155/xxhash-3.6.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3ed0df1b11a79856df5ffcab572cbd6b9627034c1c748c5566fa79df9048a7c5", size = 214833, upload-time = "2025-10-02T14:35:23.32Z" }, - { url = "https://files.pythonhosted.org/packages/7e/e9/ae27c8ffec8b953efa84c7c4a6c6802c263d587b9fc0d6e7cea64e08c3af/xxhash-3.6.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0e4edbfc7d420925b0dd5e792478ed393d6e75ff8fc219a6546fb446b6a417b1", size = 448348, upload-time = "2025-10-02T14:35:25.111Z" }, - { url = "https://files.pythonhosted.org/packages/d7/6b/33e21afb1b5b3f46b74b6bd1913639066af218d704cc0941404ca717fc57/xxhash-3.6.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fba27a198363a7ef87f8c0f6b171ec36b674fe9053742c58dd7e3201c1ab30ee", size = 196070, upload-time = "2025-10-02T14:35:26.586Z" }, - { url = "https://files.pythonhosted.org/packages/96/b6/fcabd337bc5fa624e7203aa0fa7d0c49eed22f72e93229431752bddc83d9/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:794fe9145fe60191c6532fa95063765529770edcdd67b3d537793e8004cabbfd", size = 212907, upload-time = "2025-10-02T14:35:28.087Z" }, - { url = "https://files.pythonhosted.org/packages/4b/d3/9ee6160e644d660fcf176c5825e61411c7f62648728f69c79ba237250143/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6105ef7e62b5ac73a837778efc331a591d8442f8ef5c7e102376506cb4ae2729", size = 200839, upload-time = "2025-10-02T14:35:29.857Z" }, - { url = "https://files.pythonhosted.org/packages/0d/98/e8de5baa5109394baf5118f5e72ab21a86387c4f89b0e77ef3e2f6b0327b/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:f01375c0e55395b814a679b3eea205db7919ac2af213f4a6682e01220e5fe292", size = 213304, upload-time = "2025-10-02T14:35:31.222Z" }, - { url = "https://files.pythonhosted.org/packages/7b/1d/71056535dec5c3177eeb53e38e3d367dd1d16e024e63b1cee208d572a033/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d706dca2d24d834a4661619dcacf51a75c16d65985718d6a7d73c1eeeb903ddf", size = 416930, upload-time = "2025-10-02T14:35:32.517Z" }, - { url = "https://files.pythonhosted.org/packages/dc/6c/5cbde9de2cd967c322e651c65c543700b19e7ae3e0aae8ece3469bf9683d/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f059d9faeacd49c0215d66f4056e1326c80503f51a1532ca336a385edadd033", size = 193787, upload-time = "2025-10-02T14:35:33.827Z" }, - { url = "https://files.pythonhosted.org/packages/19/fa/0172e350361d61febcea941b0cc541d6e6c8d65d153e85f850a7b256ff8a/xxhash-3.6.0-cp313-cp313t-win32.whl", hash = "sha256:1244460adc3a9be84731d72b8e80625788e5815b68da3da8b83f78115a40a7ec", size = 30916, upload-time = "2025-10-02T14:35:35.107Z" }, - { url = "https://files.pythonhosted.org/packages/ad/e6/e8cf858a2b19d6d45820f072eff1bea413910592ff17157cabc5f1227a16/xxhash-3.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b1e420ef35c503869c4064f4a2f2b08ad6431ab7b229a05cce39d74268bca6b8", size = 31799, upload-time = "2025-10-02T14:35:36.165Z" }, - { url = 
"https://files.pythonhosted.org/packages/56/15/064b197e855bfb7b343210e82490ae672f8bc7cdf3ddb02e92f64304ee8a/xxhash-3.6.0-cp313-cp313t-win_arm64.whl", hash = "sha256:ec44b73a4220623235f67a996c862049f375df3b1052d9899f40a6382c32d746", size = 28044, upload-time = "2025-10-02T14:35:37.195Z" }, - { url = "https://files.pythonhosted.org/packages/7e/5e/0138bc4484ea9b897864d59fce9be9086030825bc778b76cb5a33a906d37/xxhash-3.6.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a40a3d35b204b7cc7643cbcf8c9976d818cb47befcfac8bbefec8038ac363f3e", size = 32754, upload-time = "2025-10-02T14:35:38.245Z" }, - { url = "https://files.pythonhosted.org/packages/18/d7/5dac2eb2ec75fd771957a13e5dda560efb2176d5203f39502a5fc571f899/xxhash-3.6.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a54844be970d3fc22630b32d515e79a90d0a3ddb2644d8d7402e3c4c8da61405", size = 30846, upload-time = "2025-10-02T14:35:39.6Z" }, - { url = "https://files.pythonhosted.org/packages/fe/71/8bc5be2bb00deb5682e92e8da955ebe5fa982da13a69da5a40a4c8db12fb/xxhash-3.6.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:016e9190af8f0a4e3741343777710e3d5717427f175adfdc3e72508f59e2a7f3", size = 194343, upload-time = "2025-10-02T14:35:40.69Z" }, - { url = "https://files.pythonhosted.org/packages/e7/3b/52badfb2aecec2c377ddf1ae75f55db3ba2d321c5e164f14461c90837ef3/xxhash-3.6.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f6f72232f849eb9d0141e2ebe2677ece15adfd0fa599bc058aad83c714bb2c6", size = 213074, upload-time = "2025-10-02T14:35:42.29Z" }, - { url = "https://files.pythonhosted.org/packages/a2/2b/ae46b4e9b92e537fa30d03dbc19cdae57ed407e9c26d163895e968e3de85/xxhash-3.6.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:63275a8aba7865e44b1813d2177e0f5ea7eadad3dd063a21f7cf9afdc7054063", size = 212388, upload-time = "2025-10-02T14:35:43.929Z" }, - { url = 
"https://files.pythonhosted.org/packages/f5/80/49f88d3afc724b4ac7fbd664c8452d6db51b49915be48c6982659e0e7942/xxhash-3.6.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cd01fa2aa00d8b017c97eb46b9a794fbdca53fc14f845f5a328c71254b0abb7", size = 445614, upload-time = "2025-10-02T14:35:45.216Z" }, - { url = "https://files.pythonhosted.org/packages/ed/ba/603ce3961e339413543d8cd44f21f2c80e2a7c5cfe692a7b1f2cccf58f3c/xxhash-3.6.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0226aa89035b62b6a86d3c68df4d7c1f47a342b8683da2b60cedcddb46c4d95b", size = 194024, upload-time = "2025-10-02T14:35:46.959Z" }, - { url = "https://files.pythonhosted.org/packages/78/d1/8e225ff7113bf81545cfdcd79eef124a7b7064a0bba53605ff39590b95c2/xxhash-3.6.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c6e193e9f56e4ca4923c61238cdaced324f0feac782544eb4c6d55ad5cc99ddd", size = 210541, upload-time = "2025-10-02T14:35:48.301Z" }, - { url = "https://files.pythonhosted.org/packages/6f/58/0f89d149f0bad89def1a8dd38feb50ccdeb643d9797ec84707091d4cb494/xxhash-3.6.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9176dcaddf4ca963d4deb93866d739a343c01c969231dbe21680e13a5d1a5bf0", size = 198305, upload-time = "2025-10-02T14:35:49.584Z" }, - { url = "https://files.pythonhosted.org/packages/11/38/5eab81580703c4df93feb5f32ff8fa7fe1e2c51c1f183ee4e48d4bb9d3d7/xxhash-3.6.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:c1ce4009c97a752e682b897aa99aef84191077a9433eb237774689f14f8ec152", size = 210848, upload-time = "2025-10-02T14:35:50.877Z" }, - { url = "https://files.pythonhosted.org/packages/5e/6b/953dc4b05c3ce678abca756416e4c130d2382f877a9c30a20d08ee6a77c0/xxhash-3.6.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:8cb2f4f679b01513b7adbb9b1b2f0f9cdc31b70007eaf9d59d0878809f385b11", size = 414142, upload-time = "2025-10-02T14:35:52.15Z" }, - { url = 
"https://files.pythonhosted.org/packages/08/a9/238ec0d4e81a10eb5026d4a6972677cbc898ba6c8b9dbaec12ae001b1b35/xxhash-3.6.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:653a91d7c2ab54a92c19ccf43508b6a555440b9be1bc8be553376778be7f20b5", size = 191547, upload-time = "2025-10-02T14:35:53.547Z" }, - { url = "https://files.pythonhosted.org/packages/f1/ee/3cf8589e06c2164ac77c3bf0aa127012801128f1feebf2a079272da5737c/xxhash-3.6.0-cp314-cp314-win32.whl", hash = "sha256:a756fe893389483ee8c394d06b5ab765d96e68fbbfe6fde7aa17e11f5720559f", size = 31214, upload-time = "2025-10-02T14:35:54.746Z" }, - { url = "https://files.pythonhosted.org/packages/02/5d/a19552fbc6ad4cb54ff953c3908bbc095f4a921bc569433d791f755186f1/xxhash-3.6.0-cp314-cp314-win_amd64.whl", hash = "sha256:39be8e4e142550ef69629c9cd71b88c90e9a5db703fecbcf265546d9536ca4ad", size = 32290, upload-time = "2025-10-02T14:35:55.791Z" }, - { url = "https://files.pythonhosted.org/packages/b1/11/dafa0643bc30442c887b55baf8e73353a344ee89c1901b5a5c54a6c17d39/xxhash-3.6.0-cp314-cp314-win_arm64.whl", hash = "sha256:25915e6000338999236f1eb68a02a32c3275ac338628a7eaa5a269c401995679", size = 28795, upload-time = "2025-10-02T14:35:57.162Z" }, - { url = "https://files.pythonhosted.org/packages/2c/db/0e99732ed7f64182aef4a6fb145e1a295558deec2a746265dcdec12d191e/xxhash-3.6.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c5294f596a9017ca5a3e3f8884c00b91ab2ad2933cf288f4923c3fd4346cf3d4", size = 32955, upload-time = "2025-10-02T14:35:58.267Z" }, - { url = "https://files.pythonhosted.org/packages/55/f4/2a7c3c68e564a099becfa44bb3d398810cc0ff6749b0d3cb8ccb93f23c14/xxhash-3.6.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1cf9dcc4ab9cff01dfbba78544297a3a01dafd60f3bde4e2bfd016cf7e4ddc67", size = 31072, upload-time = "2025-10-02T14:35:59.382Z" }, - { url = 
"https://files.pythonhosted.org/packages/c6/d9/72a29cddc7250e8a5819dad5d466facb5dc4c802ce120645630149127e73/xxhash-3.6.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01262da8798422d0685f7cef03b2bd3f4f46511b02830861df548d7def4402ad", size = 196579, upload-time = "2025-10-02T14:36:00.838Z" }, - { url = "https://files.pythonhosted.org/packages/63/93/b21590e1e381040e2ca305a884d89e1c345b347404f7780f07f2cdd47ef4/xxhash-3.6.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51a73fb7cb3a3ead9f7a8b583ffd9b8038e277cdb8cb87cf890e88b3456afa0b", size = 215854, upload-time = "2025-10-02T14:36:02.207Z" }, - { url = "https://files.pythonhosted.org/packages/ce/b8/edab8a7d4fa14e924b29be877d54155dcbd8b80be85ea00d2be3413a9ed4/xxhash-3.6.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b9c6df83594f7df8f7f708ce5ebeacfc69f72c9fbaaababf6cf4758eaada0c9b", size = 214965, upload-time = "2025-10-02T14:36:03.507Z" }, - { url = "https://files.pythonhosted.org/packages/27/67/dfa980ac7f0d509d54ea0d5a486d2bb4b80c3f1bb22b66e6a05d3efaf6c0/xxhash-3.6.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:627f0af069b0ea56f312fd5189001c24578868643203bca1abbc2c52d3a6f3ca", size = 448484, upload-time = "2025-10-02T14:36:04.828Z" }, - { url = "https://files.pythonhosted.org/packages/8c/63/8ffc2cc97e811c0ca5d00ab36604b3ea6f4254f20b7bc658ca825ce6c954/xxhash-3.6.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa912c62f842dfd013c5f21a642c9c10cd9f4c4e943e0af83618b4a404d9091a", size = 196162, upload-time = "2025-10-02T14:36:06.182Z" }, - { url = "https://files.pythonhosted.org/packages/4b/77/07f0e7a3edd11a6097e990f6e5b815b6592459cb16dae990d967693e6ea9/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:b465afd7909db30168ab62afe40b2fcf79eedc0b89a6c0ab3123515dc0df8b99", size = 213007, upload-time = "2025-10-02T14:36:07.733Z" }, - { url = "https://files.pythonhosted.org/packages/ae/d8/bc5fa0d152837117eb0bef6f83f956c509332ce133c91c63ce07ee7c4873/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a881851cf38b0a70e7c4d3ce81fc7afd86fbc2a024f4cfb2a97cf49ce04b75d3", size = 200956, upload-time = "2025-10-02T14:36:09.106Z" }, - { url = "https://files.pythonhosted.org/packages/26/a5/d749334130de9411783873e9b98ecc46688dad5db64ca6e04b02acc8b473/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9b3222c686a919a0f3253cfc12bb118b8b103506612253b5baeaac10d8027cf6", size = 213401, upload-time = "2025-10-02T14:36:10.585Z" }, - { url = "https://files.pythonhosted.org/packages/89/72/abed959c956a4bfc72b58c0384bb7940663c678127538634d896b1195c10/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:c5aa639bc113e9286137cec8fadc20e9cd732b2cc385c0b7fa673b84fc1f2a93", size = 417083, upload-time = "2025-10-02T14:36:12.276Z" }, - { url = "https://files.pythonhosted.org/packages/0c/b3/62fd2b586283b7d7d665fb98e266decadf31f058f1cf6c478741f68af0cb/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5c1343d49ac102799905e115aee590183c3921d475356cb24b4de29a4bc56518", size = 193913, upload-time = "2025-10-02T14:36:14.025Z" }, - { url = "https://files.pythonhosted.org/packages/9a/9a/c19c42c5b3f5a4aad748a6d5b4f23df3bed7ee5445accc65a0fb3ff03953/xxhash-3.6.0-cp314-cp314t-win32.whl", hash = "sha256:5851f033c3030dd95c086b4a36a2683c2ff4a799b23af60977188b057e467119", size = 31586, upload-time = "2025-10-02T14:36:15.603Z" }, - { url = "https://files.pythonhosted.org/packages/03/d6/4cc450345be9924fd5dc8c590ceda1db5b43a0a889587b0ae81a95511360/xxhash-3.6.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0444e7967dac37569052d2409b00a8860c2135cff05502df4da80267d384849f", size = 32526, upload-time = "2025-10-02T14:36:16.708Z" }, - { url = 
"https://files.pythonhosted.org/packages/0f/c9/7243eb3f9eaabd1a88a5a5acadf06df2d83b100c62684b7425c6a11bcaa8/xxhash-3.6.0-cp314-cp314t-win_arm64.whl", hash = "sha256:bb79b1e63f6fd84ec778a4b1916dfe0a7c3fdb986c06addd5db3a0d413819d95", size = 28898, upload-time = "2025-10-02T14:36:17.843Z" }, + { url = "https://files.pythonhosted.org/packages/f2/8a/51a14cdef4728c6c2337db8a7d8704422cc65676d9199d77215464c880af/xxhash-3.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:082c87bfdd2b9f457606c7a4a53457f4c4b48b0cdc48de0277f4349d79bb3d7a", size = 33357, upload-time = "2026-04-25T11:06:20.44Z" }, + { url = "https://files.pythonhosted.org/packages/b9/1b/0c2c933809421ffd9bf42b59315552c143c755db5d9a816b2f1ae273e884/xxhash-3.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5e7ce913b61f35b0c1c839a49ac9c8e75dd8d860150688aed353b0ce1bf409d8", size = 30869, upload-time = "2026-04-25T11:06:21.989Z" }, + { url = "https://files.pythonhosted.org/packages/03/a8/89d5fdd6ee12d70ba99451de46dd0e8010167468dcd913ec855653f4dd50/xxhash-3.7.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3beb1de3b1e9694fcdd853e570ee64c631c7062435d2f8c69c1adf809bc086f0", size = 194100, upload-time = "2026-04-25T11:06:23.586Z" }, + { url = "https://files.pythonhosted.org/packages/87/ee/2f9f2ed993e77206d1e66991290a1ebe22e843351ca3ebec8e49e01ba186/xxhash-3.7.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f3e7b689c3bce16699efcf736066f5c6cc4472c3840fe4b22bd8279daf4abdac", size = 212977, upload-time = "2026-04-25T11:06:25.019Z" }, + { url = "https://files.pythonhosted.org/packages/de/60/5a91644615a9e9d4e42c2e9925f1908e3a24e4e691d9de7340d565bea024/xxhash-3.7.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a6545e6b409e3d5cbafc850fb84c55a1ca26ed15a6b11e3bf07a0e0cd84517c8", size = 236373, upload-time = "2026-04-25T11:06:26.482Z" }, + { url = 
"https://files.pythonhosted.org/packages/22/c0/f3a9384eaaed9d14d4d062a5d953aa0da489bfe9747877aa994caa87cd0b/xxhash-3.7.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:31ab1461c77a11461d703c88eb949e132a1c6515933cf675d97ec680f4bd18de", size = 212229, upload-time = "2026-04-25T11:06:28.065Z" }, + { url = "https://files.pythonhosted.org/packages/2e/67/02f07a9fd79726804190f2172c4894c3ed9a4ebccaca05653c84beb58025/xxhash-3.7.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7c4d596b7676f811172687ec567cbafb9e4dea2f9be1bbb4f622410cb7f40f40", size = 445462, upload-time = "2026-04-25T11:06:30.048Z" }, + { url = "https://files.pythonhosted.org/packages/40/37/558f5a90c0672fc9b4402dc25d87ac5b7406616e8969430c9ca4e52ee74d/xxhash-3.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13805f0461cba0a857924e70ff91ae6d52d2598f79a884e788db80532614a4a1", size = 193932, upload-time = "2026-04-25T11:06:31.857Z" }, + { url = "https://files.pythonhosted.org/packages/d5/90/aaa09cd58661d32044dbbad7df55bbe22a623032b810e7ed3b8c569a2a6f/xxhash-3.7.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1d398f372496152f1c6933a33566373f8d1b37b98b8c9d608fa6edc0976f23b2", size = 284807, upload-time = "2026-04-25T11:06:33.697Z" }, + { url = "https://files.pythonhosted.org/packages/d6/f3/53df3719ab127a02c174f0c1c74924fcd110866e89c966bc7909cfa8fa84/xxhash-3.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d610aa62cdb7d4d497740741772a24a794903bf3e79eaa51d2e800082abe11e5", size = 210445, upload-time = "2026-04-25T11:06:35.488Z" }, + { url = "https://files.pythonhosted.org/packages/72/33/d219975c0e8b6fa2eb9ccd486fe47e21bf1847985b878dd2fbc3126e0d5c/xxhash-3.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:073c23900a9fbf3d26616c17c830db28af9803677cd5b33aea3224d824111514", size = 241273, upload-time = 
"2026-04-25T11:06:37.24Z" }, + { url = "https://files.pythonhosted.org/packages/3e/50/49b1afe610eb3964cedcb90a4d4c3d46a261ee8669cbd4f060652619ae3c/xxhash-3.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:418a463c3e6a590c0cdc890f8be19adb44a8c8acd175ca5b2a6de77e61d0b386", size = 197950, upload-time = "2026-04-25T11:06:39.148Z" }, + { url = "https://files.pythonhosted.org/packages/c6/75/5f42a1a4c78717d906a4b6a140c6dbf837ab1f547a54d23c4e2903310936/xxhash-3.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:03f8ff4474ee61c845758ce00711d7087a770d77efb36f7e74a6e867301000b8", size = 210709, upload-time = "2026-04-25T11:06:40.958Z" }, + { url = "https://files.pythonhosted.org/packages/8a/85/237e446c25abced71e9c53d269f2cef5bab8a82b3f88a12e00c5368e7368/xxhash-3.7.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:44fba4a5f1d179b7ddc7b3dc40f56f9209046421679b57025d4d8821b376fd8d", size = 275345, upload-time = "2026-04-25T11:06:42.525Z" }, + { url = "https://files.pythonhosted.org/packages/62/34/c2c26c0a6a9cc739bc2a5f0ae03ba8b87deb12b8bce35f7ac495e790dc6d/xxhash-3.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:31e3516a0f829d06ded4a2c0f3c7c5561993256bfa1c493975fb9dc7bfa828a1", size = 414056, upload-time = "2026-04-25T11:06:44.343Z" }, + { url = "https://files.pythonhosted.org/packages/a0/aa/5c58e9bc8071b8afd8dcf297ff362f723c4892168faba149f19904132bf4/xxhash-3.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b59ee2ac81de57771a09ecad09191e840a1d2fae1ef684208320591055768f83", size = 191485, upload-time = "2026-04-25T11:06:46.262Z" }, + { url = "https://files.pythonhosted.org/packages/d4/69/a929cf9d1e2e65a48b818cdce72cb6b69eab2e6877f21436d0a1942aff43/xxhash-3.7.0-cp312-cp312-win32.whl", hash = "sha256:74bbd92f8c7fcc397ba0a11bfdc106bc72ad7f11e3a60277753f87e7532b4d81", size = 30671, upload-time = "2026-04-25T11:06:48.039Z" }, + { url = 
"https://files.pythonhosted.org/packages/b9/1b/104b41a8947f4e1d4a66ce1e628eea752f37d1890bfd7453559ca7a3d950/xxhash-3.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:7bd7bc82dd4f185f28f35193c2e968ef46131628e3cac62f639dadf321cba4d1", size = 31514, upload-time = "2026-04-25T11:06:49.279Z" }, + { url = "https://files.pythonhosted.org/packages/98/a0/1fd0ea1f1b886d9e7c73f0397571e22333a7d79e31da6d7127c2a4a71d75/xxhash-3.7.0-cp312-cp312-win_arm64.whl", hash = "sha256:7d7148180ec99ba36585b42c8c5de25e9b40191613bc4be68909b4d25a77a852", size = 27761, upload-time = "2026-04-25T11:06:50.448Z" }, + { url = "https://files.pythonhosted.org/packages/c1/ca/d5174b4c36d10f64d4ca7050563138c5a599efb01a765858ddefc9c1202a/xxhash-3.7.0-cp313-cp313-android_21_arm64_v8a.whl", hash = "sha256:4b6d6b33f141158692bd4eafbb96edbc5aa0dabdb593a962db01a91983d4f8fa", size = 36813, upload-time = "2026-04-25T11:06:51.73Z" }, + { url = "https://files.pythonhosted.org/packages/41/d0/abc6c9d347ba1f1e1e1d98125d0881a0452c7f9a76a9dd03a7b5d2197f23/xxhash-3.7.0-cp313-cp313-android_21_x86_64.whl", hash = "sha256:845d347df254d6c619f616afa921331bada8614b8d373d58725c663ba97c3605", size = 35121, upload-time = "2026-04-25T11:06:53.048Z" }, + { url = "https://files.pythonhosted.org/packages/bf/11/4cc834eb3d79f2f2b3a6ef7324195208bcdfbdcf7534d2b17267aa5f3a8f/xxhash-3.7.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:fddbbb69a6fff4f421e7a0d1fa28f894b20112e9e3fab306af451e2dfd0e459b", size = 29624, upload-time = "2026-04-25T11:06:54.311Z" }, + { url = "https://files.pythonhosted.org/packages/23/83/e97d3e7b635fe73a1dfb1e91f805324dd6d930bb42041cbf18f183bc0b6d/xxhash-3.7.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:54876a4e45101cec2bf8f31a973cda073a23e2e108538dad224ba07f85f22487", size = 30638, upload-time = "2026-04-25T11:06:55.864Z" }, + { url = 
"https://files.pythonhosted.org/packages/f4/40/d84951d80c35db1f4c40a29a64a8520eea5d56e764c603906b4fe763580f/xxhash-3.7.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:0c72fe9c7e3d6dfd7f1e21e224a877917fa09c465694ba4e06464b9511b65544", size = 33323, upload-time = "2026-04-25T11:06:57.336Z" }, + { url = "https://files.pythonhosted.org/packages/89/cc/c7dc6558d97e9ab023f663d69ab28b340ed9bf4d2d94f2c259cf896bb354/xxhash-3.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a6d73a830b17ef49bc04e00182bd839164c1b3c59c127cd7c54fcb10c7ed8ee8", size = 33362, upload-time = "2026-04-25T11:06:58.656Z" }, + { url = "https://files.pythonhosted.org/packages/2a/6e/46b84017b1301d54091430353d4ad5901654a3e0871649877a416f7f1644/xxhash-3.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:91c3b07cf3362086d8f126c6aecd8e5e9396ad8b2f2219ea7e49a8250c318acd", size = 30874, upload-time = "2026-04-25T11:06:59.834Z" }, + { url = "https://files.pythonhosted.org/packages/df/5e/8f9158e3ab906ad3fec51e09b5ea0093e769f12207bfa42a368ca204e7ab/xxhash-3.7.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:50e879ebbac351c81565ca108db766d7832f5b8b6a5b14b8c0151f7190028e3d", size = 194185, upload-time = "2026-04-25T11:07:01.658Z" }, + { url = "https://files.pythonhosted.org/packages/f3/29/a804ded9f5d3d3758292678d23e7528b08fda7b7e750688d08b052322475/xxhash-3.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:921c14e93817842dd0dd9f372890a0f0c72e534650b6ab13c5be5cd0db11d47e", size = 213033, upload-time = "2026-04-25T11:07:03.606Z" }, + { url = "https://files.pythonhosted.org/packages/8b/91/1ce5a7d2fdc975267320e2c78fc1cecfe7ab735ccbcf6993ec5dd541cb2c/xxhash-3.7.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e64a7c9d7dfca3e0fafcbc5e455519090706a3e36e95d655cec3e04e79f95aaa", size = 236140, upload-time = "2026-04-25T11:07:05.396Z" }, + { url = 
"https://files.pythonhosted.org/packages/34/04/fd595a4fd8617b05fa27bd9b684ecb4985bfed27917848eea85d54036d06/xxhash-3.7.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2220af08163baf5fa36c2b8af079dc2cbe6e66ae061385267f9472362dfd53c6", size = 212291, upload-time = "2026-04-25T11:07:06.966Z" }, + { url = "https://files.pythonhosted.org/packages/03/fb/f1a379cbc372ae5b9f4ab36154c48a849ca6ebe3ac477067a57865bf3bc6/xxhash-3.7.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f14bb8b22a4a91325813e3d553b8963c10cf8c756cff65ee50c194431296c655", size = 445532, upload-time = "2026-04-25T11:07:08.525Z" }, + { url = "https://files.pythonhosted.org/packages/65/59/172424b79f8cfd4b6d8a122b2193e6b8ad4b11f7159bb3b6f9b3191329bb/xxhash-3.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:496736f86a9bedaf64b0dc70e3539d0766df01c71ea22032698e88f3f04a1ce9", size = 193990, upload-time = "2026-04-25T11:07:10.315Z" }, + { url = "https://files.pythonhosted.org/packages/b9/19/aeac22161d953f139f07ba5586cb4a17c5b7b6dff985122803bb12933500/xxhash-3.7.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0ff71596bd79816975b3de7130ab1ff4541410285a3c084584eeb1c8239996fd", size = 284876, upload-time = "2026-04-25T11:07:12.15Z" }, + { url = "https://files.pythonhosted.org/packages/77/d5/4fd0b59e7a02242953da05ff679fbb961b0a4368eac97a217e11dae110c1/xxhash-3.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1ad86695c19b1d46fe106925db3c7a37f16be37669dcf58dcc70a9dd6e324676", size = 210495, upload-time = "2026-04-25T11:07:13.952Z" }, + { url = "https://files.pythonhosted.org/packages/aa/fb/976a3165c728c7faf74aa1b5ab3cf6a85e6d731612894741840524c7d28c/xxhash-3.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:970f9f8c50961d639cbd0d988c96f80ddf66006de93641719282c4fe7a87c5e6", size = 241331, upload-time = 
"2026-04-25T11:07:15.557Z" }, + { url = "https://files.pythonhosted.org/packages/4a/2c/6763d5901d53ac9e6ba296e5717ae599025c9d268396e8faa8b4b0a8e0ac/xxhash-3.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5886ad85e9e347911783760a1d16cb6b393e8f9e3b52c982568226cb56927bdc", size = 198037, upload-time = "2026-04-25T11:07:17.563Z" }, + { url = "https://files.pythonhosted.org/packages/61/2b/876e722d533833f5f9a83473e6ba993e48745701096944e77bbecf29b2c3/xxhash-3.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6e934bbae1e0ec74e27d5f0d7f37ef547ce5ff9f0a7e63fb39e559fc99526734", size = 210744, upload-time = "2026-04-25T11:07:19.055Z" }, + { url = "https://files.pythonhosted.org/packages/21/e6/d7e7baef7ce24166b4668d3c48557bb35a23b92ecadcac7e7718d099ab69/xxhash-3.7.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:3b6b3d28228af044ebcded71c4a3dd86e1dbd7e2f4645bf40f7b5da65bb5fb5a", size = 275406, upload-time = "2026-04-25T11:07:20.908Z" }, + { url = "https://files.pythonhosted.org/packages/92/fe/198b3763b2e01ca908f2154969a2352ec99bda892b574a11a9a151c5ede4/xxhash-3.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:6be4d70d9ab76c9f324ead9c01af6ff52c324745ea0c3731682a0cf99720f1fe", size = 414125, upload-time = "2026-04-25T11:07:23.037Z" }, + { url = "https://files.pythonhosted.org/packages/3a/6d/019a11affd5a5499137cacca53808659964785439855b5aa40dfd3412916/xxhash-3.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:151d7520838d4465461a0b7f4ae488b3b00de16183dd3214c1a6b14bf89d7fb6", size = 191555, upload-time = "2026-04-25T11:07:24.991Z" }, + { url = "https://files.pythonhosted.org/packages/76/21/b96d58568df2d01533244c3e0e5cbdd0c8b2b25c4bec4d72f19259a292d7/xxhash-3.7.0-cp313-cp313-win32.whl", hash = "sha256:d798c1e291bffb8e37b5bbe0dda77fc767cd19e89cadaf66e6ed5d0ff88c9fe6", size = 30668, upload-time = "2026-04-25T11:07:26.665Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/57/d849a8d3afa1f8f4bc6a831cd89f49f9706fbbad94d2975d6140a171988c/xxhash-3.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:875811ba23c543b1a1c3143c926e43996eb27ebb8f52d3500744aa608c275aed", size = 31524, upload-time = "2026-04-25T11:07:27.92Z" }, + { url = "https://files.pythonhosted.org/packages/81/52/bacc753e92dee78b058af8dcef0a50815f5f860986c664a92d75f965b6a5/xxhash-3.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:54a675cb300dda83d71daae2a599389d22db8021a0f8db0dd659e14626eb3ecc", size = 27768, upload-time = "2026-04-25T11:07:29.113Z" }, + { url = "https://files.pythonhosted.org/packages/1c/47/ddbd683b7fc7e592c1a8d9d65f73ce9ab513f082b3967eee2baf549b8fc6/xxhash-3.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a3b19a42111c4057c1547a4a1396a53961dca576a0f6b82bfa88a2d1561764b2", size = 33576, upload-time = "2026-04-25T11:07:30.469Z" }, + { url = "https://files.pythonhosted.org/packages/07/f2/36d3310161db7f72efb4562aadde0ed429f1d0531782dd6345b12d2da527/xxhash-3.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8f4608a06e4d61b7a3425665a46d00e0579122e1a2fae97a0c52953a3aad9aa3", size = 31123, upload-time = "2026-04-25T11:07:31.989Z" }, + { url = "https://files.pythonhosted.org/packages/0d/3f/75937a5c69556ed213021e43cbedd84c8e0279d0d74e7d41a255d84ba4b1/xxhash-3.7.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ad37c7792479e49cf96c1ab25517d7003fe0d93687a772ba19a097d235bbe41e", size = 196491, upload-time = "2026-04-25T11:07:33.358Z" }, + { url = "https://files.pythonhosted.org/packages/22/29/f10d7ff8c7a733d4403a43b9de18c8fabc005f98cec054644f04418659ee/xxhash-3.7.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc026e3b89d98e30a8288c95cb696e77d150b3f0fb7a51f73dcd49ee6b5577fa", size = 215793, upload-time = "2026-04-25T11:07:34.919Z" }, + { url = 
"https://files.pythonhosted.org/packages/8b/fd/778f60aa295f58907938f030a8b514611f391405614a525cccd2ffc00eb5/xxhash-3.7.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c9b31ab1f28b078a6a1ac1a54eb35e7d5390deddd56870d0be3a0a733d1c321c", size = 237993, upload-time = "2026-04-25T11:07:36.638Z" }, + { url = "https://files.pythonhosted.org/packages/70/f5/736db5de387b4a540e37a05b84b40dc58a1ce974bfd2b4e5754ce29b68c3/xxhash-3.7.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3bb5fd680c038fd5229e44e9c493782f90df9bef632fd0499d442374688ff70b", size = 214887, upload-time = "2026-04-25T11:07:38.564Z" }, + { url = "https://files.pythonhosted.org/packages/4d/aa/09a095f22fdb9a27fbb716841fbff52119721f9ca4261952d07a912f7839/xxhash-3.7.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:030c0fd688fce3569fbb49a2feefd4110cbb0b650186fb4610759ecfac677548", size = 448407, upload-time = "2026-04-25T11:07:40.552Z" }, + { url = "https://files.pythonhosted.org/packages/74/8a/b745efeeca9e34a91c26fdc97ad8514c43d5a81ac78565cba80a1353870a/xxhash-3.7.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5b1bde10324f4c31812ae0d0502e92d916ae8917cad7209353f122b8b8f610c3", size = 196119, upload-time = "2026-04-25T11:07:42.101Z" }, + { url = "https://files.pythonhosted.org/packages/8a/5c/0cfceb024af90c191f665c7933b1f318ee234f4797858383bebd1881d52f/xxhash-3.7.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:503722d52a615f2604f5e7611de7d43878df010dc0053094ef91cb9a9ac3d987", size = 286751, upload-time = "2026-04-25T11:07:43.568Z" }, + { url = "https://files.pythonhosted.org/packages/0b/0a/0793e405dc3cf8f4ebe2c1acec1e4e4608cd9e7e50ea691dabbc2a95ccbb/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:c72500a3b6d6c30ebfc135035bcace9eb5884f2dc220804efcaaba43e9f611dd", size = 212961, upload-time = "2026-04-25T11:07:45.388Z" }, + { url = "https://files.pythonhosted.org/packages/0c/7e/721118ffc63bfff94aa565bcf2555a820f9f4bdb0f001e0d609bdfad70de/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:43475925a766d01ca8cd9a857fd87f3d50406983c8506a4c07c4df12adcc867f", size = 243703, upload-time = "2026-04-25T11:07:47.053Z" }, + { url = "https://files.pythonhosted.org/packages/6e/18/16f6267160488b8276fd3d449d425712512add292ba545c1b6946bfdb7dd/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8d09dfd2ab135b985daf868b594315ebe11ad86cd9fea46e6c69f19b28f7d25a", size = 200894, upload-time = "2026-04-25T11:07:48.657Z" }, + { url = "https://files.pythonhosted.org/packages/2d/94/80ba841287fd97e3e9cac1d228788c8ef623746f570404961eec748ecb5c/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c50269d0055ac1faecfd559886d2cbe4b730de236585aba0e873f9d9dadbe585", size = 213357, upload-time = "2026-04-25T11:07:50.257Z" }, + { url = "https://files.pythonhosted.org/packages/a1/7e/106d4067130c59f1e18a55ffadcd876d8c68534883a1e02685b29d3d8153/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:1910df4756a5ab58cfad8744fc2d0f23926e3efcc346ee76e87b974abab922f4", size = 277600, upload-time = "2026-04-25T11:07:51.745Z" }, + { url = "https://files.pythonhosted.org/packages/c5/86/a081dd30da71d720b2612a792bfd55e45fa9a07ac76a0507f60487473c25/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d006faf3b491957efcb433489be3c149efe4787b7063d5cddb8ddaefdc60e0c1", size = 416980, upload-time = "2026-04-25T11:07:53.504Z" }, + { url = "https://files.pythonhosted.org/packages/35/29/1a95221a029a3c1293773869e1ab47b07cbbdd82444a42809e8c60156626/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:abb65b4e947e958f7b3b0d71db3ce447d1bc5f37f5eab871ce7223bda8768a04", size = 193840, upload-time = 
"2026-04-25T11:07:55.103Z" }, + { url = "https://files.pythonhosted.org/packages/c5/e0/db909dd0823285de2286f67e10ee4d81e96ad35d7d8e964ecb07fccd8af9/xxhash-3.7.0-cp313-cp313t-win32.whl", hash = "sha256:178959906cb1716a1ce08e0d69c82886c70a15a6f2790fc084fdd146ca30cd49", size = 30966, upload-time = "2026-04-25T11:07:56.524Z" }, + { url = "https://files.pythonhosted.org/packages/7b/ff/d705b15b22f21ee106adce239cb65d35067a158c630b240270f09b17c2e6/xxhash-3.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2524a1e20d4c231d13b50f7cf39e44265b055669a64a7a4b9a2a44faa03f19b6", size = 31784, upload-time = "2026-04-25T11:07:57.758Z" }, + { url = "https://files.pythonhosted.org/packages/a2/1f/b2cf83c3638fd0588e0b17f22e5a9400bdfb1a3e3755324ac0aee2250b88/xxhash-3.7.0-cp313-cp313t-win_arm64.whl", hash = "sha256:37d994d0ffe81ef087bb330d392caa809bb5853c77e22ea3f71db024a0543dba", size = 27932, upload-time = "2026-04-25T11:07:59.109Z" }, + { url = "https://files.pythonhosted.org/packages/0e/cc/431db584f6fbb9312e40a173af027644e5580d39df1f73603cbb9dca4d6b/xxhash-3.7.0-cp314-cp314-android_24_arm64_v8a.whl", hash = "sha256:8c5fcfd806c335bfa2adf1cd0b3110a44fc7b6995c3a648c27489bae85801465", size = 36644, upload-time = "2026-04-25T11:08:00.658Z" }, + { url = "https://files.pythonhosted.org/packages/bc/01/255ec513e0a705d1f9a61413e78dfce4e3235203f0ed525a24c2b4b56345/xxhash-3.7.0-cp314-cp314-android_24_x86_64.whl", hash = "sha256:506a0b488f190f0a06769575e30caf71615c898ed93ab18b0dbcb6dec5c3713c", size = 35003, upload-time = "2026-04-25T11:08:02.338Z" }, + { url = "https://files.pythonhosted.org/packages/68/70/c55fc33c93445b44d8fc5a17b41ed99e3cebe92bcf8396809e63fc9a1165/xxhash-3.7.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:ec68dbba21532c0173a9872298e65c89749f7c9d21538c3a78b5bb6105871568", size = 29655, upload-time = "2026-04-25T11:08:03.701Z" }, + { url = 
"https://files.pythonhosted.org/packages/c2/72/ff8de73df000d74467d12a59ce6d6e2b2a368b978d41ab7b1fba5ed442be/xxhash-3.7.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:fa77e7ec1450d415d20129961814787c9abd9a07f98872f070b1fe96c5084611", size = 30664, upload-time = "2026-04-25T11:08:05.011Z" }, + { url = "https://files.pythonhosted.org/packages/b6/91/08416d9bd9bc3bf39d831abe8a5631ac2db5141dfd6fe81c3fe59a1f9264/xxhash-3.7.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:fe32736295ea38e43e7d9424053c8c47c9f64fecfc7c895fb3da9b30b131c9ee", size = 33317, upload-time = "2026-04-25T11:08:06.413Z" }, + { url = "https://files.pythonhosted.org/packages/0e/3b/86b1caa4dee10a99f4bf9521e623359341c5e50d05158fa10c275b2bd079/xxhash-3.7.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:ab9dd2c83c4bbd63e422181a76f13502d049d3ddcac9a1bdc29196263d692bb8", size = 33457, upload-time = "2026-04-25T11:08:08.099Z" }, + { url = "https://files.pythonhosted.org/packages/ed/38/98ea14ad1517e1461292a65906951458d520689782bfbae111050145bdba/xxhash-3.7.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3afec3a336a2286601a437cb07562ab0227685e6fbb9ec17e8c18457ff348ecf", size = 30894, upload-time = "2026-04-25T11:08:09.429Z" }, + { url = "https://files.pythonhosted.org/packages/61/a2/074654d0b893606541199993c7db70067d9fc63b748e0d60020a52a1bd36/xxhash-3.7.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:565df64437a9390f84465dcca33e7377114c7ede8d05cd2cf20081f831ea788e", size = 194409, upload-time = "2026-04-25T11:08:10.91Z" }, + { url = "https://files.pythonhosted.org/packages/e2/26/6d2a1afc468189f77ca28c32e1c83e1b9da1178231e05641dbc1b350e332/xxhash-3.7.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:12eca820a5d558633d423bf8bb78ce72a55394823f64089247f788a7e0ae691e", size = 213135, upload-time = "2026-04-25T11:08:12.575Z" }, + { url = 
"https://files.pythonhosted.org/packages/8e/0e/d8aecf95e09c42547453137be74d2f7b8b14e08f5177fa2fab6144a19061/xxhash-3.7.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f262b8f7599516567e070abf607b9af649052b2c4bd6f9be02b0cb41b7024805", size = 236379, upload-time = "2026-04-25T11:08:14.206Z" }, + { url = "https://files.pythonhosted.org/packages/f2/74/8140e8210536b3dd0cc816c4faaeb5ba6e63e8125ab25af4bcddd6a037b3/xxhash-3.7.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1598916cb197681e03e601901e4ab96a9a963de398c59d0964f8a6f44a2b361", size = 212447, upload-time = "2026-04-25T11:08:15.79Z" }, + { url = "https://files.pythonhosted.org/packages/a0/d2/462001d2903b4bee5a5689598a0a55e5e7cd1ac7f4247a5545cff10d3ebb/xxhash-3.7.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:322b2f0622230f526aeb1738149948a7ae357a9e2ceb1383c6fd1fdaecdafa16", size = 445660, upload-time = "2026-04-25T11:08:17.441Z" }, + { url = "https://files.pythonhosted.org/packages/23/09/2bd1ed7f8689b20e51727952cac8329d50c694dc32b2eba06ba5bc742b37/xxhash-3.7.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:24cc22070880cc57b830a65cde4e65fa884c6d9b28ae4803b5ee05911e7bafba", size = 194076, upload-time = "2026-04-25T11:08:19.134Z" }, + { url = "https://files.pythonhosted.org/packages/c9/6e/692302cd0a5f4ac4e6289f37fa888dc2e1e07750b68fe3e4bfe939b8cea3/xxhash-3.7.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb5a888a968b2434abf9ecda357b5d43f10d7b5a6da6fdbbe036208473aff0e2", size = 284990, upload-time = "2026-04-25T11:08:20.618Z" }, + { url = "https://files.pythonhosted.org/packages/05/d9/e54b159b3d9df7999d2a7c676ce7b323d1b5588a64f8f51ed8172567bd87/xxhash-3.7.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a999771ff97bec27d18341be4f3a36b163bb1ac41ec17bef6d2dabd84acd33c7", 
size = 210590, upload-time = "2026-04-25T11:08:22.24Z" }, + { url = "https://files.pythonhosted.org/packages/50/93/0e0df1a3a196ced4ca71de76d65ead25d8e87bbfb87b64306ea47a40c00d/xxhash-3.7.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:ed4a6efe2dee1655adb73e7ad40c6aa955a6892422b1e3b95de6a34de56e3cbb", size = 241442, upload-time = "2026-04-25T11:08:23.844Z" }, + { url = "https://files.pythonhosted.org/packages/9a/a9/d917a7a814e90b218f8a0d37967105eea91bf752c3303683c99a1f7bfc1f/xxhash-3.7.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9fd17f14ac0faa12126c2f9ca774a8cf342957265ec3c8669c144e5e6cdb478c", size = 198356, upload-time = "2026-04-25T11:08:25.99Z" }, + { url = "https://files.pythonhosted.org/packages/89/5e/f2ba1877c39469abbefc72991d6ebdcbd4c0880db01ae8cb1f553b0c537d/xxhash-3.7.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:05fd1254268c59b5cb2a029dfc204275e9fc52de2913f1e53aa8d01442c96b4d", size = 210898, upload-time = "2026-04-25T11:08:27.608Z" }, + { url = "https://files.pythonhosted.org/packages/90/c6/be56b58e73de531f39a10de1355bb77ceb663900dc4bf2d6d3002a9c3f9e/xxhash-3.7.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:a2eae53197c6276d5b317f75a1be226bbf440c20b58bf525f36b5d0e1f657ca6", size = 275519, upload-time = "2026-04-25T11:08:29.301Z" }, + { url = "https://files.pythonhosted.org/packages/92/e2/17ddc85d5765b9c709f192009ed8f5a1fc876f4eb35bba7c307b5b1169f9/xxhash-3.7.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:bfe6f92e3522dcbe8c4281efd74fa7542a336cb00b0e3272c4ec0edabeaeaf67", size = 414191, upload-time = "2026-04-25T11:08:31.16Z" }, + { url = "https://files.pythonhosted.org/packages/9c/42/85f5b79f4bf1ec7ba052491164adfd4f4e9519f5dc7246de4fbd64a1bd56/xxhash-3.7.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7ab9a49c410d8c6c786ab99e79c529938d894c01433130353dd0fe999111077a", size = 191604, upload-time = "2026-04-25T11:08:32.862Z" }, + { url = 
"https://files.pythonhosted.org/packages/b8/d0/6127b623aa4cca18d8b7743592b048d689fd6c6e37ff26a22cddf6cd9d7f/xxhash-3.7.0-cp314-cp314-win32.whl", hash = "sha256:040ea63668f9185b92bc74942df09c7e65703deed71431333678fc6e739a9955", size = 31271, upload-time = "2026-04-25T11:08:34.651Z" }, + { url = "https://files.pythonhosted.org/packages/64/4f/44fc4788568004c43921701cbc127f48218a1eede2c9aea231115323564d/xxhash-3.7.0-cp314-cp314-win_amd64.whl", hash = "sha256:2a61e2a3fb23c892496d587b470dee7fa1b58b248a187719c65ea8e94ec13257", size = 32284, upload-time = "2026-04-25T11:08:35.987Z" }, + { url = "https://files.pythonhosted.org/packages/6d/77/18bb895eb60a49453d16e17d67990e5caff557c78eafc90ad4e2eabf4570/xxhash-3.7.0-cp314-cp314-win_arm64.whl", hash = "sha256:c7741c7524961d8c0cb4d4c21b28957ff731a3fd5b5cd8b856dc80a40e9e5acc", size = 28701, upload-time = "2026-04-25T11:08:37.767Z" }, + { url = "https://files.pythonhosted.org/packages/45/a0/46f72244570c550fbbb7db1ef554183dd5ebe9136385f30e032b781ae8f6/xxhash-3.7.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:fc84bf7aa7592f31ec63a3e7b11d624f468a3f19f5238cec7282a42e838ab1d7", size = 33646, upload-time = "2026-04-25T11:08:39.109Z" }, + { url = "https://files.pythonhosted.org/packages/4a/3a/453846a7eceea11e75def361eed01ec6a0205b9822c19927ed364ccae7cc/xxhash-3.7.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9f1563fdc8abfc389748e6932c7e4e99c89a53e4ec37d4563c24fc06f5e5644b", size = 31125, upload-time = "2026-04-25T11:08:40.467Z" }, + { url = "https://files.pythonhosted.org/packages/bd/3e/49434aba738885d512f9e486db1bdd19db28dfa40372b56da26ef7a4e738/xxhash-3.7.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2d415f18becf6f153046ab6adc97da77e3643a0ee205dae61c4012604113a020", size = 196633, upload-time = "2026-04-25T11:08:41.943Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/e9/006cb6127baeb9f8abe6d15e62faa01349f09b34e2bfd65175b2422d026b/xxhash-3.7.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bb16aa13ed175bc9be5c2491ba031b85a9b51c4ed90e0b3d4ebe63cf3fb54f8e", size = 215899, upload-time = "2026-04-25T11:08:43.645Z" }, + { url = "https://files.pythonhosted.org/packages/27/e4/cc57d72e66df0ae29b914335f1c6dcf61e8f3746ddf0ae3c471aa4f15e00/xxhash-3.7.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f9fd595f1e5941b3d7863e4774e4b30caa6731fc34b9277da032295aa5656ee5", size = 238116, upload-time = "2026-04-25T11:08:45.698Z" }, + { url = "https://files.pythonhosted.org/packages/af/78/3531d4a3fd8a0038cc6be1f265a69c1b3587f557a10b677dd736de2202c1/xxhash-3.7.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1295325c5a98d552333fa53dc2b026b0ef0ec9c8e73ca3a952990b4c7d65d459", size = 215012, upload-time = "2026-04-25T11:08:47.355Z" }, + { url = "https://files.pythonhosted.org/packages/b4/f6/259fb1eaaec921f59b17203b0daee69829761226d3b980d5191d7723dd83/xxhash-3.7.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3573a651d146912da9daa9e29e5fbc45994420daaa9ef1e2fa5823e1dc485513", size = 448534, upload-time = "2026-04-25T11:08:49.149Z" }, + { url = "https://files.pythonhosted.org/packages/7b/16/a66d0eaf6a7e68532c07714361ddc904c663ec940f3b028c1ae4a21a7b9d/xxhash-3.7.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ec1e080a3d02d94ea9335bfab0e3374b877e25411422c18f51a943fa4b46381", size = 196217, upload-time = "2026-04-25T11:08:50.805Z" }, + { url = "https://files.pythonhosted.org/packages/8d/ef/d2efc7fc51756dc52509109d1a25cefc859d74bc4b19a167b12dbd8c2786/xxhash-3.7.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:84415265192072d8638a3afc3c1bc5995e310570cd9acb54dc46d3939e364fe0", size = 286906, upload-time = "2026-04-25T11:08:52.418Z" }, + { url = "https://files.pythonhosted.org/packages/fc/67/25decd1d4a4018582ec4db2a868a2b7e40640f4adb20dfeb19ac923aa825/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8d4dea659b57443989ef32f4295104fd6912c73d0bf26d1d148bb88a9f159b02", size = 213057, upload-time = "2026-04-25T11:08:54.105Z" }, + { url = "https://files.pythonhosted.org/packages/0d/5d/17651eb29d06786cdc40c60ae3d27d645aa5d61d2eca6237a7ba0b94789b/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:05ece0fe4d9c9c2728912d1981ae1566cfc83a011571b24732cbf76e1fb70dca", size = 243886, upload-time = "2026-04-25T11:08:56.109Z" }, + { url = "https://files.pythonhosted.org/packages/8a/d4/174d9cf7502243d586e6a9ae842b1ae23026620995114f85f1380e588bc9/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:fd880353cf1ffaf321bc18dd663e111976dbd0d3bbd8a66d58d2b470dfa7f396", size = 201015, upload-time = "2026-04-25T11:08:57.777Z" }, + { url = "https://files.pythonhosted.org/packages/91/8c/2254e2d06c3ac5e6fe22eaf3da791b87ea823ae9f2c17b4af66755c5752d/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:4e15cc9e2817f6481160f930c62842b3ff419e20e13072bcbab12230943092bc", size = 213457, upload-time = "2026-04-25T11:08:59.826Z" }, + { url = "https://files.pythonhosted.org/packages/79/a2/e3daa762545921173e3360f3b4ff7fc63c2d27359f7230ec1a7a74e117f6/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:90b9d1a8bd37d768ffc92a1f651ec69afc532a96fa1ac2ea7abbed5d630b3237", size = 277738, upload-time = "2026-04-25T11:09:01.423Z" }, + { url = "https://files.pythonhosted.org/packages/e1/4c/e186da2c46b87f5204640e008d42730bf3c1ee9f0efb71ae1ebcdfeac681/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:157c49475b34ecea8809e51123d9769a534e139d1247942f7a4bc67710bb2533", size = 417127, upload-time = 
"2026-04-25T11:09:03.592Z" }, + { url = "https://files.pythonhosted.org/packages/17/28/3798e15007a3712d0da3d3fe70f8e11916569858b5cc371053bc26270832/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5a6ddec83325685e729ca119d1f5c518ec39294212ecd770e60693cdc5f7eb79", size = 193962, upload-time = "2026-04-25T11:09:06.228Z" }, + { url = "https://files.pythonhosted.org/packages/ad/95/a26baa93b5241fd7630998816a4ec47a5a0bad193b3f8fc8f3593e1a4a67/xxhash-3.7.0-cp314-cp314t-win32.whl", hash = "sha256:a04a6cab47e2166435aaf5b9e5ee41d1532cc8300efdef87f2a4d0acb7db19ed", size = 31643, upload-time = "2026-04-25T11:09:08.153Z" }, + { url = "https://files.pythonhosted.org/packages/44/36/5454f13c447e395f9b06a3e91274c59f503d31fad84e1836efe3bdb71f6a/xxhash-3.7.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8653dd7c2eda020545bb2c71c7f7039b53fe7434d0fc1a0a9deb79ab3f1a4fc1", size = 32522, upload-time = "2026-04-25T11:09:09.534Z" }, + { url = "https://files.pythonhosted.org/packages/74/35/698e7e3ff38e22992ea24870a511d8762474fb6783627a2910ff22a185c2/xxhash-3.7.0-cp314-cp314t-win_arm64.whl", hash = "sha256:468f0fc114faaa4b36699f8e328bbc3bb11dc418ba94ac52c26dd736d4b6c637", size = 28807, upload-time = "2026-04-25T11:09:11.234Z" }, ] [[package]] @@ -6203,9 +6910,9 @@ wheels = [ [[package]] name = "zipp" -version = "3.23.0" +version = "3.23.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +sdist = { url = "https://files.pythonhosted.org/packages/30/21/093488dfc7cc8964ded15ab726fad40f25fd3d788fd741cc1c5a17d78ee8/zipp-3.23.1.tar.gz", hash = "sha256:32120e378d32cd9714ad503c1d024619063ec28aad2248dc6672ad13edfa5110", size = 25965, upload-time = "2026-04-13T23:21:46.6Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, + { url = "https://files.pythonhosted.org/packages/08/8a/0861bec20485572fbddf3dfba2910e38fe249796cb73ecdeb74e07eeb8d3/zipp-3.23.1-py3-none-any.whl", hash = "sha256:0b3596c50a5c700c9cb40ba8d86d9f2cc4807e9bedb06bcdf7fac85633e444dc", size = 10378, upload-time = "2026-04-13T23:21:45.386Z" }, ]