diff --git a/.github/workflows/documentation-upload-pr.yml b/.github/workflows/documentation-upload-pr.yml index 927f70597..85a6ba934 100644 --- a/.github/workflows/documentation-upload-pr.yml +++ b/.github/workflows/documentation-upload-pr.yml @@ -33,7 +33,7 @@ jobs: github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success' && github.repository == 'huggingface/lerobot' - uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@9ad2de8582b56c017cb530c1165116d40433f1c6 # main + uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@2430c1ec91d04667414e2fa31ecfc36c153ea391 # main with: package_name: lerobot secrets: diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 6efa1273e..e55ef414a 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -55,7 +55,7 @@ jobs: github.repository == 'huggingface/lerobot' permissions: contents: read - uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@90b4ee2c10b81b5c1a6367c4e6fc9e2fb510a7e3 # main + uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@2430c1ec91d04667414e2fa31ecfc36c153ea391 # main with: commit_sha: ${{ github.sha }} package: lerobot @@ -78,7 +78,7 @@ jobs: permissions: contents: read pull-requests: write - uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@90b4ee2c10b81b5c1a6367c4e6fc9e2fb510a7e3 # main + uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@2430c1ec91d04667414e2fa31ecfc36c153ea391 # main with: commit_sha: ${{ github.event.pull_request.head.sha }} pr_number: ${{ github.event.number }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 4dc119b5e..e55410cdf 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -24,14 +24,14 @@ on: env: CLOSE_ISSUE_MESSAGE: > - This issue was closed because it has 
been stalled for 14 days with no activity. + This issue was closed because it has been stalled for 30 days with no activity. Feel free to reopen if is still relevant, or to ping a collaborator if you have any questions. CLOSE_PR_MESSAGE: > - This PR was closed because it has been stalled for 21 days with no activity. + This PR was closed because it has been stalled for 30 days with no activity. Feel free to reopen if is still relevant, or to ping a collaborator if you have any questions. WARN_ISSUE_MESSAGE: > This issue has been automatically marked as stale because it has not had - recent activity (6 months). It will be closed if no further activity occurs. + recent activity (1 year). It will be closed if no further activity occurs. Any change, comment or update to this issue will reset this count. Thank you for your contributions. WARN_PR_MESSAGE: > @@ -59,10 +59,10 @@ jobs: stale-pr-label: stale exempt-issue-labels: never-stale exempt-pr-labels: never-stale - days-before-issue-stale: 180 - days-before-issue-close: 14 + days-before-issue-stale: 365 + days-before-issue-close: 30 days-before-pr-stale: 365 - days-before-pr-close: 21 + days-before-pr-close: 30 delete-branch: true close-issue-message: ${{ env.CLOSE_ISSUE_MESSAGE }} close-pr-message: ${{ env.CLOSE_PR_MESSAGE }} diff --git a/AGENTS.md b/AGENTS.md index c1aba7471..bd1bf0af1 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,5 +1,7 @@ This file provides guidance to AI agents when working with code in this repository. +> **User-facing help → [`AGENT_GUIDE.md`](./AGENT_GUIDE.md)** (SO-101 setup, recording, picking a policy, training duration, eval — with copy-pasteable commands). + ## Project Overview LeRobot is a PyTorch-based library for real-world robotics, providing datasets, pretrained policies, and tools for training, evaluation, data collection, and robot control. It integrates with Hugging Face Hub for model/dataset sharing. 
diff --git a/AGENT_GUIDE.md b/AGENT_GUIDE.md new file mode 100644 index 000000000..725948dc9 --- /dev/null +++ b/AGENT_GUIDE.md @@ -0,0 +1,410 @@ +# AGENT_GUIDE.md — LeRobot Helper for AI Agents & Users + +This file is a practical, copy-paste-friendly companion for any AI agent (Cursor, Claude, ChatGPT, Codex, etc.) helping a user work with LeRobot. It complements [`AGENTS.md`](./AGENTS.md) (dev/contributor context) with **user-facing guidance**: how to start, what to train, how long, how to record, and how to calibrate an SO-101. + +--- + +## 1. Start here — ask the user first (MANDATORY) + +Before suggesting any command, an agent MUST ask the user at least these questions and wait for answers: + +1. **What's your goal?** (e.g. "teach my SO-101 to fold a cloth", "train a policy on an existing HF dataset", "contribute a PR", "understand the codebase") +2. **What hardware do you have?** + - Robot: none / SO-100 / SO-101 / Koch / LeKiwi / Reachy / other + - Teleop: leader arm / phone / keyboard / gamepad / none + - Cameras: how many, resolution, fixed or moving? +3. **What machine will you train on?** + - GPU model + VRAM (e.g. "laptop 3060 6 GB", "RTX 4090 24 GB", "A100 80 GB", "CPU only") + - OS: macOS / Linux / Windows +4. **Skill level & time budget?** First time, some ML, experienced? Hours, days, a weekend? +5. **Do you already have a dataset?** Yes (HF repo id?) / no / want to record one +6. **How can I help right now?** (pick one concrete next step) + +Only after you have answers, propose a concrete path. If something is ambiguous, ask again rather than guessing. Bias toward **the simplest thing that works** for the user's hardware and goal. + +--- + +## 2. LeRobot in 60 seconds + +LeRobot = **datasets + policies + envs + robot control**, unified by a small set of strong abstractions. + +- **`LeRobotDataset`** — episode-aware dataset (video or images + actions + state), loadable from the Hub or disk. 
+- **Policies** (`ACT`, `Diffusion`, `SmolVLA`, `π0`, `π0.5`, `Wall-X`, `X-VLA`, `VQ-BeT`, `TD-MPC`, …) — all inherit `PreTrainedPolicy` and can be pushed/pulled from the Hub. +- **Processors** — small composable transforms between dataset → policy → robot. +- **Envs** (sim) and **Robots** (real) — same action/observation contract so code swaps cleanly. +- **CLI** — `lerobot-record`, `lerobot-train`, `lerobot-eval`, `lerobot-teleoperate`, `lerobot-calibrate`, `lerobot-find-port`, `lerobot-setup-motors`, `lerobot-replay`. + +See [`AGENTS.md`](./AGENTS.md) for repo architecture. + +--- + +## 3. Quickstart paths (pick one) + +### Path A — "I have an SO-101 and want my first trained policy" + +Go to §4 (SO-101 end-to-end), then §5 (data tips), then §6 (pick a policy — likely **ACT**), then §7 (how long), then §8 (eval). + +### Path B — "No hardware, I want to train on an existing dataset" + +Skip §4. Pick a policy in §6, pick a duration in §7, then run `lerobot-train` per §4.9 with a Hub `--dataset.repo_id` and an `--env.type` for eval. Finish with §8. + +### Path C — "I just want to understand the codebase" + +Read §2 above, then `AGENTS.md` "Architecture", then open `src/lerobot/policies/act/` and `src/lerobot/datasets/lerobot_dataset.py` as canonical examples. + +--- + +## 4. SO-101 end-to-end cheat-sheet + +Full details in [`docs/source/so101.mdx`](./docs/source/so101.mdx) and [`docs/source/il_robots.mdx`](./docs/source/il_robots.mdx). Minimum commands in order. Confirm arms are assembled + powered before issuing. + +**4.1 Install** + +```bash +pip install 'lerobot[feetech]' # SO-100/SO-101 motor stack +# pip install 'lerobot[all]' # everything +# pip install 'lerobot[aloha,pusht]' # specific features +# pip install 'lerobot[smolvla]' # add SmolVLA deps +git lfs install && git lfs pull +hf auth login # required to push datasets/policies +``` + +Contributors can alternatively use `uv sync --locked --extra feetech` (see `AGENTS.md`). 
+ +**4.2 Find USB ports** — run once per arm, unplug when prompted. + +```bash +lerobot-find-port +``` + +macOS: `/dev/tty.usbmodem...`; Linux: `/dev/ttyACM0` (may need `sudo chmod 666 /dev/ttyACM0`). + +**4.3 Setup motor IDs & baudrate** (one-time, per arm) + +```bash +lerobot-setup-motors --robot.type=so101_follower --robot.port= +lerobot-setup-motors --teleop.type=so101_leader --teleop.port= +``` + +**4.4 Calibrate** — center all joints, press Enter, sweep each joint through its full range. The `id` is the calibration key — reuse it everywhere. + +```bash +lerobot-calibrate --robot.type=so101_follower --robot.port= --robot.id=my_follower +lerobot-calibrate --teleop.type=so101_leader --teleop.port= --teleop.id=my_leader +``` + +**4.5 Teleoperate** (sanity check, no recording) + +```bash +lerobot-teleoperate \ + --robot.type=so101_follower --robot.port= --robot.id=my_follower \ + --teleop.type=so101_leader --teleop.port= --teleop.id=my_leader \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --display_data=true +``` + +> **Feetech timeout / comms error on SO-100 / SO-101?** Before touching software, check the **red motor LEDs** on the daisy chain. +> +> - **All steady red, gripper → base chain** → wiring OK. +> - **One or more motors dark / chain stops mid-way** → wiring issue: reseat the 3-pin cables, check the controller-board power supply, and make sure each motor is fully clicked in. +> - **LEDs blinking** → the motor is in an **error state**: usually overload (forcing a joint past its limit) **or wrong power supply voltage**. SO-100 / SO-101 ship in two variants — a **5 V / 7.4 V** build and a **12 V** build — they are NOT interchangeable. Using a 12 V PSU on a 5 V / 7.4 V arm (or vice-versa) will trip this error; confirm your motor variant before powering up. +> +> Most "timeout" errors are physical, not code. + +**4.6 Record a dataset** — keys: **→** next, **←** redo, **ESC** finish & upload. 
+ +```bash +HF_USER=$(NO_COLOR=1 hf auth whoami | awk -F': *' 'NR==1 {print $2}') + +lerobot-record \ + --robot.type=so101_follower --robot.port= --robot.id=my_follower \ + --teleop.type=so101_leader --teleop.port= --teleop.id=my_leader \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --dataset.repo_id=${HF_USER}/my_task \ + --dataset.single_task="" \ + --dataset.num_episodes=50 \ + --dataset.episode_time_s=30 \ + --dataset.reset_time_s=10 \ + --display_data=true +``` + +**4.7 Visualize** — **always** do this before training. Look for missing frames, camera blur, unreachable targets, inconsistent object positions. +After upload: https://huggingface.co/spaces/lerobot/visualize_dataset → paste `${HF_USER}/my_task`. Works for **any LeRobot-formatted Hub dataset** — use it to scout other datasets, inspect episode quality, or debug your own data before retraining. + +**4.8 Replay an episode** (sanity check) + +```bash +lerobot-replay --robot.type=so101_follower --robot.port= --robot.id=my_follower \ + --dataset.repo_id=${HF_USER}/my_task --dataset.episode=0 +``` + +**4.9 Train** (default: ACT — fastest, lowest memory). Apple silicon: `--policy.device=mps`. See §6/§7 for policy and duration. + +```bash +lerobot-train \ + --dataset.repo_id=${HF_USER}/my_task \ + --policy.type=act \ + --policy.device=cuda \ + --output_dir=outputs/train/act_my_task \ + --job_name=act_my_task \ + --batch_size=8 \ + --wandb.enable=true \ + --policy.repo_id=${HF_USER}/act_my_task +``` + +**4.10 Evaluate on the real robot** — compare success rate to a teleoperated baseline. + +```bash +lerobot-record \ + --robot.type=so101_follower --robot.port= --robot.id=my_follower \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --dataset.repo_id=${HF_USER}/eval_my_task \ + --dataset.single_task="" \ + --dataset.num_episodes=10 \ + --policy.path=${HF_USER}/act_my_task +``` + +--- + +## 5. 
Data collection tips (beginner → reliable policy) + +Good data beats clever models. Adopt these defaults and deviate only with evidence. + +### 5.1 Setup & ergonomics + +- **Fix the rig and cameras** before touching the software. If the rig vibrates or the operator gets frustrated, fix that first — more bad data won't help. +- **Lighting matters more than resolution.** Diffuse, consistent light. Avoid moving shadows. +- **"Can you do the task from the camera view alone?"** If no, your cameras are wrong. Fix before recording. +- Enable **action interpolation** for rollouts when available for smoother trajectories. + +### 5.2 Practice before you record + +- Do 5–10 demos without recording. Build a deliberate, repeatable strategy. +- Hesitant or inconsistent demos teach the model hesitation. + +### 5.3 Quality over speed + +Deliberate, high-quality execution beats fast sloppy runs. Optimize for speed only **after** strategy is dialed in — never trade quality for it. + +### 5.4 Consistency within and across episodes + +Same grasp, approach vector, and timing. Coherent strategies are much easier to learn than wildly varying movements. + +### 5.5 Start small, then extend (the golden rule) + +- **First 50 episodes = constrained version** of the task: one object, fixed position, fixed camera setup, one operator. +- Train a quick ACT model. See what fails. +- **Then add diversity** along one axis at a time: more positions → more lighting → more objects → more operators. +- Don't try to collect the "perfect dataset" on day one. Iterate. + +### 5.6 Policy choice for beginners + +- **Laptop / first time / want results fast → ACT.** Works surprisingly well, trains fast even on a laptop GPU. +- **Bigger GPU / language-conditioned / multi-task → SmolVLA.** Unfreezing the vision encoder (see §7) is a big win here. +- Defer π0 / π0.5 / Wall-X / X-VLA until you have a proven ACT baseline and a 20+ GB GPU. 
+ +### 5.7 Recommended defaults for your first task + +| Setting | Value | +| ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | +| Episodes | **50** to start, scale to 100–300 after first training | +| Episode length | 20–45 s (shorter is fine for grasp/place) | +| Reset time | 10 s | +| FPS | 30 | +| Cameras | **2 cameras recommended**: 1 fixed front + 1 wrist. Multi-view often outperforms single-view. A single fixed camera also works to keep things simple. | +| Task description | Short, specific, action-phrased sentence | + +### 5.8 Troubleshooting signal + +- Policy fails at one specific stage → record 10–20 more episodes **targeting that stage**. +- Policy flaps / oscillates → likely inconsistent demos, or need more training; re-record worst episodes (use **←** to redo). +- Policy ignores the object → camera framing or lighting issue, not a model issue. + +See also: [What makes a good dataset](https://huggingface.co/blog/lerobot-datasets#what-makes-a-good-dataset). + +--- + +## 6. Which policy should I train? + +Match the policy to the user's **GPU memory** and **time budget**. Numbers below come from an internal profiling run (one training update per policy). They are **indicative only** — see caveats. + +### 6.1 Profiling snapshot (indicative) + +All policies typically train for **5–10 epochs** (see §7). + +| Policy | Batch | Update (ms) | Peak GPU mem (GB) | Best for | +| ----------- | ----: | ----------: | ----------------: | ------------------------------------------------------------------------------------------------ | +| `act` | 4 | **83.9** | **0.94** | First-time users, laptops, single-task. Fast and reliable. | +| `diffusion` | 4 | 168.6 | 4.94 | Multi-modal action distributions; needs mid-range GPU. | +| `smolvla` | 1 | 357.8 | 3.93 | Language-conditioned, multi-task, small VLA. **Unfreeze vision encoder for big gains** (see §7). 
| +| `xvla` | 1 | 731.6 | 15.52 | Large VLA, multi-task. | +| `wall_x` | 1 | 716.5 | 15.95 | Large VLA with world-model objective. | +| `pi0` | 1 | 940.3 | 15.50 | Strong large VLA baseline (Physical Intelligence). | +| `pi05` | 1 | 1055.8 | 16.35 | Newer π policy; similar footprint to `pi0`. | + +**Critical caveats:** + +- **Optimizer:** measured with **SGD**. LeRobot's default is **AdamW**, which keeps extra optimizer state → **peak memory will be noticeably higher** with the default, especially for `pi0`, `pi05`, `wall_x`, `xvla`. +- **Batch size:** the large policies were profiled at batch 1. In practice use a **larger batch** for stable training (see §7.4). Memory scales roughly linearly with batch. + +### 6.2 Decision rules + +- **< 8 GB VRAM (laptop, 3060, M-series Mac):** → `act`. Maybe `diffusion` if you have ~6–8 GB free. +- **12–16 GB VRAM (4070/4080, A4000):** → `smolvla` with defaults, or `act`/`diffusion` with larger batch. `pi0`/`pi05`/`wall_x`/`xvla` feasible only with small batch + gradient accumulation. +- **24+ GB VRAM (3090/4090/A5000):** → any policy. Prefer `smolvla` (unfrozen) for multi-task; `act` for single-task grasp-and-place (still often the best ROI). Could experiment with `pi0` or `pi05` or `xvla` +- **80 GB (A100/H100):** → any, with healthy batch. `pi05`, `xvla`, `wall_x` become comfortable. +- **CPU only:** → don't train here. Use Google Colab (see [`docs/source/notebooks.mdx`](./docs/source/notebooks.mdx)) or a rented GPU. + +--- + +## 7. How long should I train? + +Robotics imitation learning usually converges in a **few epochs over the dataset**, not hundreds of thousands of raw steps. Think **epochs first**, then translate to steps. + +### 7.1 Rule of thumb + +- **Typical total: 5–10 epochs.** Start at 5, eval, then decide if more helps. +- Very small datasets (< 30 episodes) may want slightly more epochs — but first, **collect more data**. 
+- VLAs with a pretrained vision backbone typically need **fewer** epochs than training from scratch. + +### 7.2 Steps ↔ epochs conversion + +``` +total_frames = sum of frames over all episodes # e.g. 50 eps × 30 fps × 30 s ≈ 45,000 +steps_per_epoch = ceil(total_frames / batch_size) +total_steps = epochs × steps_per_epoch +``` + +Examples for `--batch_size=8`: + +| Dataset size | Frames | Steps / epoch | 5 epochs | 10 epochs | +| ----------------------- | ------: | ------------: | -------: | --------: | +| 50 eps × 30 s @ 30 fps | 45,000 | ~5,625 | 28k | 56k | +| 100 eps × 30 s @ 30 fps | 90,000 | ~11,250 | 56k | 113k | +| 300 eps × 30 s @ 30 fps | 270,000 | ~33,750 | 169k | 338k | + +Pass the resulting total with `--steps=`; eval at intermediate checkpoints (`outputs/train/.../checkpoints/`). + +### 7.3 Per-policy starting points (single-task, ~50 episodes) + +| Policy | Batch | Steps (first run) | Notes | +| -------------- | ----: | ----------------: | ----------------------------------------------------------------- | +| `act` | 8–16 | 30k–80k | Usually converges under 50k for single-task. | +| `diffusion` | 8–16 | 80k–150k | Benefits from longer training than ACT. | +| `smolvla` | 4–8 | 30k–80k | Pretrained VLM → converges fast. | +| `pi0` / `pi05` | 1–4 | 30k–80k | Memory-bound; use gradient accumulation for effective batch ≥ 16! | + +### 7.4 Batch size guidance + +- **Bigger batch is preferable** for stable gradients on teleop data. +- If GPU memory is the bottleneck, use **gradient accumulation** to raise _effective_ batch without raising peak memory. +- Scale **learning rate** gently with batch; most LeRobot defaults work fine for a 2–4× batch change. + +### 7.5 Scale LR schedule & checkpoints with `--steps` + +LeRobot's default schedulers (e.g. SmolVLA's cosine decay) use `scheduler_decay_steps=30_000`, which is sized for long training runs. When you shorten training (e.g. 
5k–10k steps on a small dataset), **scale the scheduler down to match** — otherwise the LR stays near the peak and never decays. Same for checkpoint frequency. + +```bash +lerobot-train ... \ + --steps=5000 \ + --policy.scheduler_decay_steps=5000 \ + --save_freq=5000 +``` + +Rule of thumb: set `scheduler_decay_steps ≈ steps`, and `save_freq` to whatever granularity you want for eval (e.g. every 1k–5k steps). Match `scheduler_warmup_steps` proportionally if your run is very short. + +### 7.6 SmolVLA: unfreeze the vision encoder for real gains + +SmolVLA ships with `freeze_vision_encoder=True`. Unfreezing usually **improves performance substantially** on specialized tasks, at the cost of more VRAM and slower steps. Enable with: + +```bash +lerobot-train ... --policy.type=smolvla \ + --policy.freeze_vision_encoder=false \ + --policy.train_expert_only=false +``` + +### 7.7 Signals to stop / keep going + +- Train loss plateaus → stop, save a Hub checkpoint. +- Train loss still dropping and you're under 10 epochs → keep going. + +--- + +## 8. Evaluation & benchmarks + +Two flavors of evaluation: + +### 8.1 Real-robot eval (SO-101, etc.) + +Reuse `lerobot-record` with `--policy.path` to run the trained policy on-robot and save the run as an eval dataset. Convention: prefix the dataset with `eval_`. + +```bash +lerobot-record \ + --robot.type=so101_follower --robot.port= --robot.id=my_follower \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --dataset.repo_id=${HF_USER}/eval_my_task \ + --dataset.single_task="" \ + --dataset.num_episodes=10 \ + --policy.path=${HF_USER}/act_my_task +``` + +Report success rate across episodes. Compare to a teleoperated baseline and to an earlier checkpoint to catch regressions. 
+ +### 8.2 Sim-benchmark eval + +For policies trained on sim datasets (PushT, Aloha, LIBERO, MetaWorld, RoboCasa, …) use `lerobot-eval` against the matching `env.type`: + +```bash +lerobot-eval \ + --policy.path=${HF_USER}/diffusion_pusht \ + --env.type=pusht \ + --eval.n_episodes=50 \ + --eval.batch_size=10 \ + --policy.device=cuda +``` + +- Use `--policy.path=outputs/train/.../checkpoints//pretrained_model` for local checkpoints. +- `--eval.n_episodes` should be ≥ 50 for a stable success-rate estimate. +- Available envs live in `src/lerobot/envs/`. See [`docs/source/libero.mdx`](./docs/source/libero.mdx), [`metaworld.mdx`](./docs/source/metaworld.mdx), [`robocasa.mdx`](./docs/source/robocasa.mdx), [`vlabench.mdx`](./docs/source/vlabench.mdx) for specific benchmarks. +- To add a new benchmark, see [`docs/source/adding_benchmarks.mdx`](./docs/source/adding_benchmarks.mdx) and [`envhub.mdx`](./docs/source/envhub.mdx). + +### 8.2b Dockerfiles for benchmark eval + +Benchmark envs have native dependencies that are painful to install locally. 
The repo ships **pre-baked Dockerfiles** for each supported benchmark — use these to run `lerobot-eval` in a reproducible environment: + +| Benchmark | Dockerfile | +| ----------- | -------------------------------------------------------------------------------------- | +| LIBERO | [`docker/Dockerfile.benchmark.libero`](./docker/Dockerfile.benchmark.libero) | +| LIBERO+ | [`docker/Dockerfile.benchmark.libero_plus`](./docker/Dockerfile.benchmark.libero_plus) | +| MetaWorld | [`docker/Dockerfile.benchmark.metaworld`](./docker/Dockerfile.benchmark.metaworld) | +| RoboCasa | [`docker/Dockerfile.benchmark.robocasa`](./docker/Dockerfile.benchmark.robocasa) | +| RoboCerebra | [`docker/Dockerfile.benchmark.robocerebra`](./docker/Dockerfile.benchmark.robocerebra) | +| RoboMME | [`docker/Dockerfile.benchmark.robomme`](./docker/Dockerfile.benchmark.robomme) | +| RoboTwin | [`docker/Dockerfile.benchmark.robotwin`](./docker/Dockerfile.benchmark.robotwin) | +| VLABench | [`docker/Dockerfile.benchmark.vlabench`](./docker/Dockerfile.benchmark.vlabench) | + +Build and run (adapt to your benchmark): + +```bash +docker build -f docker/Dockerfile.benchmark.robomme -t lerobot-bench-robomme . +docker run --gpus all --rm -it \ + -v $HOME/.cache/huggingface:/root/.cache/huggingface \ + lerobot-bench-robomme \ + lerobot-eval --policy.path= --env.type= --eval.n_episodes=50 +``` + +See [`docker/README.md`](./docker/README.md) for base-image details. + +### 8.3 Target success rates + +Single-task grasp-and-place with 50 clean episodes: ACT should reach **> 70% success** on the training configuration. Less → data problem (see §5), not model problem. Expect a drop when generalizing to new positions — scale episodes or diversity to recover. + +--- + +## 9. 
Further reading & resources + +- **Getting started:** [`installation.mdx`](./docs/source/installation.mdx) · [`il_robots.mdx`](./docs/source/il_robots.mdx) · [What makes a good dataset](https://huggingface.co/blog/lerobot-datasets) +- **Per-policy docs:** browse [`docs/source/*.mdx`](./docs/source/) (policies, hardware, benchmarks, advanced training). +- **Community:** [Discord](https://discord.com/invite/s3KuuzsPFb) · [Hub `LeRobot` tag](https://huggingface.co/datasets?other=LeRobot) · [Dataset visualizer](https://huggingface.co/spaces/lerobot/visualize_dataset) + +> Keep this file current. If you learn a rule that would prevent a class of user mistakes, add it here and in [`AGENTS.md`](./AGENTS.md). diff --git a/MANIFEST.in b/MANIFEST.in index c1fce3b5a..650e9d48f 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,3 +1,4 @@ include src/lerobot/templates/lerobot_modelcard_template.md +include src/lerobot/templates/lerobot_rewardmodel_modelcard_template.md include src/lerobot/datasets/card_template.md include src/lerobot/envs/metaworld_config.json diff --git a/docker/Dockerfile.internal b/docker/Dockerfile.internal index 6e4550933..c48e61bbb 100644 --- a/docker/Dockerfile.internal +++ b/docker/Dockerfile.internal @@ -18,9 +18,8 @@ # docker build -f docker/Dockerfile.internal -t lerobot-internal . 
# Configure the base image for CI with GPU access -# TODO(Steven): Bump these versions -ARG CUDA_VERSION=12.4.1 -ARG OS_VERSION=22.04 +ARG CUDA_VERSION=12.6.3 +ARG OS_VERSION=24.04 FROM nvidia/cuda:${CUDA_VERSION}-base-ubuntu${OS_VERSION} # Define Python version argument @@ -36,16 +35,13 @@ ENV DEBIAN_FRONTEND=noninteractive \ # Install Python, system dependencies, and uv (as root) RUN apt-get update && apt-get install -y --no-install-recommends \ - software-properties-common build-essential git curl \ - libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \ + build-essential git curl \ + libglib2.0-0 libgl1 libegl1 ffmpeg \ libusb-1.0-0-dev speech-dispatcher libgeos-dev portaudio19-dev \ cmake pkg-config ninja-build \ - && add-apt-repository -y ppa:deadsnakes/ppa \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - python${PYTHON_VERSION} \ - python${PYTHON_VERSION}-venv \ - python${PYTHON_VERSION}-dev \ + python${PYTHON_VERSION} \ + python${PYTHON_VERSION}-venv \ + python${PYTHON_VERSION}-dev \ && curl -LsSf https://astral.sh/uv/install.sh | sh \ && mv /root/.local/bin/uv /usr/local/bin/uv \ && useradd --create-home --shell /bin/bash user_lerobot \ diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml index 42ea06d7d..690a7b4a4 100644 --- a/docs/source/_toctree.yml +++ b/docs/source/_toctree.yml @@ -63,6 +63,8 @@ title: SARM title: "Reward Models" - sections: + - local: inference + title: Policy Deployment (lerobot-rollout) - local: async title: Use Async Inference - local: rtc diff --git a/docs/source/hil_data_collection.mdx b/docs/source/hil_data_collection.mdx index c4839577f..ba68959d1 100644 --- a/docs/source/hil_data_collection.mdx +++ b/docs/source/hil_data_collection.mdx @@ -50,30 +50,30 @@ This process can be repeated iteratively: deploy, collect, fine-tune, repeat. 
Ea ### Teleoperator Requirements -The `examples/hil` HIL scripts require **teleoperators with active motors** that can: +The `lerobot-rollout --strategy.type=dagger` mode requires **teleoperators with active motors** that can: - Enable/disable torque programmatically - Move to target positions (to mirror the robot state when pausing) -**Compatible teleoperators in the current `examples/hil` scripts:** +**Compatible teleoperators:** - `openarm_mini` - OpenArm Mini - `so_leader` - SO100 / SO101 leader arm > [!IMPORTANT] -> The provided `examples/hil` commands default to `bi_openarm_follower` + `openarm_mini`. +> The provided commands default to `bi_openarm_follower` + `openarm_mini`. > `so_follower` + `so_leader` configs are also registered and can be used via CLI flags. --- ## Script -A single script handles both synchronous and RTC-based inference. Toggle RTC with `--rtc.enabled=true`: +Use `lerobot-rollout` with `--strategy.type=dagger` for HIL data collection. Select the inference backend with `--inference.type=sync|rtc`: -| Mode | Flag | Models | -| ------------------------ | -------------------- | --------------------- | -| Standard (default) | _(no flag needed)_ | ACT, Diffusion Policy | -| Real-Time Chunking (RTC) | `--rtc.enabled=true` | Pi0, Pi0.5, SmolVLA | +| Mode | Flag | Models | +| ------------------------ | ---------------------- | --------------------- | +| Standard (default) | _(no flag needed)_ | ACT, Diffusion Policy | +| Real-Time Chunking (RTC) | `--inference.type=rtc` | Pi0, Pi0.5, SmolVLA | --- @@ -97,7 +97,7 @@ python src/lerobot/scripts/lerobot_train.py \ **Standard inference (ACT, Diffusion Policy):** ```bash -python examples/hil/hil_data_collection.py \ +lerobot-rollout --strategy.type=dagger \ --robot.type=bi_openarm_follower \ --robot.left_arm_config.port=can1 \ --robot.left_arm_config.side=left \ @@ -108,11 +108,10 @@ python examples/hil/hil_data_collection.py \ --teleop.port_left=/dev/ttyACM0 \ --teleop.port_right=/dev/ttyACM1 \ 
--policy.path=outputs/pretrain/checkpoints/last/pretrained_model \ - --dataset.repo_id=your-username/hil-dataset \ + --dataset.repo_id=your-username/rollout_hil_dataset \ --dataset.single_task="Fold the T-shirt properly" \ --dataset.fps=30 \ - --dataset.episode_time_s=1000 \ - --dataset.num_episodes=50 \ + --strategy.num_episodes=50 \ --interpolation_multiplier=2 ``` @@ -121,11 +120,11 @@ python examples/hil/hil_data_collection.py \ For models with high inference latency, enable RTC for smooth execution: ```bash -python examples/hil/hil_data_collection.py \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - --rtc.max_guidance_weight=5.0 \ - --rtc.prefix_attention_schedule=LINEAR \ +lerobot-rollout --strategy.type=dagger \ + --inference.type=rtc \ + --inference.rtc.execution_horizon=20 \ + --inference.rtc.max_guidance_weight=5.0 \ + --inference.rtc.prefix_attention_schedule=LINEAR \ --robot.type=bi_openarm_follower \ --robot.left_arm_config.port=can1 \ --robot.left_arm_config.side=left \ @@ -136,11 +135,10 @@ python examples/hil/hil_data_collection.py \ --teleop.port_left=/dev/ttyACM0 \ --teleop.port_right=/dev/ttyACM1 \ --policy.path=outputs/pretrain/checkpoints/last/pretrained_model \ - --dataset.repo_id=your-username/hil-rtc-dataset \ + --dataset.repo_id=your-username/rollout_hil_rtc_dataset \ --dataset.single_task="Fold the T-shirt properly" \ --dataset.fps=30 \ - --dataset.episode_time_s=1000 \ - --dataset.num_episodes=50 \ + --strategy.num_episodes=50 \ --interpolation_multiplier=3 ``` @@ -235,7 +233,7 @@ This HIL data collection approach builds on ideas from interactive imitation lea - **HG-DAgger** (Kelly et al., 2019) made this practical for robotics: a human expert monitors the robot and only intervenes when needed, rather than labeling every state. The gating between autonomous and human control is exactly the pause → takeover → return-to-policy loop used in the scripts here. 
-- **RaC** (Hu et al., 2025) scales this loop to long-horizon tasks by explicitly decomposing interventions into **recovery** (teleoperating back to a good state) and **correction** (demonstrating the right behavior from there). This decomposition is the protocol followed by the HIL scripts in `examples/hil`. +- **RaC** (Hu et al., 2025) scales this loop to long-horizon tasks by explicitly decomposing interventions into **recovery** (teleoperating back to a good state) and **correction** (demonstrating the right behavior from there). This decomposition is the protocol followed by the DAgger strategy in `lerobot-rollout`. - **π0.6/RECAP** (Physical Intelligence, 2025) applies the same iterative collect-and-finetune loop at scale with VLA models, showing that even large pretrained policies benefit substantially from targeted human corrections on their own failure modes. π0.6 is trained using RECAP. diff --git a/docs/source/il_robots.mdx b/docs/source/il_robots.mdx index 2356a93cc..ff0a6229e 100644 --- a/docs/source/il_robots.mdx +++ b/docs/source/il_robots.mdx @@ -509,121 +509,42 @@ hf upload ${HF_USER}/act_so101_test${CKPT} \ ## Run inference and evaluate your policy -You can use the `record` script from [`lerobot-record`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/lerobot_record.py) with a policy checkpoint as input, to run inference and evaluate your policy. For instance, run this command or API example to run inference and record 10 evaluation episodes: +Use `lerobot-rollout` to deploy a trained policy on your robot. 
You can choose different strategies depending on your needs: - + ```bash -lerobot-record \ +lerobot-rollout \ + --strategy.type=base \ + --policy.path=${HF_USER}/my_policy \ --robot.type=so100_follower \ --robot.port=/dev/ttyACM1 \ --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \ - --robot.id=my_awesome_follower_arm \ - --display_data=false \ - --dataset.repo_id=${HF_USER}/eval_so100 \ - --dataset.single_task="Put lego brick into the transparent box" \ - --dataset.streaming_encoding=true \ - --dataset.encoder_threads=2 \ - # --dataset.vcodec=auto \ - # <- Teleop optional if you want to teleoperate in between episodes \ - # --teleop.type=so100_leader \ - # --teleop.port=/dev/ttyACM0 \ - # --teleop.id=my_awesome_leader_arm \ - --policy.path=${HF_USER}/my_policy + --task="Put lego brick into the transparent box" \ + --duration=60 ``` - - - -```python -from lerobot.cameras.opencv import OpenCVCameraConfig -from lerobot.datasets import LeRobotDataset -from lerobot.utils.feature_utils import hw_to_dataset_features -from lerobot.policies.act import ACTPolicy -from lerobot.policies import make_pre_post_processors -from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig -from lerobot.scripts.lerobot_record import record_loop -from lerobot.common.control_utils import init_keyboard_listener -from lerobot.utils.utils import log_say -from lerobot.utils.visualization_utils import init_rerun - - -NUM_EPISODES = 5 -FPS = 30 -EPISODE_TIME_SEC = 60 -TASK_DESCRIPTION = "My task description" -HF_MODEL_ID = "/" -HF_DATASET_ID = "/" - -# Create the robot configuration -camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} -robot_config = SO100FollowerConfig( - port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm", cameras=camera_config -) - -# Initialize the 
robot -robot = SO100Follower(robot_config) - -# Initialize the policy -policy = ACTPolicy.from_pretrained(HF_MODEL_ID) - -# Configure the dataset features -action_features = hw_to_dataset_features(robot.action_features, "action") -obs_features = hw_to_dataset_features(robot.observation_features, "observation") -dataset_features = {**action_features, **obs_features} - -# Create the dataset -dataset = LeRobotDataset.create( - repo_id=HF_DATASET_ID, - fps=FPS, - features=dataset_features, - robot_type=robot.name, - use_videos=True, - image_writer_threads=4, -) - -# Initialize the keyboard listener and rerun visualization -_, events = init_keyboard_listener() -init_rerun(session_name="recording") - -# Connect the robot -robot.connect() - -preprocessor, postprocessor = make_pre_post_processors( - policy_cfg=policy, - pretrained_path=HF_MODEL_ID, - dataset_stats=dataset.meta.stats, -) - -for episode_idx in range(NUM_EPISODES): - log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}") - - # Run the policy inference loop - record_loop( - robot=robot, - events=events, - fps=FPS, - policy=policy, - preprocessor=preprocessor, - postprocessor=postprocessor, - dataset=dataset, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - ) - - dataset.save_episode() - -# Clean up -robot.disconnect() -dataset.push_to_hub() + +```bash +lerobot-rollout \ + --strategy.type=sentry \ + --strategy.upload_every_n_episodes=5 \ + --policy.path=${HF_USER}/my_policy \ + --robot.type=so100_follower \ + --robot.port=/dev/ttyACM1 \ + --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \ + --dataset.repo_id=${HF_USER}/eval_so100 \ + --dataset.single_task="Put lego brick into the transparent box" \ + --duration=600 ``` - - -As you can see, it's almost the same command as previously 
used to record your training dataset. Two things changed: +The `--strategy.type` flag selects the execution mode: -1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_so101_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so101_test`). -2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so101_test`). +- `base`: Autonomous rollout with no data recording (useful for quick evaluation) +- `sentry`: Continuous recording with auto-upload (useful for large-scale evaluation) +- `highlight`: Ring buffer recording with keystroke save (useful for capturing interesting events) +- `dagger`: Human-in-the-loop data collection (see [HIL Data Collection](./hil_data_collection)) + +All strategies support `--inference.type=rtc` for smooth execution with slow VLA models (Pi0, Pi0.5, SmolVLA). diff --git a/docs/source/inference.mdx b/docs/source/inference.mdx new file mode 100644 index 000000000..b2874d823 --- /dev/null +++ b/docs/source/inference.mdx @@ -0,0 +1,261 @@ +# Policy Deployment (lerobot-rollout) + +`lerobot-rollout` is the single CLI for deploying trained policies on real robots. It supports multiple execution strategies and inference backends, from quick evaluation to continuous recording and human-in-the-loop data collection. + +## Quick Start + +No extra dependencies are needed beyond your robot and policy extras. + +```bash +lerobot-rollout \ + --strategy.type=base \ + --policy.path=lerobot/act_koch_real \ + --robot.type=koch_follower \ + --robot.port=/dev/ttyACM0 \ + --task="pick up cube" \ + --duration=30 +``` + +This runs the policy for 30 seconds with no recording. + +--- + +## Strategies + +Select a strategy with `--strategy.type=`. 
Each strategy defines a different control loop with its own recording and interaction semantics. + +### Base (`--strategy.type=base`) + +Autonomous policy execution with no data recording. Use this for quick evaluation, demos, or when you only need to observe the robot. + +```bash +lerobot-rollout \ + --strategy.type=base \ + --policy.path=${HF_USER}/my_policy \ + --robot.type=so100_follower \ + --robot.port=/dev/ttyACM0 \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --task="Put lego brick into the box" \ + --duration=60 +``` + +| Flag | Description | +| ---------------- | ------------------------------------------------------ | +| `--duration` | Run time in seconds (0 = infinite) | +| `--task` | Task description passed to the policy | +| `--display_data` | Stream observations/actions to Rerun for visualization | + +### Sentry (`--strategy.type=sentry`) + +Continuous autonomous recording with periodic upload to the Hugging Face Hub. Episode boundaries are auto-computed from camera resolution and FPS so each saved episode produces a complete video file, keeping uploads efficient. + +Policy state (hidden state, RTC queue) persists across episode boundaries: the robot does not reset between episodes. 
+ +```bash +lerobot-rollout \ + --strategy.type=sentry \ + --strategy.upload_every_n_episodes=5 \ + --policy.path=${HF_USER}/my_policy \ + --robot.type=so100_follower \ + --robot.port=/dev/ttyACM0 \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --dataset.repo_id=${HF_USER}/rollout_eval_data \ + --dataset.single_task="Put lego brick into the box" \ + --duration=3600 +``` + +| Flag | Description | +| -------------------------------------- | ----------------------------------------------------------- | +| `--strategy.upload_every_n_episodes` | Push to Hub every N episodes (default: 5) | +| `--strategy.target_video_file_size_mb` | Target video file size for episode rotation (default: auto) | +| `--dataset.repo_id` | **Required.** Hub repository for the recorded dataset | +| `--dataset.push_to_hub` | Whether to push to Hub on teardown (default: true) | + +### Highlight (`--strategy.type=highlight`) + +Autonomous rollout with on-demand recording via a memory-bounded ring buffer. The robot runs continuously while the buffer captures the last N seconds of telemetry. Press the save key to flush the buffer and start live recording; press it again to save the episode. 
+ +```bash +lerobot-rollout \ + --strategy.type=highlight \ + --strategy.ring_buffer_seconds=30 \ + --strategy.save_key=s \ + --strategy.push_key=h \ + --policy.path=${HF_USER}/my_policy \ + --robot.type=koch_follower \ + --robot.port=/dev/ttyACM0 \ + --dataset.repo_id=${HF_USER}/rollout_highlight_data \ + --dataset.single_task="Pick up the red cube" +``` + +**Keyboard controls:** + +| Key | Action | +| ------------------ | -------------------------------------------------------- | +| `s` (configurable) | Start recording (flushes buffer) / stop and save episode | +| `h` (configurable) | Push dataset to Hub | +| `ESC` | Stop the session | + +| Flag | Description | +| -------------------------------------- | ---------------------------------------------- | +| `--strategy.ring_buffer_seconds` | Duration of buffered telemetry (default: 30) | +| `--strategy.ring_buffer_max_memory_mb` | Memory cap for the ring buffer (default: 2048) | +| `--strategy.save_key` | Key to toggle recording (default: `s`) | +| `--strategy.push_key` | Key to push to Hub (default: `h`) | + +### DAgger (`--strategy.type=dagger`) + +Human-in-the-loop data collection. Alternates between autonomous policy execution and human intervention via a teleoperator. Intervention frames are tagged with `intervention=True`. Requires a teleoperator (`--teleop.type`). + +See the [Human-In-the-Loop Data Collection](./hil_data_collection) guide for a detailed walkthrough. + +**Corrections-only mode** (default): Only human correction windows are recorded. Each correction becomes one episode. 
+ +```bash +lerobot-rollout \ + --strategy.type=dagger \ + --strategy.num_episodes=20 \ + --policy.path=outputs/pretrain/checkpoints/last/pretrained_model \ + --robot.type=bi_openarm_follower \ + --teleop.type=openarm_mini \ + --dataset.repo_id=${HF_USER}/rollout_hil_data \ + --dataset.single_task="Fold the T-shirt" +``` + +**Continuous recording mode** (`--strategy.record_autonomous=true`): Both autonomous and correction frames are recorded with time-based episode rotation (same as Sentry). + +```bash +lerobot-rollout \ + --strategy.type=dagger \ + --strategy.record_autonomous=true \ + --strategy.num_episodes=50 \ + --policy.path=${HF_USER}/my_policy \ + --robot.type=so100_follower \ + --robot.port=/dev/ttyACM0 \ + --teleop.type=so101_leader \ + --teleop.port=/dev/ttyACM1 \ + --dataset.repo_id=${HF_USER}/rollout_dagger_data \ + --dataset.single_task="Grasp the block" +``` + +**Keyboard controls** (default input device): + +| Key | Action | +| ------- | ------------------------------------------- | +| `Space` | Pause / resume policy execution | +| `Tab` | Start / stop human correction | +| `Enter` | Push dataset to Hub (corrections-only mode) | +| `ESC` | Stop the session | + +Foot pedal input is also supported via `--strategy.input_device=pedal`. Configure pedal codes with `--strategy.pedal.*` flags. + +| Flag | Description | +| ------------------------------------ | ------------------------------------------------------- | +| `--strategy.num_episodes` | Number of correction episodes to record (default: 10) | +| `--strategy.record_autonomous` | Record autonomous frames too (default: false) | +| `--strategy.upload_every_n_episodes` | Push to Hub every N episodes (default: 5) | +| `--strategy.input_device` | Input device: `keyboard` or `pedal` (default: keyboard) | +| `--teleop.type` | **Required.** Teleoperator type | + +--- + +## Inference Backends + +Select a backend with `--inference.type=`. All strategies work with both backends. 
+ +### Sync (default) + +One policy call per control tick. The main loop blocks until the action is computed. + +Works with all policies. No extra flags needed. + +### Real-Time Chunking (`--inference.type=rtc`) + +A background thread produces action chunks asynchronously. The main control loop polls for the next ready action while the policy computes the next chunk in parallel. + +Use RTC with large, slow VLA models (Pi0, Pi0.5, SmolVLA) for smooth, continuous motion despite high inference latency. + +```bash +lerobot-rollout \ + --strategy.type=base \ + --inference.type=rtc \ + --inference.rtc.execution_horizon=10 \ + --inference.rtc.max_guidance_weight=10.0 \ + --policy.path=${HF_USER}/pi0_policy \ + --robot.type=so100_follower \ + --robot.port=/dev/ttyACM0 \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --task="Pick up the cube" \ + --duration=60 \ + --device=cuda +``` + +| Flag | Description | +| ------------------------------------------- | -------------------------------------------------------------- | +| `--inference.rtc.execution_horizon` | Steps to blend with previous chunk (default: varies by policy) | +| `--inference.rtc.max_guidance_weight` | Consistency enforcement strength (default: varies by policy) | +| `--inference.rtc.prefix_attention_schedule` | Blend schedule: `LINEAR`, `EXP`, `ONES`, `ZEROS` | +| `--inference.queue_threshold` | Max queue size before backpressure (default: 30) | + +See the [Real-Time Chunking](./rtc) guide for details on tuning RTC parameters. + +--- + +## Common Flags + +| Flag | Description | Default | +| --------------------------------- | ----------------------------------------------------------------- | ------- | +| `--policy.path` | **Required.** HF Hub model ID or local checkpoint path | -- | +| `--robot.type` | **Required.** Robot type (e.g. 
`so100_follower`, `koch_follower`) | -- | +| `--robot.port` | Serial port for the robot | -- | +| `--robot.cameras` | Camera configuration (JSON dict) | -- | +| `--fps` | Control loop frequency | 30 | +| `--duration` | Run time in seconds (0 = infinite) | 0 | +| `--device` | Torch device (`cpu`, `cuda`, `mps`) | auto | +| `--task` | Task description (used when no dataset is provided) | -- | +| `--display_data` | Stream telemetry to Rerun visualization | false | +| `--display_ip` / `--display_port` | Remote Rerun server address | -- | +| `--interpolation_multiplier` | Action interpolation factor | 1 | +| `--use_torch_compile` | Enable `torch.compile` for inference | false | +| `--resume` | Resume a previous recording session | false | +| `--play_sounds` | Vocal synthesis for events | true | + +--- + +## Programmatic Usage + +For custom deployments (e.g. with kinematics processors), use the rollout module API directly: + +```python +from lerobot.rollout import BaseStrategyConfig, RolloutConfig, build_rollout_context +from lerobot.rollout.inference import SyncInferenceConfig +from lerobot.rollout.strategies import BaseStrategy +from lerobot.utils.process import ProcessSignalHandler + +cfg = RolloutConfig( + robot=my_robot_config, + policy=my_policy_config, + strategy=BaseStrategyConfig(), + inference=SyncInferenceConfig(), + fps=30, + duration=60, + task="my task", +) + +signal_handler = ProcessSignalHandler(use_threads=True) +ctx = build_rollout_context( + cfg, + signal_handler.shutdown_event, + robot_action_processor=my_custom_action_processor, # optional + robot_observation_processor=my_custom_obs_processor, # optional +) + +strategy = BaseStrategy(cfg.strategy) +try: + strategy.setup(ctx) + strategy.run(ctx) +finally: + strategy.teardown(ctx) +``` + +See `examples/so100_to_so100_EE/rollout.py` and `examples/phone_to_so100/rollout.py` for full examples with kinematics processors. 
diff --git a/docs/source/language_and_recipes.mdx b/docs/source/language_and_recipes.mdx index 952b6ef09..cb58c516a 100644 --- a/docs/source/language_and_recipes.mdx +++ b/docs/source/language_and_recipes.mdx @@ -1,7 +1,34 @@ # Language columns and recipes -LeRobot stores reusable language annotations directly next to frame data in `data/chunk-*/file-*.parquet`. -The two optional columns are: +Most LeRobot datasets ship with a single `task` string per episode — fine for +short, single-instruction skills, but not enough for the longer-horizon, +multi-modal robot policies the field is moving toward (high-level planning, +memory, interjections, VQA, tool use). To support those policies without +forking the dataset format, LeRobot extends `LeRobotDataset` with two optional +language columns and a small recipe layer that turns those rows into +chat-style training samples on the fly. + +The design splits cleanly into three layers: + +1. **Data in the dataset** — language annotations stored next to frames in + `data/chunk-*/file-*.parquet` as two optional columns (`language_persistent` + and `language_events`). Datasets without these columns keep their existing + behavior. +2. **Recipe** — a YAML file that declares which annotation rows to bind and + how to lay them out as chat turns (`role`, `content`, optional images, + optional tool calls). Recipes are pure config; no Python required to add a + new one. +3. **Training format** — at sample time, `RenderMessagesStep` resolves the + recipe against the per-frame annotations and emits HF-style `messages` plus + LeRobot-specific sidecars (`message_streams`, `target_message_indices`) + that policy processors consume. + +This page describes each layer in turn. 
+ +## Layer 1 — language columns in the dataset + +The two optional columns live next to frame data in +`data/chunk-*/file-*.parquet`: - `language_persistent`: a list of rows broadcast across every frame in an episode for state that remains active, such as `subtask`, `plan`, and `memory`. - `language_events`: a list of rows only on the exact frame where an event was emitted, such as `interjection`, `vqa`, and speech tool calls. @@ -26,9 +53,9 @@ the validator enforce this via `validate_camera_field(style, camera)`. `meta/tasks.parquet` remains the canonical source for the task. The special `${task}` recipe binding always reads that task string and does not depend on language annotations. -## Architecture +### Architecture -The language stack has three layers: +The language stack itself has three internal modules backing layer 1: 1. `lerobot.datasets.language` defines the schema, style registry, and `column_for_style`. 2. `lerobot.datasets.language_render` resolves rows and renders messages. @@ -36,7 +63,7 @@ The language stack has three layers: `LeRobotDataset` stays recipe-agnostic. It passes `language_persistent` and `language_events` through when present, and unannotated datasets keep their existing behavior. -## Temporal semantics +### Temporal semantics Persistent styles are active after emission until replaced: @@ -52,7 +79,7 @@ Event styles only exist on their exact timestamp: Exact event matching has no tolerance window, so writers must stamp event rows with frame timestamps from the parquet data. -## View-dependent resolution +### View-dependent resolution For view-dependent styles (`vqa`, `motion`, `trace`), the resolver gains a `camera=` filter parallel to `role=` and `tool_name=`. Datasets with multiple @@ -78,9 +105,11 @@ ask_vqa_top: Add one such sub-recipe per camera the dataset records. -## Recipe anatomy +## Layer 2 — recipe anatomy -Recipes are YAML files backed by `TrainingRecipe` and `MessageTurn`. 
+Recipes are YAML files backed by `TrainingRecipe` and `MessageTurn`. They +declare which annotation rows to pull (via `bindings`) and how to compose them +into chat turns (`messages`). ```yaml messages: @@ -88,6 +117,13 @@ messages: - { role: assistant, content: "${subtask}", stream: low_level, target: true } ``` +A recipe can also branch into a weighted **blend** of sub-recipes. At sample +time, exactly one branch is selected deterministically from the sample index, +so different frames train different objectives (e.g. memory updates vs. +low-level execution vs. VQA) without any Python wiring. + +## Layer 3 — training format + Rendered samples use HF-style chat messages plus LeRobot sidecars: ```python @@ -96,12 +132,7 @@ sample["message_streams"] sample["target_message_indices"] ``` -The renderer does not apply a tokenizer chat template. Policy processors decide how to serialize the messages for their backbone. - -## Blends - -Blend recipes select one weighted sub-recipe deterministically from the sample index. -The canonical `recipes/pi05_hirobot.yaml` combines memory updates, interjection responses, high-level subtask prediction, low-level execution, and VQA. +The renderer does not apply a tokenizer chat template. Policy processors decide how to serialize the messages for their backbone, which keeps the same dataset usable across SmolVLA, Pi0.5, and any future VLM that expects OpenAI-style chat messages. ## Graceful absence diff --git a/docs/source/rename_map.mdx b/docs/source/rename_map.mdx index 6249faaca..16ee6344a 100644 --- a/docs/source/rename_map.mdx +++ b/docs/source/rename_map.mdx @@ -61,17 +61,6 @@ lerobot-eval \ --rename_map='{"observation.images.image": "observation.images.base_0_rgb", "observation.images.image2": "observation.images.left_wrist_0_rgb"}' ``` -### Recording - -`lerobot-record` also supports rename maps, nested under the dataset config: - -```bash -lerobot-record \ # When running inference - --policy.path="/smolVLA_finetuned" \ - ... 
\ - --dataset.rename_map='{"observation.images.glove2": "observation.images.image"}' -``` - ## Alternative: edit the policy config directly If you always use the same dataset or environment, you can **edit the policy's `config.json`** so its observation keys match your data source. Then no rename map is needed. @@ -105,10 +94,10 @@ XVLA-base has three visual inputs and `empty_cameras=0` by default. Your dataset ## Quick reference -| Goal | What to do | -| ----------------------------------------- | --------------------------------------------------------------------------- | -| Dataset keys ≠ policy keys | `--rename_map='{"dataset_key": "policy_key", ...}'` | -| Env keys ≠ policy keys (eval) | `--rename_map='{"env_key": "policy_key", ...}'` | -| Recording with different keys (inference) | `--dataset.rename_map='{"source_key": "policy_key", ...}'`. | -| Fewer cameras than policy expects | `--policy.empty_cameras=N` (supported by PI0, PI05, PI0Fast, SmolVLA, XVLA) | -| Avoid passing a rename map | Edit the policy's `config.json` so its keys match your data source | +| Goal | What to do | +| --------------------------------------- | --------------------------------------------------------------------------- | +| Dataset keys ≠ policy keys | `--rename_map='{"dataset_key": "policy_key", ...}'` | +| Env keys ≠ policy keys (eval) | `--rename_map='{"env_key": "policy_key", ...}'` | +| Rollout with different keys (inference) | `--rename_map='{"source_key": "policy_key", ...}'`. 
| +| Fewer cameras than policy expects | `--policy.empty_cameras=N` (supported by PI0, PI05, PI0Fast, SmolVLA, XVLA) | +| Avoid passing a rename map | Edit the policy's `config.json` so its keys match your data source | diff --git a/docs/source/rtc.mdx b/docs/source/rtc.mdx index 9485d8b66..eadc34344 100644 --- a/docs/source/rtc.mdx +++ b/docs/source/rtc.mdx @@ -34,7 +34,7 @@ pip install -e ".[smolvla]" ### Using RTC with Pi0 -You can find a complete reference implementation in [eval_with_real_robot.py](examples/rtc/eval_with_real_robot.py). +You can use `lerobot-rollout --strategy.type=base --inference.type=rtc` for RTC deployment on real robots. The snippet below provides a simplified pseudo-example of how RTC operates with Pi0 in your pipeline: ```python @@ -137,8 +137,12 @@ The script generates a visualization of the denoising process, comparing standar ## Testing RTC with a Real Robot ```bash -python examples/rtc/eval_with_real_robot.py \ +lerobot-rollout \ + --strategy.type=base \ --policy.path=${HF_USERNAME}/policy_repo_id \ + --inference.type=rtc \ + --inference.rtc.execution_horizon=10 \ + --inference.rtc.max_guidance_weight=10.0 \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem58FA0834591 \ --robot.cameras="{ gripper: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ @@ -178,7 +182,7 @@ visualizer = RTCDebugVisualizer() # ... create plots ``` -See `examples/rtc/eval_dataset.py` for a complete example of visualization. +See `examples/rtc/eval_dataset.py` for a complete example of offline RTC visualization. 
## References diff --git a/docs/source/sarm.mdx b/docs/source/sarm.mdx index cd488fe1f..b5e63a07e 100644 --- a/docs/source/sarm.mdx +++ b/docs/source/sarm.mdx @@ -46,7 +46,7 @@ This ensures identical task states map to consistent progress values, even acros ## Inputs and Targets (What the new code expects) -SARM is trained through its processor (`src/lerobot/policies/sarm/processor_sarm.py`), which: +SARM is trained through its processor (`src/lerobot/rewards/sarm/processor_sarm.py`), which: - **Encodes** images and task text with CLIP (ViT-B/32) into `video_features` and `text_features` - **Pads/truncates** robot state into `state_features` (up to `max_state_dim`) @@ -347,7 +347,7 @@ Use `compute_rabc_weights.py` with `--visualize-only` to visualize model predict ```bash -python src/lerobot/policies/sarm/compute_rabc_weights.py \ +python -m lerobot.rewards.sarm.compute_rabc_weights \ --dataset-repo-id your-username/your-dataset \ --reward-model-path your-username/sarm-model \ --visualize-only \ @@ -360,7 +360,7 @@ python src/lerobot/policies/sarm/compute_rabc_weights.py \ ```bash -python src/lerobot/policies/sarm/compute_rabc_weights.py \ +python -m lerobot.rewards.sarm.compute_rabc_weights \ --dataset-repo-id your-username/your-dataset \ --reward-model-path your-username/sarm-model \ --visualize-only \ @@ -373,7 +373,7 @@ python src/lerobot/policies/sarm/compute_rabc_weights.py \ ```bash -python src/lerobot/policies/sarm/compute_rabc_weights.py \ +python -m lerobot.rewards.sarm.compute_rabc_weights \ --dataset-repo-id your-username/your-dataset \ --reward-model-path your-username/sarm-model \ --visualize-only \ @@ -429,7 +429,7 @@ The weighting follows **Equations 8-9** from the paper: First, run the SARM model on all frames in your dataset to compute progress values: ```bash -python src/lerobot/policies/sarm/compute_rabc_weights.py \ +python -m lerobot.rewards.sarm.compute_rabc_weights \ --dataset-repo-id your-username/your-dataset \ --reward-model-path 
your-username/sarm-model \ --head-mode sparse \ @@ -465,15 +465,15 @@ This script: ### Step 5b: Train Policy with RA-BC -Once you have the progress file, train your policy with RA-BC weighting. The progress file is auto-detected from the dataset path (`sarm_progress.parquet`). Currently PI0, PI0.5 and SmolVLA are supported with RA-BC: +Once you have the progress file, train your policy with RA-BC weighting. The progress file is auto-detected from the dataset path (`sarm_progress.parquet`) if not explicitly provided. Currently PI0, PI0.5 and SmolVLA are supported with RA-BC: ```bash lerobot-train \ --dataset.repo_id=your-username/your-dataset \ --policy.type=pi0 \ - --use_rabc=true \ - --rabc_head_mode=sparse \ - --rabc_kappa=0.01 \ + --sample_weighting.type=rabc \ + --sample_weighting.head_mode=sparse \ + --sample_weighting.kappa=0.01 \ --output_dir=outputs/train/policy_rabc \ --batch_size=32 \ --steps=40000 @@ -488,12 +488,13 @@ The training script automatically: **RA-BC Arguments:** -| Argument | Description | Default | -| ---------------------- | ---------------------------------------------------------- | ---------------------------------- | -| `--use_rabc` | Enable RA-BC sample weighting | `false` | -| `--rabc_progress_path` | Path to progress parquet file (auto-detected from dataset) | `sarm_progress.parquet` in dataset | -| `--rabc_head_mode` | Which SARM head's progress to use: `sparse` or `dense` | `sparse` | -| `--rabc_kappa` | Threshold κ for high-quality samples | `0.01` | +| Argument | Description | Default | +| ---------------------------------- | ------------------------------------------------------ | ----------------------- | +| `--sample_weighting.type` | Weighting strategy type (`rabc` or `uniform`) | `rabc` | +| `--sample_weighting.progress_path` | Path to progress parquet file | `sarm_progress.parquet` | +| `--sample_weighting.head_mode` | Which SARM head's progress to use: `sparse` or `dense` | `sparse` | +| `--sample_weighting.kappa` | 
Threshold κ for high-quality samples | `0.01` | +| `--sample_weighting.epsilon` | Small constant for numerical stability | `1e-6` | ### Tuning RA-BC Kappa @@ -511,30 +512,30 @@ The `kappa` parameter is the threshold that determines which samples get full we Monitor these WandB metrics during training: -| Metric | Healthy Range | Problem Indicator | -| ------------------ | ------------- | ------------------------- | -| `rabc_mean_weight` | 0.3 - 0.8 | ≈ 1.0 means kappa too low | -| `rabc_delta_mean` | > 0 | Should be positive | -| `rabc_delta_std` | > 0 | Variance in data quality | +| Metric | Healthy Range | Problem Indicator | +| ----------------------------- | ------------- | ------------------------- | +| `sample_weight_mean_weight` | 0.3 - 0.8 | ≈ 1.0 means kappa too low | +| `sample_weighting/delta_mean` | > 0 | Should be positive | +| `sample_weighting/delta_std` | > 0 | Variance in data quality | -**If `rabc_mean_weight ≈ 1.0`:** Your kappa is too low. Most samples have `delta > kappa` and bypass the soft-weighting entirely. RA-BC becomes equivalent to vanilla BC. +**If `sample_weight_mean_weight ≈ 1.0`:** Your kappa is too low. Most samples have `delta > kappa` and bypass the soft-weighting entirely. RA-BC becomes equivalent to vanilla BC. **Setting kappa based on your data:** -The default `kappa=0.01` was tuned for the paper's T-shirt folding task (~90s episodes at 30fps). For your dataset, check the logged `rabc_delta_mean` and `rabc_delta_std`: +The default `kappa=0.01` was tuned for the paper's T-shirt folding task (~90s episodes at 30fps). 
For your dataset, check the logged `sample_weighting/delta_mean` and `sample_weighting/delta_std`: ``` # If delta_mean ≈ 0.03 and delta_std ≈ 0.02: # Most deltas fall in range [0.01, 0.05] # Option 1: Set kappa = delta_mean (medium selectivity) ---rabc_kappa=0.03 +--sample_weighting.kappa=0.03 # Option 2: Set kappa = delta_mean + delta_std (high selectivity) ---rabc_kappa=0.05 +--sample_weighting.kappa=0.05 # Option 3: Set kappa = delta_mean + 2*delta_std (very selective) ---rabc_kappa=0.07 +--sample_weighting.kappa=0.07 ``` **When RA-BC may not help:** @@ -550,8 +551,8 @@ accelerate launch \ src/lerobot/scripts/lerobot_train.py \ --dataset.repo_id=your-username/your-dataset \ --policy.type=pi0 \ - --use_rabc=true \ - --rabc_kappa=0.01 \ + --sample_weighting.type=rabc \ + --sample_weighting.kappa=0.01 \ --output_dir=outputs/train/policy_rabc \ --batch_size=32 \ --steps=40000 @@ -576,7 +577,7 @@ accelerate launch \ ### RA-BC 1. **Train SARM first**: RA-BC quality depends entirely on SARM quality -2. **Monitor `rabc_mean_weight`**: If it's ≈ 1.0, increase kappa (see [Tuning RA-BC Kappa](#tuning-ra-bc-kappa)) +2. **Monitor `sample_weight_mean_weight`**: If it's ≈ 1.0, increase kappa (see [Tuning RA-BC Kappa](#tuning-ra-bc-kappa)) --- diff --git a/docs/source/tools.mdx b/docs/source/tools.mdx index 47d556a2e..7dc2500f1 100644 --- a/docs/source/tools.mdx +++ b/docs/source/tools.mdx @@ -6,9 +6,9 @@ runtime dispatches to a real implementation (TTS, controller, logger, …). This page covers: -1. Where the tool catalog lives (PR 1). -2. How the annotation pipeline produces tool-call atoms (PR 2). -3. How to add your own tool (PR 3). +1. Where the tool catalog lives. +2. How the annotation pipeline produces tool-call atoms. +3. How to add your own tool. ## Where tools are declared @@ -64,8 +64,8 @@ prompt_str = tokenizer.apply_chat_template( ``` **The implementations** — runnable Python — live under -`src/lerobot/tools/`, one file per tool. 
The `say` implementation -arrives in PR 3 and wraps Kyutai's pocket-tts model. +`src/lerobot/tools/`, one file per tool. The canonical `say` +implementation wraps Kyutai's pocket-tts model. ## Per-row tool *invocations* @@ -114,8 +114,7 @@ loop. Add an entry under `meta/info.json["tools"]`. Either edit the file directly on disk *before* running the annotation pipeline (it'll be -preserved) or hand it to `lerobot-annotate` via a config flag (PR 2 — -exact CLI lands with the pipeline change). +preserved) or hand it to `lerobot-annotate` via a config flag. ```json { @@ -167,12 +166,12 @@ class RecordObservationTool: ``` One file per tool keeps dependencies isolated — `record_observation` -might pull `pillow`, while `say` (PR 3) pulls `pocket-tts`. Users -installing only the tools they need avoid heavy transitive deps. +might pull `pillow`, while `say` pulls `pocket-tts`. Users installing +only the tools they need avoid heavy transitive deps. ### Step 3 — register it -Add to `src/lerobot/tools/registry.py` (PR 3): +Add to `src/lerobot/tools/registry.py`: ```python from .record_observation import RecordObservationTool @@ -184,14 +183,6 @@ That's it. At runtime `get_tools(meta)` looks up each schema in `meta.tools`, instantiates the matching registered class, and returns a name → instance dict the dispatcher can route into. 
-## Where this fits in the three-PR stack - -| Layer | PR | What lands | -|---|---|---| -| Catalog storage in `meta/info.json` + `meta.tools` accessor | PR 1 | This page; `SAY_TOOL_SCHEMA`, `DEFAULT_TOOLS` constants in `lerobot.datasets.language`; `LeRobotDatasetMetadata.tools` property | -| Annotation pipeline writes `tools` to meta after a run; honors anything users pre-populated | PR 2 | `lerobot-annotate` ensures `meta/info.json["tools"]` includes the canonical `say` and merges any user-declared tools | -| Runnable implementations under `src/lerobot/tools/`; runtime dispatcher; `say.py` wired to Kyutai's pocket-tts | PR 3 | One file per tool; `Tool` protocol; `TOOL_REGISTRY`; optional `[tools]` extra in `pyproject.toml` | - If you want to use a tool *without* writing an implementation (e.g. for training-time chat-template formatting only), step 1 alone is enough — the model still learns to *generate* the call. Steps 2 and 3 are only diff --git a/docs/source/unitree_g1.mdx b/docs/source/unitree_g1.mdx index 2e615085e..69965a561 100644 --- a/docs/source/unitree_g1.mdx +++ b/docs/source/unitree_g1.mdx @@ -274,7 +274,8 @@ python src/lerobot/scripts/lerobot_train.py \ Once trained, we recommend deploying policies using inference-time RTC: ```bash -python examples/rtc/eval_with_real_robot.py \ +lerobot-rollout \ + --strategy.type=base \ --policy.path=your-username/your-repo-id \ --policy.device=cuda \ --robot.type=unitree_g1 \ @@ -284,7 +285,7 @@ python examples/rtc/eval_with_real_robot.py \ --task="task_description" \ --duration=1000 \ --fps=30 \ - --rtc.enabled=true + --inference.type=rtc ``` --- diff --git a/docs/source/xvla.mdx b/docs/source/xvla.mdx index a5ae8f1df..be686512b 100644 --- a/docs/source/xvla.mdx +++ b/docs/source/xvla.mdx @@ -220,7 +220,7 @@ REAL_DIM = 12 # Postprocessing: Trim 20D predictions to 12D for deployment ``` -See the [action_hub.py](/home/jade_choghari/robot/lerobot/src/lerobot/policies/xvla/action_hub.py) implementation for details. 
+See the [action_hub.py](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/xvla/action_hub.py) implementation for details. #### Auto Action Mode (Recommended) @@ -519,9 +519,9 @@ If you use X-VLA in your research, please cite: - [X-VLA Paper](https://arxiv.org/pdf/2510.10274) - [LeRobot Documentation](https://github.com/huggingface/lerobot) -- [Action Registry Implementation](https://github.com/huggingface/lerobot/src/lerobot/policies/xvla/action_hub.py) -- [Processor Implementation](https://github.com/huggingface/lerobot/src/lerobot/policies/xvla/processor_xvla.py) -- [Model Configuration](https://github.com/huggingface/lerobot/src/lerobot/policies/xvla/configuration_xvla.py) +- [Action Registry Implementation](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/xvla/action_hub.py) +- [Processor Implementation](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/xvla/processor_xvla.py) +- [Model Configuration](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/xvla/configuration_xvla.py) ## Contributing diff --git a/examples/dataset/slurm_compute_rabc.py b/examples/dataset/slurm_compute_rabc.py index c71d8a0fc..6d7f75f6f 100644 --- a/examples/dataset/slurm_compute_rabc.py +++ b/examples/dataset/slurm_compute_rabc.py @@ -69,7 +69,7 @@ class ComputeProgressShards(PipelineStep): import torch from tqdm import tqdm - from lerobot.policies.sarm.compute_rabc_weights import ( + from lerobot.rewards.sarm.compute_rabc_weights import ( generate_all_frame_indices, interpolate_progress, load_sarm_resources, diff --git a/examples/hil/hil_data_collection.py b/examples/hil/hil_data_collection.py deleted file mode 100644 index 09a36dbe1..000000000 --- a/examples/hil/hil_data_collection.py +++ /dev/null @@ -1,1184 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Human-in-the-Loop (HIL) Data Collection with optional Real-Time Chunking (RTC). - -Implements the RaC paradigm (https://arxiv.org/abs/2509.07953) for LeRobot. By default uses synchronous -inference (best for fast models like ACT / Diffusion Policy). Set --rtc.enabled=true for -asynchronous background inference (recommended for large models like Pi0 / Pi0.5 / SmolVLA). - -The workflow: -1. Policy runs autonomously -2. Press SPACE to pause - robot holds position -3. Press 'c' to take control - human provides RECOVERY + CORRECTION -4. Press 'p' to hand control back to policy and continue recording -5. Press → to end episode (save and continue to next) -6. 
Reset, then do next rollout - -Keyboard Controls: - SPACE - Pause policy (robot holds position, no recording) - c - Take control (start correction, recording resumes) - p - Resume policy after pause/correction (recording continues) - → - End episode (save and continue to next) - ← - Re-record episode - ESC - Stop recording and push dataset to hub - -Usage: - # Standard synchronous inference (ACT, Diffusion Policy) - python examples/hil/hil_data_collection.py \ - --robot.type=bi_openarm_follower \ - --teleop.type=openarm_mini \ - --policy.path=path/to/pretrained_model \ - --dataset.repo_id=user/hil-dataset \ - --dataset.single_task="Fold the T-shirt properly" \ - --dataset.fps=30 \ - --interpolation_multiplier=2 - - # With RTC for large models (Pi0, Pi0.5, SmolVLA) - python examples/hil/hil_data_collection.py \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - --rtc.max_guidance_weight=5.0 \ - --rtc.prefix_attention_schedule=LINEAR \ - --robot.type=bi_openarm_follower \ - --teleop.type=openarm_mini \ - --policy.path=path/to/pretrained_model \ - --dataset.repo_id=user/hil-dataset \ - --dataset.single_task="Fold the T-shirt properly" \ - --dataset.fps=30 \ - --interpolation_multiplier=3 - - # RTC with bi_openarm_follower + OpenArm Mini teleop and pi0.5 policy - python examples/hil/hil_data_collection.py \ - --policy.path=lerobot-data-collection/folding_final \ - --robot.type=bi_openarm_follower \ - --robot.cameras='{left_wrist: {type: opencv, index_or_path: "/dev/video4", width: 1280, height: 720, fps: 30}, base: {type: opencv, index_or_path: "/dev/video2", width: 640, height: 480, fps: 30}, right_wrist: {type: opencv, index_or_path: "/dev/video0", width: 1280, height: 720, fps: 30}}' \ - --robot.left_arm_config.port=can0 \ - --robot.left_arm_config.side=left \ - --robot.left_arm_config.can_interface=socketcan \ - --robot.left_arm_config.disable_torque_on_disconnect=true \ - --robot.left_arm_config.max_relative_target=8.0 \ - --robot.right_arm_config.port=can1 \ 
- --robot.right_arm_config.side=right \ - --robot.right_arm_config.can_interface=socketcan \ - --robot.right_arm_config.disable_torque_on_disconnect=true \ - --robot.right_arm_config.max_relative_target=8.0 \ - --teleop.type=openarm_mini \ - --teleop.port_left=/dev/ttyACM1 \ - --teleop.port_right=/dev/ttyACM0 \ - --dataset.repo_id=lerobot-data-collection/hil_folding \ - --dataset.single_task="Fold the T-shirt properly" \ - --dataset.fps=30 \ - --dataset.num_episodes=50 \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - --rtc.max_guidance_weight=5.0 \ - --rtc.prefix_attention_schedule=LINEAR \ - --interpolation_multiplier=3 \ - --calibrate=true \ - --device=cuda -""" - -import logging -import math -import time -from dataclasses import dataclass, field -from pprint import pformat -from threading import Event, Lock, Thread -from typing import Any - -import torch -from hil_utils import ( - HILDatasetConfig, - init_keyboard_listener, - make_identity_processors, - print_controls, - reset_loop, - teleop_disable_torque, - teleop_smooth_move_to, -) - -from lerobot.cameras.opencv import OpenCVCameraConfig # noqa: F401 -from lerobot.cameras.realsense import RealSenseCameraConfig # noqa: F401 -from lerobot.common.control_utils import is_headless, predict_action -from lerobot.configs import PreTrainedConfig, parser -from lerobot.datasets import ( - LeRobotDataset, - VideoEncodingManager, - aggregate_pipeline_dataset_features, - create_initial_features, - safe_stop_image_writer, -) -from lerobot.policies import PreTrainedPolicy, get_policy_class, make_policy, make_pre_post_processors -from lerobot.policies.rtc import ActionInterpolator, ActionQueue, LatencyTracker, RTCConfig -from lerobot.policies.utils import make_robot_action -from lerobot.processor import ( - NormalizerProcessorStep, - PolicyProcessorPipeline, - RelativeActionsProcessorStep, - TransitionKey, - create_transition, - rename_stats, - to_relative_actions, -) -from lerobot.robots import Robot, RobotConfig, 
make_robot_from_config -from lerobot.robots.bi_openarm_follower import BiOpenArmFollowerConfig -from lerobot.robots.so_follower import SOFollowerRobotConfig # noqa: F401 -from lerobot.teleoperators import Teleoperator, TeleoperatorConfig, make_teleoperator_from_config -from lerobot.teleoperators.openarm_mini import OpenArmMiniConfig # noqa: F401 -from lerobot.teleoperators.so_leader import SOLeaderTeleopConfig # noqa: F401 -from lerobot.utils import get_safe_torch_device -from lerobot.utils.constants import ACTION, OBS_STATE, OBS_STR -from lerobot.utils.feature_utils import build_dataset_frame, combine_feature_dicts, hw_to_dataset_features -from lerobot.utils.robot_utils import precise_sleep -from lerobot.utils.utils import init_logging, log_say -from lerobot.utils.visualization_utils import init_rerun, log_rerun_data - -logger = logging.getLogger(__name__) - - -# RTC helpers - - -class ThreadSafeRobot: - """Thread-safe wrapper for robot operations (used with RTC background thread).""" - - def __init__(self, robot: Robot): - self._robot = robot - self._lock = Lock() - - def get_observation(self) -> dict[str, Any]: - with self._lock: - return self._robot.get_observation() - - def send_action(self, action: dict) -> None: - with self._lock: - self._robot.send_action(action) - - @property - def observation_features(self) -> dict: - return self._robot.observation_features - - @property - def action_features(self) -> dict: - return self._robot.action_features - - @property - def name(self) -> str: - return self._robot.name - - @property - def robot_type(self) -> str: - return self._robot.robot_type - - @property - def cameras(self): - return getattr(self._robot, "cameras", {}) - - -def _set_openarm_max_relative_target_if_missing( - robot_cfg: RobotConfig, max_relative_target: float = 8.0 -) -> None: - """Set a safe default max_relative_target for OpenArm followers when not provided.""" - if isinstance(robot_cfg, BiOpenArmFollowerConfig): - if 
robot_cfg.left_arm_config.max_relative_target is None: - robot_cfg.left_arm_config.max_relative_target = max_relative_target - if robot_cfg.right_arm_config.max_relative_target is None: - robot_cfg.right_arm_config.max_relative_target = max_relative_target - - -def _reanchor_relative_rtc_prefix( - prev_actions_absolute: torch.Tensor, - current_state: torch.Tensor, - relative_step: RelativeActionsProcessorStep | None, - normalizer_step: NormalizerProcessorStep | None, - policy_device: torch.device | str, -) -> torch.Tensor: - """Convert absolute leftovers into model space for relative-action RTC policies.""" - if relative_step is None: - return prev_actions_absolute.to(policy_device) - - state = current_state.detach().cpu() - if state.dim() == 1: - state = state.unsqueeze(0) - - action_cpu = prev_actions_absolute.detach().cpu() - mask = relative_step._build_mask(action_cpu.shape[-1]) - relative_actions = to_relative_actions(action_cpu, state, mask) - - transition = create_transition(action=relative_actions) - if normalizer_step is not None: - transition = normalizer_step(transition) - - return transition[TransitionKey.ACTION].to(policy_device) - - -def _normalize_prev_actions_length(prev_actions: torch.Tensor, target_steps: int) -> torch.Tensor: - """Pad/truncate RTC prefix actions to a fixed length for stable compiled inference.""" - if prev_actions.ndim != 2: - raise ValueError(f"Expected prev_actions to be 2D [T, A], got shape={tuple(prev_actions.shape)}") - - steps, action_dim = prev_actions.shape - if steps == target_steps: - return prev_actions - if steps > target_steps: - return prev_actions[:target_steps] - - padded = torch.zeros((target_steps, action_dim), dtype=prev_actions.dtype, device=prev_actions.device) - padded[:steps] = prev_actions - return padded - - -def _resolve_action_key_order(cfg, dataset_action_names: list[str]) -> list[str]: - """Choose action name ordering used to map policy tensor outputs to robot action dict.""" - policy_action_names = 
getattr(cfg.policy, "action_feature_names", None) - if not policy_action_names: - return dataset_action_names - - policy_action_names = list(policy_action_names) - if len(policy_action_names) != len(dataset_action_names): - logger.warning( - "[RTC] policy.action_feature_names length (%d) != dataset action dim (%d); " - "falling back to dataset order", - len(policy_action_names), - len(dataset_action_names), - ) - return dataset_action_names - - if set(dataset_action_names) != set(policy_action_names): - logger.warning( - "[RTC] policy.action_feature_names keys do not match dataset action keys; " - "falling back to dataset order" - ) - return dataset_action_names - - return policy_action_names - - -def _resolve_state_joint_order( - policy_action_names: list[str] | None, - available_joint_names: list[str], -) -> list[str]: - """Resolve joint-state ordering used to build observation.state.""" - if not policy_action_names: - return available_joint_names - - policy_action_names = list(policy_action_names) - available_set = set(available_joint_names) - policy_set = set(policy_action_names) - - if len(policy_action_names) != len(available_joint_names) or policy_set != available_set: - logger.warning( - "policy.action_feature_names does not match available state joints; " - "falling back to robot observation order" - ) - return available_joint_names - - logger.info("Using policy.action_feature_names order for observation.state mapping") - return policy_action_names - - -def _start_pedal_listener(events: dict): - """Start foot pedal listener thread if evdev is available. - - Pedal input is restricted to HIL control handoff only: - policy -> pause -> takeover -> resume policy. - Episode save/advance remains keyboard-only (right arrow). 
- """ - import threading - - try: - from evdev import InputDevice, categorize, ecodes - except ImportError: - logging.warning("[Pedal] evdev not installed - pedal support disabled") - return - - pedal_device = "/dev/input/by-id/usb-PCsensor_FootSwitch-event-kbd" - key_left = "KEY_A" - key_right = "KEY_C" - - def pedal_reader(): - try: - dev = InputDevice(pedal_device) - logger.info(f"[Pedal] Connected: {dev.name}") - - for ev in dev.read_loop(): - if ev.type != ecodes.EV_KEY: - continue - - key = categorize(ev) - code = key.keycode - if isinstance(code, (list, tuple)): - code = code[0] - - if key.keystate != 1: - continue - - if events["in_reset"]: - if code in [key_left, key_right]: - events["start_next_episode"] = True - else: - if code not in [key_left, key_right]: - continue - - if events["correction_active"]: - events["resume_policy"] = True - elif events["policy_paused"]: - events["start_next_episode"] = True - else: - events["policy_paused"] = True - - except FileNotFoundError: - logging.info(f"[Pedal] Device not found: {pedal_device}") - except PermissionError: - logging.warning(f"[Pedal] Permission denied for {pedal_device}") - except Exception as e: - logging.warning(f"[Pedal] Error: {e}") - - thread = threading.Thread(target=pedal_reader, daemon=True) - thread.start() - - -def _rtc_inference_thread( - policy: PreTrainedPolicy, - obs_holder: dict, - obs_lock: Lock, - hw_features: dict, - preprocessor: PolicyProcessorPipeline, - postprocessor: PolicyProcessorPipeline, - queue_holder: dict, - shutdown_event: Event, - policy_active: Event, - compile_warmup_done: Event, - cfg, -): - """Background thread for RTC action chunk generation.""" - latency_tracker = LatencyTracker() - time_per_chunk = 1.0 / cfg.dataset.fps - threshold = 30 - policy_device = policy.config.device - stats_window_start = time.perf_counter() - policy_inference_count = 0 - latency_sum_s = 0.0 - inference_count = 0 - warmup_required = max(1, int(cfg.compile_warmup_inferences)) if 
cfg.use_torch_compile else 0 - - relative_step = next( - ( - step - for step in preprocessor.steps - if isinstance(step, RelativeActionsProcessorStep) and step.enabled - ), - None, - ) - normalizer_step = next( - (step for step in preprocessor.steps if isinstance(step, NormalizerProcessorStep)), - None, - ) - if relative_step is not None: - if relative_step.action_names is None: - cfg_action_names = getattr(cfg.policy, "action_feature_names", None) - if cfg_action_names: - relative_step.action_names = list(cfg_action_names) - else: - fallback_action_names = obs_holder.get("action_feature_names") - if fallback_action_names: - relative_step.action_names = list(fallback_action_names) - logger.info("[RTC] Relative actions enabled: re-anchoring RTC prefix to current state") - - while not shutdown_event.is_set(): - if not policy_active.is_set(): - time.sleep(0.01) - continue - - queue = queue_holder.get("queue") - with obs_lock: - obs = obs_holder.get("obs") - if queue is None or obs is None: - time.sleep(0.01) - continue - - if queue.qsize() <= threshold: - try: - current_time = time.perf_counter() - idx_before = queue.get_action_index() - prev_actions = queue.get_left_over() - - latency = latency_tracker.max() - delay = math.ceil(latency / time_per_chunk) if latency else 0 - - obs_batch = build_dataset_frame(hw_features, obs, prefix="observation") - for name in obs_batch: - obs_batch[name] = torch.from_numpy(obs_batch[name]) - if "image" in name: - obs_batch[name] = obs_batch[name].float() / 255 - obs_batch[name] = obs_batch[name].permute(2, 0, 1).contiguous() - obs_batch[name] = obs_batch[name].unsqueeze(0).to(policy_device) - - obs_batch["task"] = [cfg.dataset.single_task] - obs_batch["robot_type"] = obs_holder.get("robot_type", "unknown") - - preprocessed = preprocessor(obs_batch) - - if prev_actions is not None and relative_step is not None and OBS_STATE in obs_batch: - prev_actions_absolute = queue.get_processed_left_over() - if prev_actions_absolute is not None 
and prev_actions_absolute.numel() > 0: - prev_actions = _reanchor_relative_rtc_prefix( - prev_actions_absolute=prev_actions_absolute, - current_state=obs_batch[OBS_STATE], - relative_step=relative_step, - normalizer_step=normalizer_step, - policy_device=policy_device, - ) - - if prev_actions is not None: - prev_actions = _normalize_prev_actions_length( - prev_actions, target_steps=cfg.rtc.execution_horizon - ) - - actions = policy.predict_action_chunk( - preprocessed, inference_delay=delay, prev_chunk_left_over=prev_actions - ) - - original = actions.squeeze(0).clone() - processed = postprocessor(actions).squeeze(0) - new_latency = time.perf_counter() - current_time - new_delay = math.ceil(new_latency / time_per_chunk) - inference_count += 1 - is_warmup_inference = cfg.use_torch_compile and inference_count <= warmup_required - if is_warmup_inference: - latency_tracker.reset() - else: - latency_tracker.add(new_latency) - queue.merge(original, processed, new_delay, idx_before) - policy_inference_count += 1 - latency_sum_s += new_latency - if ( - is_warmup_inference - and inference_count >= warmup_required - and not compile_warmup_done.is_set() - ): - compile_warmup_done.set() - logger.info( - "[RTC] Compile warmup complete (%d/%d inferences)", - inference_count, - warmup_required, - ) - logger.debug("[RTC] Inference latency=%.2fs, queue=%d", new_latency, queue.qsize()) - except Exception as e: - logger.error("[RTC] Error: %s", e) - time.sleep(0.5) - else: - time.sleep(0.01) - - now = time.perf_counter() - if cfg.log_hz and (window_elapsed := now - stats_window_start) >= cfg.hz_log_interval_s: - policy_hz = policy_inference_count / window_elapsed - avg_latency_ms = ( - (latency_sum_s / policy_inference_count * 1000.0) if policy_inference_count else 0.0 - ) - logger.info( - "[HIL RTC rates] policy=%.1f Hz | avg_inference=%.1f ms | queue=%d", - policy_hz, - avg_latency_ms, - queue.qsize(), - ) - stats_window_start = now - policy_inference_count = 0 - latency_sum_s = 0.0 
- - -# Config - - -@dataclass -class HILConfig: - robot: RobotConfig - teleop: TeleoperatorConfig - dataset: HILDatasetConfig - policy: PreTrainedConfig | None = None - rtc: RTCConfig = field(default_factory=RTCConfig) - interpolation_multiplier: int = 2 - record_interpolated_actions: bool = False - display_data: bool = True - play_sounds: bool = True - resume: bool = False - device: str = "cuda" - use_torch_compile: bool = False - compile_warmup_inferences: int = 2 - calibrate: bool = False - log_hz: bool = True - hz_log_interval_s: float = 2.0 - - def __post_init__(self): - policy_path = parser.get_path_arg("policy") - if policy_path: - cli_overrides = parser.get_cli_overrides("policy") - self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) - self.policy.pretrained_path = policy_path - if self.policy is None: - raise ValueError("policy.path is required") - - @classmethod - def __get_path_fields__(cls) -> list[str]: - return ["policy"] - - -# Rollout loops - - -@safe_stop_image_writer -def _rollout_sync( - robot: Robot, - teleop: Teleoperator, - policy: PreTrainedPolicy, - preprocessor: PolicyProcessorPipeline, - postprocessor: PolicyProcessorPipeline, - dataset: LeRobotDataset, - events: dict, - cfg: HILConfig, -): - """Rollout loop with standard synchronous inference.""" - fps = cfg.dataset.fps - device = get_safe_torch_device(cfg.device) - stream_online = bool(cfg.dataset.streaming_encoding) - record_stride = 1 if cfg.record_interpolated_actions else max(1, cfg.interpolation_multiplier) - - policy.reset() - preprocessor.reset() - postprocessor.reset() - - frame_buffer: list[dict] = [] - teleop_disable_torque(teleop) - - was_paused = False - waiting_for_takeover = False - last_action: dict[str, Any] | None = None - robot_action: dict[str, Any] = {} - action_keys = list(dataset.features[ACTION]["names"]) - obs_state_names = list(dataset.features[f"{OBS_STR}.state"]["names"]) - obs_image_names = [ - 
key.removeprefix(f"{OBS_STR}.images.") - for key in dataset.features - if key.startswith(f"{OBS_STR}.images.") - ] - - interpolator = ActionInterpolator(multiplier=cfg.interpolation_multiplier) - control_interval = interpolator.get_control_interval(fps) - - timestamp = 0.0 - record_tick = 0 - start_t = time.perf_counter() - stats_window_start = start_t - policy_inference_count = 0 - robot_command_count = 0 - - while timestamp < cfg.dataset.episode_time_s: - loop_start = time.perf_counter() - - if events["exit_early"]: - events["exit_early"] = False - events["policy_paused"] = False - events["correction_active"] = False - events["resume_policy"] = False - break - - if events["resume_policy"] and ( - events["policy_paused"] or events["correction_active"] or waiting_for_takeover - ): - events["resume_policy"] = False - events["start_next_episode"] = False - events["policy_paused"] = False - events["correction_active"] = False - waiting_for_takeover = False - was_paused = False - last_action = None - interpolator.reset() - policy.reset() - preprocessor.reset() - postprocessor.reset() - - if events["policy_paused"] and not was_paused: - obs = robot.get_observation() - robot_pos = { - k: v for k, v in obs.items() if k.endswith(".pos") and k in robot.observation_features - } - teleop_smooth_move_to(teleop, robot_pos, duration_s=2.0, fps=50) - events["start_next_episode"] = False - waiting_for_takeover = True - was_paused = True - interpolator.reset() - - if waiting_for_takeover and events["start_next_episode"]: - teleop_disable_torque(teleop) - events["start_next_episode"] = False - events["correction_active"] = True - waiting_for_takeover = False - - obs = robot.get_observation() - obs_filtered = {k: obs[k] for k in obs_state_names if k in obs} - obs_filtered.update({k: obs[k] for k in obs_image_names if k in obs}) - obs_frame = build_dataset_frame(dataset.features, obs_filtered, prefix=OBS_STR) - - if events["correction_active"]: - robot_action = teleop.get_action() - 
robot.send_action(robot_action) - robot_command_count += 1 - action_frame = build_dataset_frame(dataset.features, robot_action, prefix=ACTION) - if record_tick % record_stride == 0: - frame = {**obs_frame, **action_frame, "task": cfg.dataset.single_task} - if stream_online: - dataset.add_frame(frame) - else: - frame_buffer.append(frame) - record_tick += 1 - - elif waiting_for_takeover or events["policy_paused"]: - if last_action: - robot.send_action(last_action) - robot_command_count += 1 - - else: - if interpolator.needs_new_action(): - action_values = predict_action( - observation=obs_frame, - policy=policy, - device=device, - preprocessor=preprocessor, - postprocessor=postprocessor, - use_amp=policy.config.use_amp, - task=cfg.dataset.single_task, - robot_type=robot.robot_type, - ) - policy_inference_count += 1 - robot_action = make_robot_action(action_values, dataset.features) - action_tensor = torch.tensor([robot_action[k] for k in action_keys]) - interpolator.add(action_tensor) - - interp_action = interpolator.get() - if interp_action is not None: - robot_action = {k: interp_action[i].item() for i, k in enumerate(action_keys)} - robot.send_action(robot_action) - robot_command_count += 1 - last_action = robot_action - action_frame = build_dataset_frame(dataset.features, robot_action, prefix=ACTION) - if record_tick % record_stride == 0: - frame = {**obs_frame, **action_frame, "task": cfg.dataset.single_task} - if stream_online: - dataset.add_frame(frame) - else: - frame_buffer.append(frame) - record_tick += 1 - - if cfg.display_data and robot_action: - log_rerun_data(observation=obs_filtered, action=robot_action) - - dt = time.perf_counter() - loop_start - if (sleep_time := control_interval - dt) > 0: - precise_sleep(sleep_time) - now = time.perf_counter() - timestamp = now - start_t - - if cfg.log_hz and (window_elapsed := now - stats_window_start) >= cfg.hz_log_interval_s: - policy_hz = policy_inference_count / window_elapsed - robot_hz = robot_command_count 
/ window_elapsed - logger.info( - "[HIL rates] policy=%.1f Hz (target=%.1f) | robot=%.1f Hz (target=%.1f)", - policy_hz, - fps, - robot_hz, - fps * cfg.interpolation_multiplier, - ) - stats_window_start = now - policy_inference_count = 0 - robot_command_count = 0 - - teleop_disable_torque(teleop) - - if not stream_online: - for frame in frame_buffer: - dataset.add_frame(frame) - - -@safe_stop_image_writer -def _rollout_rtc( - robot, - teleop: Teleoperator, - policy: PreTrainedPolicy, - preprocessor: PolicyProcessorPipeline, - postprocessor: PolicyProcessorPipeline, - dataset: LeRobotDataset, - events: dict, - cfg: HILConfig, - queue_holder: dict, - obs_holder: dict, - obs_lock: Lock, - policy_active: Event, - compile_warmup_done: Event, - hw_features: dict, -): - """Rollout loop with RTC for asynchronous inference.""" - fps = cfg.dataset.fps - stream_online = bool(cfg.dataset.streaming_encoding) - record_stride = 1 if cfg.record_interpolated_actions else max(1, cfg.interpolation_multiplier) - - policy.reset() - preprocessor.reset() - postprocessor.reset() - - frame_buffer: list[dict] = [] - teleop_disable_torque(teleop) - - was_paused = False - waiting_for_takeover = False - last_action: dict[str, Any] | None = None - dataset_action_keys = list(dataset.features[ACTION]["names"]) - action_keys = _resolve_action_key_order(cfg, dataset_action_keys) - if action_keys != dataset_action_keys: - logger.info("[RTC] Using policy.action_feature_names order for action tensor mapping") - else: - logger.info("[RTC] Using dataset action feature order for action tensor mapping") - obs_state_names = list(dataset.features[f"{OBS_STR}.state"]["names"]) - obs_image_names = [ - key.removeprefix(f"{OBS_STR}.images.") - for key in dataset.features - if key.startswith(f"{OBS_STR}.images.") - ] - - interpolator = ActionInterpolator(multiplier=cfg.interpolation_multiplier) - control_interval = interpolator.get_control_interval(fps) - - robot_action: dict[str, Any] = {} - timestamp = 0.0 - 
start_t = time.perf_counter() - stats_window_start = start_t - robot_command_count = 0 - record_tick = 0 - obs_poll_interval = 1.0 / fps - last_obs_poll_t = 0.0 - obs_filtered: dict[str, Any] = {} - obs_frame: dict[str, Any] = {} - warmup_wait_logged = False - warmup_queue_flushed = False - - while timestamp < cfg.dataset.episode_time_s: - loop_start = time.perf_counter() - - if events["exit_early"]: - events["exit_early"] = False - events["policy_paused"] = False - events["correction_active"] = False - events["resume_policy"] = False - break - - if events["resume_policy"] and ( - events["policy_paused"] or events["correction_active"] or waiting_for_takeover - ): - events["resume_policy"] = False - events["start_next_episode"] = False - events["policy_paused"] = False - events["correction_active"] = False - waiting_for_takeover = False - was_paused = False - last_action = None - interpolator.reset() - queue_holder["queue"] = ActionQueue(cfg.rtc) - policy_active.clear() - policy.reset() - preprocessor.reset() - postprocessor.reset() - - if events["policy_paused"] and not was_paused: - policy_active.clear() - obs = robot.get_observation() - robot_pos = { - k: v for k, v in obs.items() if k.endswith(".pos") and k in robot.observation_features - } - teleop_smooth_move_to(teleop, robot_pos, duration_s=2.0, fps=50) - events["start_next_episode"] = False - waiting_for_takeover = True - was_paused = True - interpolator.reset() - - if waiting_for_takeover and events["start_next_episode"]: - teleop_disable_torque(teleop) - events["start_next_episode"] = False - events["correction_active"] = True - waiting_for_takeover = False - queue_holder["queue"] = ActionQueue(cfg.rtc) - - now_for_obs = time.perf_counter() - should_poll_obs = ( - not obs_filtered - or (now_for_obs - last_obs_poll_t) >= obs_poll_interval - or events["correction_active"] - or waiting_for_takeover - or events["policy_paused"] - ) - if should_poll_obs: - obs = robot.get_observation() - obs_filtered = {k: 
obs[k] for k in obs_state_names if k in obs} - obs_filtered.update({k: obs[k] for k in obs_image_names if k in obs}) - obs_frame = build_dataset_frame(dataset.features, obs_filtered, prefix=OBS_STR) - with obs_lock: - obs_holder["obs"] = obs_filtered - last_obs_poll_t = now_for_obs - - if events["correction_active"]: - robot_action = teleop.get_action() - robot.send_action(robot_action) - robot_command_count += 1 - action_frame = build_dataset_frame(dataset.features, robot_action, prefix=ACTION) - if record_tick % record_stride == 0: - frame = {**obs_frame, **action_frame, "task": cfg.dataset.single_task} - if stream_online: - dataset.add_frame(frame) - else: - frame_buffer.append(frame) - record_tick += 1 - - elif waiting_for_takeover or events["policy_paused"]: - if last_action: - robot.send_action(last_action) - robot_command_count += 1 - - else: - if not policy_active.is_set(): - policy_active.set() - - if cfg.use_torch_compile and not compile_warmup_done.is_set(): - if not warmup_wait_logged: - logger.info( - "[RTC] Waiting for compile warmup (%d inferences) before policy rollout", - max(1, int(cfg.compile_warmup_inferences)), - ) - warmup_wait_logged = True - else: - if cfg.use_torch_compile and not warmup_queue_flushed: - queue_holder["queue"] = ActionQueue(cfg.rtc) - interpolator.reset() - warmup_queue_flushed = True - logger.info("[RTC] Warmup queue cleared; starting live policy rollout") - - queue = queue_holder["queue"] - - if interpolator.needs_new_action(): - new_action = queue.get() if queue else None - if new_action is not None: - interpolator.add(new_action.cpu()) - - action_tensor = interpolator.get() - if action_tensor is not None: - robot_action = { - k: action_tensor[i].item() - for i, k in enumerate(action_keys) - if i < len(action_tensor) - } - robot.send_action(robot_action) - robot_command_count += 1 - last_action = robot_action - action_frame = build_dataset_frame(dataset.features, robot_action, prefix=ACTION) - if record_tick % 
record_stride == 0: - frame = {**obs_frame, **action_frame, "task": cfg.dataset.single_task} - if stream_online: - dataset.add_frame(frame) - else: - frame_buffer.append(frame) - record_tick += 1 - - dt = time.perf_counter() - loop_start - if (sleep_time := control_interval - dt) > 0: - precise_sleep(sleep_time) - now = time.perf_counter() - timestamp = now - start_t - - if cfg.log_hz and (window_elapsed := now - stats_window_start) >= cfg.hz_log_interval_s: - robot_hz = robot_command_count / window_elapsed - logger.info( - "[HIL RTC rates] robot=%.1f Hz (target=%.1f)", - robot_hz, - fps * cfg.interpolation_multiplier, - ) - stats_window_start = now - robot_command_count = 0 - - policy_active.clear() - teleop_disable_torque(teleop) - - if not stream_online: - for frame in frame_buffer: - dataset.add_frame(frame) - - -# Main collection function - - -@parser.wrap() -def hil_collect(cfg: HILConfig) -> LeRobotDataset: - """Main HIL data collection function (supports both sync and RTC modes).""" - init_logging() - logger.info(pformat(cfg.__dict__)) - - use_rtc = cfg.rtc.enabled - - if use_rtc: - _set_openarm_max_relative_target_if_missing(cfg.robot, max_relative_target=8.0) - - if cfg.display_data: - init_rerun(session_name="hil_collection") - - robot_raw = make_robot_from_config(cfg.robot) - teleop = make_teleoperator_from_config(cfg.teleop) - - teleop_proc, obs_proc = make_identity_processors() - - action_features_hw = {k: v for k, v in robot_raw.action_features.items() if k.endswith(".pos")} - all_observation_features = robot_raw.observation_features - available_joint_names = [ - key for key, value in all_observation_features.items() if key.endswith(".pos") and value is float - ] - ordered_joint_names = _resolve_state_joint_order( - getattr(cfg.policy, "action_feature_names", None), - available_joint_names, - ) - observation_features_hw = { - joint_name: all_observation_features[joint_name] for joint_name in ordered_joint_names - } - for key, value in 
all_observation_features.items(): - if isinstance(value, tuple): - observation_features_hw[key] = value - - dataset_features = combine_feature_dicts( - aggregate_pipeline_dataset_features( - pipeline=teleop_proc, - initial_features=create_initial_features(action=action_features_hw), - use_videos=cfg.dataset.video, - ), - aggregate_pipeline_dataset_features( - pipeline=obs_proc, - initial_features=create_initial_features(observation=observation_features_hw), - use_videos=cfg.dataset.video, - ), - ) - - dataset = None - listener = None - shutdown_event = Event() - policy_active = Event() - compile_warmup_done = Event() - if not cfg.use_torch_compile: - compile_warmup_done.set() - rtc_thread = None - - try: - if cfg.resume: - dataset = LeRobotDataset( - cfg.dataset.repo_id, - root=cfg.dataset.root, - batch_encoding_size=cfg.dataset.video_encoding_batch_size, - vcodec=cfg.dataset.vcodec, - streaming_encoding=cfg.dataset.streaming_encoding, - encoder_queue_maxsize=cfg.dataset.encoder_queue_maxsize, - encoder_threads=cfg.dataset.encoder_threads, - ) - if hasattr(robot_raw, "cameras") and robot_raw.cameras: - dataset.start_image_writer( - num_processes=cfg.dataset.num_image_writer_processes, - num_threads=cfg.dataset.num_image_writer_threads_per_camera * len(robot_raw.cameras), - ) - else: - dataset = LeRobotDataset.create( - cfg.dataset.repo_id, - cfg.dataset.fps, - root=cfg.dataset.root, - robot_type=robot_raw.name, - features=dataset_features, - use_videos=cfg.dataset.video, - image_writer_processes=cfg.dataset.num_image_writer_processes, - image_writer_threads=cfg.dataset.num_image_writer_threads_per_camera - * len(robot_raw.cameras if hasattr(robot_raw, "cameras") else []), - batch_encoding_size=cfg.dataset.video_encoding_batch_size, - vcodec=cfg.dataset.vcodec, - streaming_encoding=cfg.dataset.streaming_encoding, - encoder_queue_maxsize=cfg.dataset.encoder_queue_maxsize, - encoder_threads=cfg.dataset.encoder_threads, - ) - - # Load policy — RTC needs manual loading 
for predict_action_chunk support - if use_rtc: - policy_class = get_policy_class(cfg.policy.type) - policy_config = PreTrainedConfig.from_pretrained(cfg.policy.pretrained_path) - if hasattr(policy_config, "compile_model"): - policy_config.compile_model = cfg.use_torch_compile - policy = policy_class.from_pretrained(cfg.policy.pretrained_path, config=policy_config) - policy.config.rtc_config = cfg.rtc - if hasattr(policy, "init_rtc_processor"): - policy.init_rtc_processor() - policy = policy.to(cfg.device) - policy.eval() - else: - policy = make_policy(cfg.policy, ds_meta=dataset.meta) - - preprocessor, postprocessor = make_pre_post_processors( - policy_cfg=cfg.policy, - pretrained_path=cfg.policy.pretrained_path, - dataset_stats=rename_stats(dataset.meta.stats, cfg.dataset.rename_map), - preprocessor_overrides={ - "device_processor": {"device": cfg.device}, - "rename_observations_processor": {"rename_map": cfg.dataset.rename_map}, - }, - ) - - # Connect hardware - if use_rtc: - logger.info("Connecting robot (calibrate=%s)", cfg.calibrate) - robot_raw.connect(calibrate=False) - if cfg.calibrate and hasattr(robot_raw, "calibrate"): - robot_raw.calibrate() - robot_raw.disconnect() - robot_raw.connect(calibrate=False) - else: - robot_raw.connect() - - robot = ThreadSafeRobot(robot_raw) if use_rtc else robot_raw - teleop.connect() - listener, events = init_keyboard_listener() - - # RTC-specific setup - queue_holder = None - obs_holder = None - obs_lock = Lock() - hw_features = None - if use_rtc: - _start_pedal_listener(events) - queue_holder = {"queue": ActionQueue(cfg.rtc)} - obs_holder = { - "obs": None, - "robot_type": robot.robot_type, - "action_feature_names": [key for key in robot.action_features if key.endswith(".pos")], - } - hw_features = hw_to_dataset_features(observation_features_hw, "observation") - - rtc_thread = Thread( - target=_rtc_inference_thread, - args=( - policy, - obs_holder, - obs_lock, - hw_features, - preprocessor, - postprocessor, - 
queue_holder, - shutdown_event, - policy_active, - compile_warmup_done, - cfg, - ), - daemon=True, - ) - rtc_thread.start() - - print_controls(rtc=use_rtc) - logger.info(f" Policy: {cfg.policy.pretrained_path}") - logger.info(f" Task: {cfg.dataset.single_task}") - logger.info(f" Interpolation: {cfg.interpolation_multiplier}x") - if use_rtc: - logger.info(f" RTC: enabled (execution_horizon={cfg.rtc.execution_horizon})") - - with VideoEncodingManager(dataset): - recorded = 0 - while recorded < cfg.dataset.num_episodes and not events["stop_recording"]: - log_say(f"Episode {dataset.num_episodes}", cfg.play_sounds) - - if use_rtc: - queue_holder["queue"] = ActionQueue(cfg.rtc) - _rollout_rtc( - robot=robot, - teleop=teleop, - policy=policy, - preprocessor=preprocessor, - postprocessor=postprocessor, - dataset=dataset, - events=events, - cfg=cfg, - queue_holder=queue_holder, - obs_holder=obs_holder, - obs_lock=obs_lock, - policy_active=policy_active, - compile_warmup_done=compile_warmup_done, - hw_features=hw_features, - ) - else: - _rollout_sync( - robot=robot, - teleop=teleop, - policy=policy, - preprocessor=preprocessor, - postprocessor=postprocessor, - dataset=dataset, - events=events, - cfg=cfg, - ) - - if events["rerecord_episode"]: - log_say("Re-recording", cfg.play_sounds) - events["rerecord_episode"] = False - events["exit_early"] = False - dataset.clear_episode_buffer() - continue - - dataset.save_episode() - recorded += 1 - - if recorded < cfg.dataset.num_episodes and not events["stop_recording"]: - reset_loop(robot, teleop, events, cfg.dataset.fps) - - finally: - log_say("Stop recording", cfg.play_sounds, blocking=True) - - shutdown_event.set() - policy_active.clear() - - if rtc_thread and rtc_thread.is_alive(): - rtc_thread.join(timeout=2.0) - - if dataset: - dataset.finalize() - - if robot_raw.is_connected: - robot_raw.disconnect() - if teleop.is_connected: - teleop.disconnect() - - if not is_headless() and listener: - listener.stop() - - if 
cfg.dataset.push_to_hub and dataset is not None: - dataset.push_to_hub(tags=cfg.dataset.tags, private=cfg.dataset.private) - - return dataset - - -def main(): - from lerobot.utils.import_utils import register_third_party_plugins - - register_third_party_plugins() - hil_collect() - - -if __name__ == "__main__": - main() diff --git a/examples/hil/hil_utils.py b/examples/hil/hil_utils.py deleted file mode 100644 index 0f433d83a..000000000 --- a/examples/hil/hil_utils.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2025 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Shared utilities for Human-in-the-Loop data collection scripts.""" - -import logging -import time -from dataclasses import dataclass, field -from pathlib import Path - -from lerobot.common.control_utils import is_headless -from lerobot.processor import ( - IdentityProcessorStep, - RobotAction, - RobotObservation, - RobotProcessorPipeline, - observation_to_transition, - robot_action_observation_to_transition, - transition_to_observation, - transition_to_robot_action, -) -from lerobot.robots import Robot -from lerobot.teleoperators import Teleoperator -from lerobot.utils.robot_utils import precise_sleep - -logger = logging.getLogger(__name__) - - -@dataclass -class HILDatasetConfig: - repo_id: str - single_task: str - root: str | Path | None = None - fps: int = 30 - episode_time_s: float = 120 - num_episodes: int = 50 - video: bool = True - push_to_hub: bool = True - private: bool = False - tags: list[str] | None = None - num_image_writer_processes: int = 0 - num_image_writer_threads_per_camera: int = 4 - video_encoding_batch_size: int = 1 - vcodec: str = "auto" - streaming_encoding: bool = True - encoder_queue_maxsize: int = 30 - encoder_threads: int | None = None - rename_map: dict[str, str] = field(default_factory=dict) - - -def teleop_has_motor_control(teleop: Teleoperator) -> bool: - """Check if teleoperator has motor control capabilities.""" - return all(hasattr(teleop, attr) for attr in ("enable_torque", "disable_torque", "write_goal_positions")) - - -def teleop_disable_torque(teleop: Teleoperator) -> None: - """Disable teleop torque if supported.""" - if hasattr(teleop, "disable_torque"): - teleop.disable_torque() - - -def teleop_enable_torque(teleop: Teleoperator) -> None: - """Enable teleop torque if supported.""" - if hasattr(teleop, "enable_torque"): - teleop.enable_torque() - - -def teleop_smooth_move_to(teleop: Teleoperator, target_pos: dict, duration_s: float = 2.0, fps: int = 50): - """Smoothly move teleop to target position if motor control is 
available.""" - if not teleop_has_motor_control(teleop): - logger.warning("Teleop does not support motor control - cannot mirror robot position") - return - - teleop_enable_torque(teleop) - current = teleop.get_action() - steps = max(int(duration_s * fps), 1) - - for step in range(steps + 1): - t = step / steps - interp = {} - for k in current: - if k in target_pos: - interp[k] = current[k] * (1 - t) + target_pos[k] * t - else: - interp[k] = current[k] - teleop.write_goal_positions(interp) - time.sleep(1 / fps) - - -def init_keyboard_listener(): - """Initialize keyboard listener with HIL controls.""" - events = { - "exit_early": False, - "rerecord_episode": False, - "stop_recording": False, - "policy_paused": False, - "correction_active": False, - "resume_policy": False, - "in_reset": False, - "start_next_episode": False, - } - - if is_headless(): - logger.warning("Headless environment - keyboard controls unavailable") - return None, events - - from pynput import keyboard - - def on_press(key): - try: - if events["in_reset"]: - if key in [keyboard.Key.space, keyboard.Key.right]: - logger.info("[HIL] Starting next episode...") - events["start_next_episode"] = True - elif hasattr(key, "char") and key.char == "c": - events["start_next_episode"] = True - elif key == keyboard.Key.esc: - logger.info("[HIL] ESC - Stop recording, pushing to hub...") - events["stop_recording"] = True - events["start_next_episode"] = True - else: - if key == keyboard.Key.space: - if not events["policy_paused"] and not events["correction_active"]: - logger.info("[HIL] PAUSED - Press 'c' to take control or 'p' to resume policy") - events["policy_paused"] = True - elif hasattr(key, "char") and key.char == "c": - if events["policy_paused"] and not events["correction_active"]: - logger.info("[HIL] Taking control...") - events["start_next_episode"] = True - elif hasattr(key, "char") and key.char == "p": - if events["policy_paused"] or events["correction_active"]: - logger.info("[HIL] Resuming 
policy...") - events["resume_policy"] = True - elif key == keyboard.Key.right: - logger.info("[HIL] End episode") - events["exit_early"] = True - elif key == keyboard.Key.left: - logger.info("[HIL] Re-record episode") - events["rerecord_episode"] = True - events["exit_early"] = True - elif key == keyboard.Key.esc: - logger.info("[HIL] ESC - Stop recording...") - events["stop_recording"] = True - events["exit_early"] = True - except Exception as e: - logger.info(f"Key error: {e}") - - listener = keyboard.Listener(on_press=on_press) - listener.start() - return listener, events - - -def make_identity_processors(): - """Create identity processors for recording.""" - teleop_proc = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( - steps=[IdentityProcessorStep()], - to_transition=robot_action_observation_to_transition, - to_output=transition_to_robot_action, - ) - obs_proc = RobotProcessorPipeline[RobotObservation, RobotObservation]( - steps=[IdentityProcessorStep()], - to_transition=observation_to_transition, - to_output=transition_to_observation, - ) - return teleop_proc, obs_proc - - -def reset_loop(robot: Robot, teleop: Teleoperator, events: dict, fps: int): - """Reset period where human repositions environment.""" - logger.info("[HIL] RESET") - - events["in_reset"] = True - events["start_next_episode"] = False - - obs = robot.get_observation() - robot_pos = {k: v for k, v in obs.items() if k.endswith(".pos") and k in robot.observation_features} - teleop_smooth_move_to(teleop, robot_pos, duration_s=2.0, fps=50) - - logger.info("Press any key to enable teleoperation") - while not events["start_next_episode"] and not events["stop_recording"]: - precise_sleep(0.05) - - if events["stop_recording"]: - return - - events["start_next_episode"] = False - teleop_disable_torque(teleop) - logger.info("Teleop enabled - press any key to start episode") - - while not events["start_next_episode"] and not events["stop_recording"]: - loop_start = 
time.perf_counter() - action = teleop.get_action() - robot.send_action(action) - precise_sleep(1 / fps - (time.perf_counter() - loop_start)) - - events["in_reset"] = False - events["start_next_episode"] = False - events["exit_early"] = False - events["policy_paused"] = False - events["correction_active"] = False - events["resume_policy"] = False - - -def print_controls(rtc: bool = False): - """Print control instructions.""" - mode = "Human-in-the-Loop Data Collection" + (" (RTC)" if rtc else "") - logger.info( - "%s\n Controls:\n" - " SPACE - Pause policy\n" - " c - Take control\n" - " p - Resume policy after pause/correction\n" - " → - End episode\n" - " ESC - Stop and push to hub", - mode, - ) diff --git a/examples/lekiwi/evaluate.py b/examples/lekiwi/evaluate.py index d8c53829e..3ddcd1f14 100644 --- a/examples/lekiwi/evaluate.py +++ b/examples/lekiwi/evaluate.py @@ -14,17 +14,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -from lerobot.common.control_utils import init_keyboard_listener +import logging +import time + +from lerobot.common.control_utils import init_keyboard_listener, predict_action from lerobot.datasets import LeRobotDataset from lerobot.policies import make_pre_post_processors from lerobot.policies.act import ACTPolicy +from lerobot.policies.utils import make_robot_action from lerobot.processor import make_default_processors from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig -from lerobot.scripts.lerobot_record import record_loop from lerobot.utils.constants import ACTION, OBS_STR -from lerobot.utils.feature_utils import hw_to_dataset_features +from lerobot.utils.feature_utils import build_dataset_frame, hw_to_dataset_features +from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.utils import log_say -from lerobot.utils.visualization_utils import init_rerun +from lerobot.utils.visualization_utils import init_rerun, log_rerun_data NUM_EPISODES = 2 FPS = 
30 @@ -35,6 +39,9 @@ HF_DATASET_ID = "/" def main(): + # NOTE: For production policy deployment, use `lerobot-rollout` CLI instead. + # This script provides a self-contained example for educational purposes. + # Create the robot configuration & robot robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi") @@ -83,43 +90,67 @@ def main(): raise ValueError("Robot is not connected!") print("Starting evaluate loop...") + control_interval = 1 / FPS recorded_episodes = 0 while recorded_episodes < NUM_EPISODES and not events["stop_recording"]: log_say(f"Running inference, recording eval episode {recorded_episodes} of {NUM_EPISODES}") - # Main record loop - record_loop( - robot=robot, - events=events, - fps=FPS, - policy=policy, - preprocessor=preprocessor, # Pass the pre and post policy processors - postprocessor=postprocessor, - dataset=dataset, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=teleop_action_processor, - robot_action_processor=robot_action_processor, - robot_observation_processor=robot_observation_processor, - ) + # Inline evaluation loop: predict actions and send to robot + timestamp = 0 + start_episode_t = time.perf_counter() + while timestamp < EPISODE_TIME_SEC: + start_loop_t = time.perf_counter() + + if events["exit_early"]: + events["exit_early"] = False + break + + # Get robot observation + obs = robot.get_observation() + obs_processed = robot_observation_processor(obs) + observation_frame = build_dataset_frame(dataset.features, obs_processed, prefix=OBS_STR) + + # Predict action using the policy + action_tensor = predict_action( + observation=observation_frame, + policy=policy, + device=policy.config.device, + preprocessor=preprocessor, + postprocessor=postprocessor, + use_amp=policy.config.device.type == "cuda", + task=TASK_DESCRIPTION, + robot_type=robot.name, + ) + + # Convert policy output to robot action dict + action_values = make_robot_action(action_tensor, 
dataset.features) + + # Process and send action to robot + robot_action_to_send = robot_action_processor((action_values, obs)) + robot.send_action(robot_action_to_send) + + # Write to dataset + action_frame = build_dataset_frame(dataset.features, action_values, prefix=ACTION) + frame = {**observation_frame, **action_frame, "task": TASK_DESCRIPTION} + dataset.add_frame(frame) + + log_rerun_data(observation=obs_processed, action=action_values) + + dt_s = time.perf_counter() - start_loop_t + sleep_time_s = control_interval - dt_s + if sleep_time_s < 0: + logging.warning( + f"Evaluate loop is running slower ({1 / dt_s:.1f} Hz) than the target FPS ({FPS} Hz)." + ) + precise_sleep(max(sleep_time_s, 0.0)) + timestamp = time.perf_counter() - start_episode_t # Reset the environment if not stopping or re-recording if not events["stop_recording"] and ( (recorded_episodes < NUM_EPISODES - 1) or events["rerecord_episode"] ): log_say("Reset the environment") - record_loop( - robot=robot, - events=events, - fps=FPS, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=teleop_action_processor, - robot_action_processor=robot_action_processor, - robot_observation_processor=robot_observation_processor, - ) + log_say("Waiting for environment reset, press right arrow key when ready...") if events["rerecord_episode"]: log_say("Re-record episode") diff --git a/examples/lekiwi/record.py b/examples/lekiwi/record.py index de5df7756..2c581f5ff 100644 --- a/examples/lekiwi/record.py +++ b/examples/lekiwi/record.py @@ -45,9 +45,6 @@ def main(): leader_arm = SO100Leader(leader_arm_config) keyboard = KeyboardTeleop(keyboard_config) - # TODO(Steven): Update this example to use pipelines - teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors() - # Configure the dataset features action_features = hw_to_dataset_features(robot.action_features, ACTION) obs_features = 
hw_to_dataset_features(robot.observation_features, OBS_STR) @@ -77,6 +74,10 @@ def main(): if not robot.is_connected or not leader_arm.is_connected or not keyboard.is_connected: raise ValueError("Robot or teleop is not connected!") + teleop_action_processor, robot_action_processor, robot_observation_processor = ( + make_default_processors() + ) + print("Starting record loop...") recorded_episodes = 0 while recorded_episodes < NUM_EPISODES and not events["stop_recording"]: @@ -87,14 +88,14 @@ def main(): robot=robot, events=events, fps=FPS, + teleop_action_processor=teleop_action_processor, + robot_action_processor=robot_action_processor, + robot_observation_processor=robot_observation_processor, dataset=dataset, teleop=[leader_arm, keyboard], control_time_s=EPISODE_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=teleop_action_processor, - robot_action_processor=robot_action_processor, - robot_observation_processor=robot_observation_processor, ) # Reset the environment if not stopping or re-recording @@ -106,13 +107,13 @@ def main(): robot=robot, events=events, fps=FPS, + teleop_action_processor=teleop_action_processor, + robot_action_processor=robot_action_processor, + robot_observation_processor=robot_observation_processor, teleop=[leader_arm, keyboard], control_time_s=RESET_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=teleop_action_processor, - robot_action_processor=robot_action_processor, - robot_observation_processor=robot_observation_processor, ) if events["rerecord_episode"]: diff --git a/examples/lekiwi/rollout.py b/examples/lekiwi/rollout.py new file mode 100644 index 000000000..4fb103c8c --- /dev/null +++ b/examples/lekiwi/rollout.py @@ -0,0 +1,77 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run a trained policy on LeKiwi without recording (base rollout). + +Uses the rollout engine's :class:`BaseStrategy` (autonomous execution, +no dataset) with :class:`SyncInferenceConfig` (inline policy call per +control tick). For a CLI entry point with the same capabilities plus +recording, upload, and human-in-the-loop variants, see ``lerobot-rollout``. +""" + +from lerobot.configs import PreTrainedConfig +from lerobot.robots.lekiwi import LeKiwiClientConfig +from lerobot.rollout import BaseStrategyConfig, RolloutConfig, build_rollout_context +from lerobot.rollout.inference import SyncInferenceConfig +from lerobot.rollout.strategies import BaseStrategy +from lerobot.utils.process import ProcessSignalHandler +from lerobot.utils.utils import init_logging + +FPS = 30 +DURATION_SEC = 60 +TASK_DESCRIPTION = "My task description" +HF_MODEL_ID = "/" + + +def main(): + init_logging() + + # Robot: LeKiwi client — make sure lekiwi_host is already running on the robot. + robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi") + + # Policy: load the pretrained config. ``pretrained_path`` is read downstream + # by ``build_rollout_context`` to reload the full model. + policy_config = PreTrainedConfig.from_pretrained(HF_MODEL_ID) + policy_config.pretrained_path = HF_MODEL_ID + + # Assemble the rollout config: base strategy (no recording) + sync inference. 
+ cfg = RolloutConfig( + robot=robot_config, + policy=policy_config, + strategy=BaseStrategyConfig(), + inference=SyncInferenceConfig(), + fps=FPS, + duration=DURATION_SEC, + task=TASK_DESCRIPTION, + ) + + # Graceful Ctrl-C: the strategy loop exits when shutdown_event is set. + signal_handler = ProcessSignalHandler(use_threads=True) + + # Build the context (connects robot, loads policy, wires the inference strategy). + # No custom processors here — LeKiwi runs on raw joint features. + ctx = build_rollout_context(cfg, signal_handler.shutdown_event) + + strategy = BaseStrategy(cfg.strategy) + try: + strategy.setup(ctx) + strategy.run(ctx) + finally: + strategy.teardown(ctx) + + +if __name__ == "__main__": + main() diff --git a/examples/phone_to_so100/evaluate.py b/examples/phone_to_so100/evaluate.py index 267e67c48..e859123d0 100644 --- a/examples/phone_to_so100/evaluate.py +++ b/examples/phone_to_so100/evaluate.py @@ -14,13 +14,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging +import time + from lerobot.cameras.opencv import OpenCVCameraConfig -from lerobot.common.control_utils import init_keyboard_listener +from lerobot.common.control_utils import init_keyboard_listener, predict_action from lerobot.configs import FeatureType, PolicyFeature from lerobot.datasets import LeRobotDataset, aggregate_pipeline_dataset_features, create_initial_features from lerobot.model.kinematics import RobotKinematics from lerobot.policies import make_pre_post_processors from lerobot.policies.act import ACTPolicy +from lerobot.policies.utils import make_robot_action from lerobot.processor import ( RobotProcessorPipeline, make_default_teleop_action_processor, @@ -34,11 +38,12 @@ from lerobot.robots.so_follower.robot_kinematic_processor import ( ForwardKinematicsJointsToEE, InverseKinematicsEEToJoints, ) -from lerobot.scripts.lerobot_record import record_loop from lerobot.types import RobotAction, RobotObservation -from lerobot.utils.feature_utils import combine_feature_dicts +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame, combine_feature_dicts +from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.utils import log_say -from lerobot.utils.visualization_utils import init_rerun +from lerobot.utils.visualization_utils import init_rerun, log_rerun_data NUM_EPISODES = 5 FPS = 30 @@ -49,6 +54,9 @@ HF_DATASET_ID = "/" def main(): + # NOTE: For production policy deployment, use `lerobot-rollout` CLI instead. + # This script provides a self-contained example for educational purposes. 
+ # Create the robot configuration & robot camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} robot_config = SO100FollowerConfig( @@ -143,43 +151,67 @@ def main(): raise ValueError("Robot is not connected!") print("Starting evaluate loop...") + control_interval = 1 / FPS episode_idx = 0 for episode_idx in range(NUM_EPISODES): log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}") - # Main record loop - record_loop( - robot=robot, - events=events, - fps=FPS, - policy=policy, - preprocessor=preprocessor, # Pass the pre and post policy processors - postprocessor=postprocessor, - dataset=dataset, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=make_default_teleop_action_processor(), - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose_processor, - ) + # Inline evaluation loop: predict actions and send to robot + timestamp = 0 + start_episode_t = time.perf_counter() + while timestamp < EPISODE_TIME_SEC: + start_loop_t = time.perf_counter() + + if events["exit_early"]: + events["exit_early"] = False + break + + # Get robot observation + obs = robot.get_observation() + obs_processed = robot_joints_to_ee_pose_processor(obs) + observation_frame = build_dataset_frame(dataset.features, obs_processed, prefix=OBS_STR) + + # Predict action using the policy + action_tensor = predict_action( + observation=observation_frame, + policy=policy, + device=policy.config.device, + preprocessor=preprocessor, + postprocessor=postprocessor, + use_amp=policy.config.device.type == "cuda", + task=TASK_DESCRIPTION, + robot_type=robot.name, + ) + + # Convert policy output to robot action dict + action_values = make_robot_action(action_tensor, dataset.features) + + # Process and send action to robot (EE -> joints via IK) + robot_action_to_send = robot_ee_to_joints_processor((action_values, obs)) + 
robot.send_action(robot_action_to_send) + + # Write to dataset + action_frame = build_dataset_frame(dataset.features, action_values, prefix=ACTION) + frame = {**observation_frame, **action_frame, "task": TASK_DESCRIPTION} + dataset.add_frame(frame) + + log_rerun_data(observation=obs_processed, action=action_values) + + dt_s = time.perf_counter() - start_loop_t + sleep_time_s = control_interval - dt_s + if sleep_time_s < 0: + logging.warning( + f"Evaluate loop is running slower ({1 / dt_s:.1f} Hz) than the target FPS ({FPS} Hz)." + ) + precise_sleep(max(sleep_time_s, 0.0)) + timestamp = time.perf_counter() - start_episode_t # Reset the environment if not stopping or re-recording if not events["stop_recording"] and ( (episode_idx < NUM_EPISODES - 1) or events["rerecord_episode"] ): log_say("Reset the environment") - record_loop( - robot=robot, - events=events, - fps=FPS, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=make_default_teleop_action_processor(), - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose_processor, - ) + log_say("Waiting for environment reset, press right arrow key when ready...") if events["rerecord_episode"]: log_say("Re-record episode") @@ -190,7 +222,6 @@ def main(): # Save episode dataset.save_episode() - episode_idx += 1 finally: # Clean up log_say("Stop recording") diff --git a/examples/phone_to_so100/record.py b/examples/phone_to_so100/record.py index 6a8d38ec3..87b8e49fd 100644 --- a/examples/phone_to_so100/record.py +++ b/examples/phone_to_so100/record.py @@ -65,14 +65,15 @@ def main(): robot = SO100Follower(robot_config) phone = Phone(teleop_config) - # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf + # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: + # 
https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf kinematics_solver = RobotKinematics( urdf_path="./SO101/so101_new_calib.urdf", target_frame_name="gripper_frame_link", joint_names=list(robot.bus.motors.keys()), ) - # Build pipeline to convert phone action to EE action + # Build pipeline to convert phone action to EE action (with gripper velocity mapped to joint). phone_to_robot_ee_pose_processor = RobotProcessorPipeline[ tuple[RobotAction, RobotObservation], RobotAction ]( @@ -94,7 +95,7 @@ def main(): to_output=transition_to_robot_action, ) - # Build pipeline to convert EE action to joints action + # Build pipeline to convert EE action to joints action (IK). robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( steps=[ InverseKinematicsEEToJoints( @@ -107,7 +108,7 @@ def main(): to_output=transition_to_robot_action, ) - # Build pipeline to convert joint observation to EE observation + # Build pipeline to convert joint observation to EE observation (FK). robot_joints_to_ee_pose = RobotProcessorPipeline[RobotObservation, RobotObservation]( steps=[ ForwardKinematicsJointsToEE( @@ -118,13 +119,12 @@ def main(): to_output=transition_to_observation, ) - # Create the dataset + # Create the dataset, deriving features from the pipelines so the on-disk schema + # matches exactly what the pipelines produce at runtime. 
dataset = LeRobotDataset.create( repo_id=HF_REPO_ID, fps=FPS, features=combine_feature_dicts( - # Run the feature contract of the pipelines - # This tells you how the features would look like after the pipeline steps aggregate_pipeline_dataset_features( pipeline=phone_to_robot_ee_pose_processor, initial_features=create_initial_features(action=phone.action_features), @@ -163,14 +163,14 @@ def main(): robot=robot, events=events, fps=FPS, + teleop_action_processor=phone_to_robot_ee_pose_processor, + robot_action_processor=robot_ee_to_joints_processor, + robot_observation_processor=robot_joints_to_ee_pose, teleop=phone, dataset=dataset, control_time_s=EPISODE_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=phone_to_robot_ee_pose_processor, - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose, ) # Reset the environment if not stopping or re-recording @@ -182,13 +182,13 @@ def main(): robot=robot, events=events, fps=FPS, + teleop_action_processor=phone_to_robot_ee_pose_processor, + robot_action_processor=robot_ee_to_joints_processor, + robot_observation_processor=robot_joints_to_ee_pose, teleop=phone, control_time_s=RESET_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=phone_to_robot_ee_pose_processor, - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose, ) if events["rerecord_episode"]: diff --git a/examples/phone_to_so100/rollout.py b/examples/phone_to_so100/rollout.py new file mode 100644 index 000000000..ca6706c52 --- /dev/null +++ b/examples/phone_to_so100/rollout.py @@ -0,0 +1,126 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run a trained EE-space policy on SO100 (phone-trained) without recording. + +Mirrors ``examples/so100_to_so100_EE/rollout.py`` — the model was trained +with phone teleoperation in EE space, so at deployment we only need the +joint↔EE conversion on the robot side; the phone is not used. + +Uses :class:`BaseStrategy` (no recording) + :class:`SyncInferenceConfig` +(inline policy call). For recording during rollout, switch to Sentry, +Highlight, or DAgger via ``lerobot-rollout --strategy.type=...``. +""" + +from lerobot.cameras.opencv import OpenCVCameraConfig +from lerobot.configs import PreTrainedConfig +from lerobot.model.kinematics import RobotKinematics +from lerobot.processor import ( + RobotProcessorPipeline, + observation_to_transition, + robot_action_observation_to_transition, + transition_to_observation, + transition_to_robot_action, +) +from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig +from lerobot.robots.so_follower.robot_kinematic_processor import ( + ForwardKinematicsJointsToEE, + InverseKinematicsEEToJoints, +) +from lerobot.rollout import BaseStrategyConfig, RolloutConfig, build_rollout_context +from lerobot.rollout.inference import SyncInferenceConfig +from lerobot.rollout.strategies import BaseStrategy +from lerobot.types import RobotAction, RobotObservation +from lerobot.utils.process import ProcessSignalHandler +from lerobot.utils.utils import init_logging + +FPS = 30 +DURATION_SEC = 60 +TASK_DESCRIPTION = "My task description" +HF_MODEL_ID = "/" + + +def main(): + init_logging() + + camera_config = {"front": 
OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} + robot_config = SO100FollowerConfig( + port="/dev/tty.usbmodem58760434471", + id="my_awesome_follower_arm", + cameras=camera_config, + use_degrees=True, + ) + + # Peek at motor names once to build the kinematic solver. + temp_robot = SO100Follower(robot_config) + motor_names = list(temp_robot.bus.motors.keys()) + + kinematics_solver = RobotKinematics( + urdf_path="./SO101/so101_new_calib.urdf", + target_frame_name="gripper_frame_link", + joint_names=motor_names, + ) + + robot_joints_to_ee_pose_processor = RobotProcessorPipeline[RobotObservation, RobotObservation]( + steps=[ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=motor_names)], + to_transition=observation_to_transition, + to_output=transition_to_observation, + ) + + robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( + steps=[ + InverseKinematicsEEToJoints( + kinematics=kinematics_solver, + motor_names=motor_names, + initial_guess_current_joints=True, + ), + ], + to_transition=robot_action_observation_to_transition, + to_output=transition_to_robot_action, + ) + + policy_config = PreTrainedConfig.from_pretrained(HF_MODEL_ID) + policy_config.pretrained_path = HF_MODEL_ID + + cfg = RolloutConfig( + robot=robot_config, + policy=policy_config, + strategy=BaseStrategyConfig(), + inference=SyncInferenceConfig(), + fps=FPS, + duration=DURATION_SEC, + task=TASK_DESCRIPTION, + ) + + signal_handler = ProcessSignalHandler(use_threads=True) + + ctx = build_rollout_context( + cfg, + signal_handler.shutdown_event, + robot_action_processor=robot_ee_to_joints_processor, + robot_observation_processor=robot_joints_to_ee_pose_processor, + ) + + strategy = BaseStrategy(cfg.strategy) + try: + strategy.setup(ctx) + strategy.run(ctx) + finally: + strategy.teardown(ctx) + + +if __name__ == "__main__": + main() diff --git a/examples/rtc/eval_with_real_robot.py 
b/examples/rtc/eval_with_real_robot.py deleted file mode 100644 index 66562749c..000000000 --- a/examples/rtc/eval_with_real_robot.py +++ /dev/null @@ -1,673 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2025 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Demo script showing how to use Real-Time Chunking (RTC) with action chunking policies on real robots. - -This script demonstrates: -1. Creating a robot and policy (SmolVLA, Pi0, etc.) with RTC -2. Consuming actions from the policy while the robot executes -3. Periodically requesting new action chunks in the background using threads -4. 
Managing action buffers and timing for real-time operation - -For simulation environments, see eval_with_simulation.py - -Usage: - # Run RTC with Real robot with RTC - uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=/smolvla_check_rtc_last3 \ - --policy.device=mps \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - --robot.type=so100_follower \ - --robot.port=/dev/tty.usbmodem58FA0834591 \ - --robot.id=so100_follower \ - --robot.cameras="{ gripper: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ - --task="Move green small object into the purple platform" \ - --duration=120 - - # Run RTC with Real robot without RTC - uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=/smolvla_check_rtc_last3 \ - --policy.device=mps \ - --rtc.enabled=false \ - --robot.type=so100_follower \ - --robot.port=/dev/tty.usbmodem58FA0834591 \ - --robot.id=so100_follower \ - --robot.cameras="{ gripper: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ - --task="Move green small object into the purple platform" \ - --duration=120 - - # Run RTC with Real robot with pi0.5 policy - uv run examples/rtc/eval_with_real_robot.py \ - --policy.path=/pi05_check_rtc \ - --policy.device=mps \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - --robot.type=so100_follower \ - --robot.port=/dev/tty.usbmodem58FA0834591 \ - --robot.id=so100_follower \ - --robot.cameras="{ gripper: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}}" \ - --task="Move green small object into the purple platform" \ - --duration=120 - - # Run RTC with bi_openarm_follower (dual-arm OpenArms) and pi0.5 policy - python examples/rtc/eval_with_real_robot.py \ - --policy.path=lerobot-data-collection/folding_final \ - 
--robot.type=bi_openarm_follower \ - --robot.cameras='{left_wrist: {type: opencv, index_or_path: "/dev/video4", width: 1280, height: 720, fps: 30}, base: {type: opencv, index_or_path: "/dev/video2", width: 640, height: 480, fps: 30}, right_wrist: {type: opencv, index_or_path: "/dev/video0", width: 1280, height: 720, fps: 30}}' \ - --robot.left_arm_config.port=can0 \ - --robot.left_arm_config.side=left \ - --robot.left_arm_config.can_interface=socketcan \ - --robot.left_arm_config.disable_torque_on_disconnect=true \ - --robot.left_arm_config.max_relative_target=8.0 \ - --robot.right_arm_config.port=can1 \ - --robot.right_arm_config.side=right \ - --robot.right_arm_config.can_interface=socketcan \ - --robot.right_arm_config.disable_torque_on_disconnect=true \ - --robot.right_arm_config.max_relative_target=8.0 \ - --task="Fold the T-shirt properly" \ - --fps=30 \ - --duration=2000 \ - --interpolation_multiplier=3 \ - --rtc.enabled=true \ - --rtc.execution_horizon=20 \ - --rtc.max_guidance_weight=5.0 \ - --rtc.prefix_attention_schedule=LINEAR \ - --device=cuda -""" - -import logging -import math -import sys -import time -import traceback -from dataclasses import dataclass, field -from threading import Event, Lock, Thread - -import torch -from torch import Tensor - -from lerobot.cameras.opencv import OpenCVCameraConfig # noqa: F401 -from lerobot.cameras.realsense import RealSenseCameraConfig # noqa: F401 -from lerobot.cameras.zmq import ZMQCameraConfig # noqa: F401 -from lerobot.configs import PreTrainedConfig, RTCAttentionSchedule, parser -from lerobot.policies import get_policy_class, make_pre_post_processors -from lerobot.policies.rtc import ActionInterpolator, ActionQueue, LatencyTracker, RTCConfig -from lerobot.processor import ( - NormalizerProcessorStep, - RelativeActionsProcessorStep, - TransitionKey, - create_transition, - make_default_robot_action_processor, - make_default_robot_observation_processor, - to_relative_actions, -) -from lerobot.rl.process import 
ProcessSignalHandler -from lerobot.robots import ( # noqa: F401 - Robot, - RobotConfig, - bi_openarm_follower, - bi_so_follower, - koch_follower, - so_follower, - unitree_g1, -) -from lerobot.robots.utils import make_robot_from_config -from lerobot.utils.constants import OBS_IMAGES, OBS_STATE -from lerobot.utils.feature_utils import build_dataset_frame, hw_to_dataset_features -from lerobot.utils.hub import HubMixin -from lerobot.utils.utils import init_logging - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -class RobotWrapper: - def __init__(self, robot: Robot): - self.robot = robot - self.lock = Lock() - - def get_observation(self) -> dict[str, Tensor]: - with self.lock: - return self.robot.get_observation() - - def send_action(self, action: Tensor): - with self.lock: - self.robot.send_action(action) - - def observation_features(self) -> list[str]: - with self.lock: - return self.robot.observation_features - - def action_features(self) -> list[str]: - with self.lock: - return self.robot.action_features - - -@dataclass -class RTCDemoConfig(HubMixin): - """Configuration for RTC demo with action chunking policies and real robots.""" - - # Policy configuration - policy: PreTrainedConfig | None = None - - # Robot configuration - robot: RobotConfig | None = None - - # RTC configuration - rtc: RTCConfig = field( - default_factory=lambda: RTCConfig( - execution_horizon=10, - max_guidance_weight=1.0, - prefix_attention_schedule=RTCAttentionSchedule.EXP, - ) - ) - - # Demo parameters - duration: float = 30.0 # Duration to run the demo (seconds) - fps: float = 10.0 # Action execution frequency (Hz) - interpolation_multiplier: int = 1 # Control rate multiplier (1=off, 2=2x, 3=3x) - - # Compute device - device: str | None = None # Device to run on (cuda, cpu, auto) - - # Get new actions horizon. The amount of executed steps after which will be requested new actions. - # It should be higher than inference delay + execution horizon. 
- action_queue_size_to_get_new_actions: int = 30 - - # Task to execute - task: str = field(default="", metadata={"help": "Task to execute"}) - - # Torch compile configuration - use_torch_compile: bool = field( - default=False, - metadata={"help": "Use torch.compile for faster inference (PyTorch 2.0+)"}, - ) - - torch_compile_backend: str = field( - default="inductor", - metadata={"help": "Backend for torch.compile (inductor, aot_eager, cudagraphs)"}, - ) - - torch_compile_mode: str = field( - default="default", - metadata={"help": "Compilation mode (default, reduce-overhead, max-autotune)"}, - ) - - torch_compile_disable_cudagraphs: bool = field( - default=True, - metadata={ - "help": "Disable CUDA graphs in torch.compile. Required due to in-place tensor " - "operations in denoising loop (x_t += dt * v_t) which cause tensor aliasing issues." - }, - ) - - def __post_init__(self): - # HACK: We parse again the cli args here to get the pretrained path if there was one. - policy_path = parser.get_path_arg("policy") - if policy_path: - cli_overrides = parser.get_cli_overrides("policy") - self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) - self.policy.pretrained_path = policy_path - else: - raise ValueError("Policy path is required") - - # Validate that robot configuration is provided - if self.robot is None: - raise ValueError("Robot configuration must be provided") - - @classmethod - def __get_path_fields__(cls) -> list[str]: - """This enables the parser to load config from the policy using `--policy.path=local/dir`""" - return ["policy"] - - -def is_image_key(k: str) -> bool: - return k.startswith(OBS_IMAGES) - - -def _reanchor_relative_rtc_prefix( - prev_actions_absolute: Tensor, - current_state: Tensor, - relative_step: RelativeActionsProcessorStep, - normalizer_step: NormalizerProcessorStep | None, - policy_device: torch.device | str, -) -> Tensor: - """Convert absolute leftovers into model-space for relative-action RTC 
policies. - - When a policy uses relative actions, the RTC prefix (leftover actions from - the previous chunk) is stored in absolute space. Before feeding it back to - the policy we need to re-express it relative to the *current* robot state - and then re-normalize. - """ - state = current_state.detach().cpu() - if state.dim() == 1: - state = state.unsqueeze(0) - - action_cpu = prev_actions_absolute.detach().cpu() - mask = relative_step._build_mask(action_cpu.shape[-1]) - relative_actions = to_relative_actions(action_cpu, state, mask) - - transition = create_transition(action=relative_actions) - if normalizer_step is not None: - transition = normalizer_step(transition) - - return transition[TransitionKey.ACTION].to(policy_device) - - -def get_actions( - policy, - robot: RobotWrapper, - robot_observation_processor, - action_queue: ActionQueue, - shutdown_event: Event, - cfg: RTCDemoConfig, -): - """Thread function to request action chunks from the policy. - - Args: - policy: The policy instance (SmolVLA, Pi0, etc.) - robot: The robot instance for getting observations - robot_observation_processor: Processor for raw robot observations - action_queue: Queue to put new action chunks - shutdown_event: Event to signal shutdown - cfg: Demo configuration - """ - try: - logger.info("[GET_ACTIONS] Starting get actions thread") - - latency_tracker = LatencyTracker() # Track latency of action chunks - fps = cfg.fps - time_per_chunk = 1.0 / fps - - # Only keep .pos joints + camera streams if the policy was trained on positions, - # not the full pos/vel/torque state the robot exposes. 
- observation_features_hw = { - key: value - for key, value in robot.observation_features().items() - if key.endswith(".pos") or isinstance(value, tuple) - } - - dataset_features = hw_to_dataset_features(observation_features_hw, "observation") - policy_device = policy.config.device - - # Load preprocessor and postprocessor from pretrained files - # The stats are embedded in the processor .safetensors files - logger.info(f"[GET_ACTIONS] Loading preprocessor/postprocessor from {cfg.policy.pretrained_path}") - - preprocessor, postprocessor = make_pre_post_processors( - policy_cfg=cfg.policy, - pretrained_path=cfg.policy.pretrained_path, - dataset_stats=None, # Will load from pretrained processor files - preprocessor_overrides={ - "device_processor": {"device": cfg.policy.device}, - }, - ) - - logger.info("[GET_ACTIONS] Preprocessor/postprocessor loaded successfully with embedded stats") - - relative_step = next( - (s for s in preprocessor.steps if isinstance(s, RelativeActionsProcessorStep) and s.enabled), - None, - ) - normalizer_step = next( - (s for s in preprocessor.steps if isinstance(s, NormalizerProcessorStep)), - None, - ) - if relative_step is not None: - if relative_step.action_names is None: - cfg_names = getattr(cfg.policy, "action_feature_names", None) - if cfg_names: - relative_step.action_names = list(cfg_names) - else: - relative_step.action_names = [ - k for k in robot.robot.action_features if k.endswith(".pos") - ] - logger.info("[GET_ACTIONS] Relative actions enabled: will re-anchor RTC prefix") - - get_actions_threshold = cfg.action_queue_size_to_get_new_actions - - if not cfg.rtc.enabled: - get_actions_threshold = 0 - - while not shutdown_event.is_set(): - if action_queue.qsize() <= get_actions_threshold: - current_time = time.perf_counter() - action_index_before_inference = action_queue.get_action_index() - prev_actions = action_queue.get_left_over() - - inference_latency = latency_tracker.max() - inference_delay = math.ceil(inference_latency / 
time_per_chunk) - - obs = robot.get_observation() - - # Apply robot observation processor - obs_processed = robot_observation_processor(obs) - - obs_with_policy_features = build_dataset_frame( - dataset_features, obs_processed, prefix="observation" - ) - - for name in obs_with_policy_features: - obs_with_policy_features[name] = torch.from_numpy(obs_with_policy_features[name]) - if "image" in name: - obs_with_policy_features[name] = ( - obs_with_policy_features[name].type(torch.float32) / 255 - ) - obs_with_policy_features[name] = ( - obs_with_policy_features[name].permute(2, 0, 1).contiguous() - ) - obs_with_policy_features[name] = obs_with_policy_features[name].unsqueeze(0) - obs_with_policy_features[name] = obs_with_policy_features[name].to(policy_device) - - obs_with_policy_features["task"] = [cfg.task] # Task should be a list, not a string! - obs_with_policy_features["robot_type"] = ( - robot.robot.name if hasattr(robot.robot, "name") else "" - ) - - preproceseded_obs = preprocessor(obs_with_policy_features) - - # Re-anchor leftover actions for relative-action policies. - # We need the *postprocessed* (absolute) leftover, not the original - # (normalized/relative) one that get_left_over() returns. 
- if ( - prev_actions is not None - and relative_step is not None - and OBS_STATE in obs_with_policy_features - ): - with action_queue.lock: - if action_queue.queue is not None: - prev_actions_abs = action_queue.queue[action_queue.last_index :].clone() - else: - prev_actions_abs = None - if prev_actions_abs is not None and prev_actions_abs.numel() > 0: - prev_actions = _reanchor_relative_rtc_prefix( - prev_actions_absolute=prev_actions_abs, - current_state=obs_with_policy_features[OBS_STATE], - relative_step=relative_step, - normalizer_step=normalizer_step, - policy_device=policy_device, - ) - - # Generate actions WITH RTC - actions = policy.predict_action_chunk( - preproceseded_obs, - inference_delay=inference_delay, - prev_chunk_left_over=prev_actions, - ) - - # Store original actions (before postprocessing) for RTC - original_actions = actions.squeeze(0).clone() - - postprocessed_actions = postprocessor(actions) - - postprocessed_actions = postprocessed_actions.squeeze(0) - - new_latency = time.perf_counter() - current_time - new_delay = math.ceil(new_latency / time_per_chunk) - latency_tracker.add(new_latency) - - if cfg.action_queue_size_to_get_new_actions < cfg.rtc.execution_horizon + new_delay: - logger.warning( - "[GET_ACTIONS] cfg.action_queue_size_to_get_new_actions Too small, It should be higher than inference delay + execution horizon." - ) - - action_queue.merge( - original_actions, postprocessed_actions, new_delay, action_index_before_inference - ) - else: - # Small sleep to prevent busy waiting - time.sleep(0.1) - - logger.info("[GET_ACTIONS] get actions thread shutting down") - except Exception as e: - logger.error(f"[GET_ACTIONS] Fatal exception in get_actions thread: {e}") - logger.error(traceback.format_exc()) - sys.exit(1) - - -def actor_control( - robot: RobotWrapper, - robot_action_processor, - action_queue: ActionQueue, - shutdown_event: Event, - cfg: RTCDemoConfig, -): - """Thread function to execute actions on the robot. 
- - Args: - robot: The robot instance - action_queue: Queue to get actions from - shutdown_event: Event to signal shutdown - cfg: Demo configuration - """ - try: - logger.info("[ACTOR] Starting actor thread") - - action_keys = [k for k in robot.action_features() if k.endswith(".pos")] - - action_count = 0 - interpolator = ActionInterpolator(multiplier=cfg.interpolation_multiplier) - action_interval = interpolator.get_control_interval(cfg.fps) - - while not shutdown_event.is_set(): - start_time = time.perf_counter() - - if interpolator.needs_new_action(): - new_action = action_queue.get() - if new_action is not None: - interpolator.add(new_action.cpu()) - - action = interpolator.get() - if action is not None: - action = action.cpu() - action_dict = {key: action[i].item() for i, key in enumerate(action_keys)} - action_processed = robot_action_processor((action_dict, None)) - robot.send_action(action_processed) - action_count += 1 - - dt_s = time.perf_counter() - start_time - time.sleep(max(0, (action_interval - dt_s) - 0.001)) - - logger.info(f"[ACTOR] Actor thread shutting down. Total actions executed: {action_count}") - except Exception as e: - logger.error(f"[ACTOR] Fatal exception in actor_control thread: {e}") - logger.error(traceback.format_exc()) - sys.exit(1) - - -def _apply_torch_compile(policy, cfg: RTCDemoConfig): - """Apply torch.compile to the policy's predict_action_chunk method. - - Args: - policy: Policy instance to compile - cfg: Configuration containing torch compile settings - - Returns: - Policy with compiled predict_action_chunk method - """ - - # PI models handle their own compilation - if policy.type == "pi05" or policy.type == "pi0": - return policy - - try: - # Check if torch.compile is available (PyTorch 2.0+) - if not hasattr(torch, "compile"): - logger.warning( - f"torch.compile is not available. Requires PyTorch 2.0+. " - f"Current version: {torch.__version__}. Skipping compilation." 
- ) - return policy - - logger.info("Applying torch.compile to predict_action_chunk...") - logger.info(f" Backend: {cfg.torch_compile_backend}") - logger.info(f" Mode: {cfg.torch_compile_mode}") - logger.info(f" Disable CUDA graphs: {cfg.torch_compile_disable_cudagraphs}") - - # Compile the predict_action_chunk method - # - CUDA graphs disabled to prevent tensor aliasing from in-place ops (x_t += dt * v_t) - compile_kwargs = { - "backend": cfg.torch_compile_backend, - "mode": cfg.torch_compile_mode, - } - - # Disable CUDA graphs if requested (prevents tensor aliasing issues) - if cfg.torch_compile_disable_cudagraphs: - compile_kwargs["options"] = {"triton.cudagraphs": False} - - original_method = policy.predict_action_chunk - compiled_method = torch.compile(original_method, **compile_kwargs) - policy.predict_action_chunk = compiled_method - logger.info("✓ Successfully compiled predict_action_chunk") - - except Exception as e: - logger.error(f"Failed to apply torch.compile: {e}") - logger.warning("Continuing without torch.compile") - - return policy - - -@parser.wrap() -def demo_cli(cfg: RTCDemoConfig): - """Main entry point for RTC demo with draccus configuration.""" - - # Initialize logging - init_logging() - - logger.info(f"Using device: {cfg.device}") - - # Setup signal handler for graceful shutdown - signal_handler = ProcessSignalHandler(use_threads=True, display_pid=False) - shutdown_event = signal_handler.shutdown_event - - policy = None - robot = None - get_actions_thread = None - actor_thread = None - - policy_class = get_policy_class(cfg.policy.type) - - # Load config and set compile_model for pi0/pi05 models - config = PreTrainedConfig.from_pretrained(cfg.policy.pretrained_path) - - if cfg.policy.type == "pi05" or cfg.policy.type == "pi0": - config.compile_model = cfg.use_torch_compile - - if config.use_peft: - from peft import PeftConfig, PeftModel - - peft_pretrained_path = cfg.policy.pretrained_path - peft_config = 
PeftConfig.from_pretrained(peft_pretrained_path) - - policy = policy_class.from_pretrained( - pretrained_name_or_path=peft_config.base_model_name_or_path, config=config - ) - policy = PeftModel.from_pretrained(policy, peft_pretrained_path, config=peft_config) - else: - policy = policy_class.from_pretrained(cfg.policy.pretrained_path, config=config) - - # Turn on RTC - policy.config.rtc_config = cfg.rtc - - # Init RTC processort, as by default if RTC disabled in the config - # The processor won't be created - policy.init_rtc_processor() - - assert policy.name in ["smolvla", "pi05", "pi0"], "Only smolvla, pi05, and pi0 are supported for RTC" - - policy = policy.to(cfg.device) - policy.eval() - - # Apply torch.compile to predict_action_chunk method if enabled - if cfg.use_torch_compile: - policy = _apply_torch_compile(policy, cfg) - - # Create robot - logger.info(f"Initializing robot: {cfg.robot.type}") - robot = make_robot_from_config(cfg.robot) - robot.connect() - robot_wrapper = RobotWrapper(robot) - - # Create robot observation processor - robot_observation_processor = make_default_robot_observation_processor() - robot_action_processor = make_default_robot_action_processor() - - # Create action queue for communication between threads - action_queue = ActionQueue(cfg.rtc) - - # Start chunk requester thread - get_actions_thread = Thread( - target=get_actions, - args=(policy, robot_wrapper, robot_observation_processor, action_queue, shutdown_event, cfg), - daemon=True, - name="GetActions", - ) - get_actions_thread.start() - logger.info("Started get actions thread") - - # Start action executor thread - actor_thread = Thread( - target=actor_control, - args=(robot_wrapper, robot_action_processor, action_queue, shutdown_event, cfg), - daemon=True, - name="Actor", - ) - actor_thread.start() - logger.info("Started actor thread") - - logger.info("Started stop by duration thread") - - # Main thread monitors for duration or shutdown - logger.info(f"Running demo for 
{cfg.duration} seconds...") - start_time = time.time() - - while not shutdown_event.is_set() and (time.time() - start_time) < cfg.duration: - time.sleep(10) - - # Log queue status periodically - if int(time.time() - start_time) % 5 == 0: - logger.info(f"[MAIN] Action queue size: {action_queue.qsize()}") - - if time.time() - start_time > cfg.duration: - break - - logger.info("Demo duration reached or shutdown requested") - - # Signal shutdown - shutdown_event.set() - - # Wait for threads to finish - if get_actions_thread and get_actions_thread.is_alive(): - logger.info("Waiting for chunk requester thread to finish...") - get_actions_thread.join() - - if actor_thread and actor_thread.is_alive(): - logger.info("Waiting for action executor thread to finish...") - actor_thread.join() - - # Cleanup robot - if robot: - robot.disconnect() - logger.info("Robot disconnected") - - logger.info("Cleanup completed") - - -if __name__ == "__main__": - demo_cli() - logging.info("RTC demo finished") diff --git a/examples/so100_to_so100_EE/evaluate.py b/examples/so100_to_so100_EE/evaluate.py index fb5204997..63def68d0 100644 --- a/examples/so100_to_so100_EE/evaluate.py +++ b/examples/so100_to_so100_EE/evaluate.py @@ -14,13 +14,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging +import time + from lerobot.cameras.opencv import OpenCVCameraConfig -from lerobot.common.control_utils import init_keyboard_listener +from lerobot.common.control_utils import init_keyboard_listener, predict_action from lerobot.configs import FeatureType, PolicyFeature from lerobot.datasets import LeRobotDataset, aggregate_pipeline_dataset_features, create_initial_features from lerobot.model.kinematics import RobotKinematics from lerobot.policies import make_pre_post_processors from lerobot.policies.act import ACTPolicy +from lerobot.policies.utils import make_robot_action from lerobot.processor import ( RobotProcessorPipeline, make_default_teleop_action_processor, @@ -34,11 +38,12 @@ from lerobot.robots.so_follower.robot_kinematic_processor import ( ForwardKinematicsJointsToEE, InverseKinematicsEEToJoints, ) -from lerobot.scripts.lerobot_record import record_loop from lerobot.types import RobotAction, RobotObservation -from lerobot.utils.feature_utils import combine_feature_dicts +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame, combine_feature_dicts +from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.utils import log_say -from lerobot.utils.visualization_utils import init_rerun +from lerobot.utils.visualization_utils import init_rerun, log_rerun_data NUM_EPISODES = 5 FPS = 30 @@ -49,6 +54,9 @@ HF_DATASET_ID = "/" def main(): + # NOTE: For production policy deployment, use `lerobot-rollout` CLI instead. + # This script provides a self-contained example for educational purposes. 
+ # Create the robot configuration & robot camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} robot_config = SO100FollowerConfig( @@ -143,43 +151,67 @@ def main(): raise ValueError("Robot is not connected!") print("Starting evaluate loop...") + control_interval = 1 / FPS episode_idx = 0 for episode_idx in range(NUM_EPISODES): log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}") - # Main record loop - record_loop( - robot=robot, - events=events, - fps=FPS, - policy=policy, - preprocessor=preprocessor, # Pass the pre and post policy processors - postprocessor=postprocessor, - dataset=dataset, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=make_default_teleop_action_processor(), - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose_processor, - ) + # Inline evaluation loop: predict actions and send to robot + timestamp = 0 + start_episode_t = time.perf_counter() + while timestamp < EPISODE_TIME_SEC: + start_loop_t = time.perf_counter() + + if events["exit_early"]: + events["exit_early"] = False + break + + # Get robot observation + obs = robot.get_observation() + obs_processed = robot_joints_to_ee_pose_processor(obs) + observation_frame = build_dataset_frame(dataset.features, obs_processed, prefix=OBS_STR) + + # Predict action using the policy + action_tensor = predict_action( + observation=observation_frame, + policy=policy, + device=policy.config.device, + preprocessor=preprocessor, + postprocessor=postprocessor, + use_amp=policy.config.device.type == "cuda", + task=TASK_DESCRIPTION, + robot_type=robot.name, + ) + + # Convert policy output to robot action dict + action_values = make_robot_action(action_tensor, dataset.features) + + # Process and send action to robot (EE -> joints via IK) + robot_action_to_send = robot_ee_to_joints_processor((action_values, obs)) + 
robot.send_action(robot_action_to_send) + + # Write to dataset + action_frame = build_dataset_frame(dataset.features, action_values, prefix=ACTION) + frame = {**observation_frame, **action_frame, "task": TASK_DESCRIPTION} + dataset.add_frame(frame) + + log_rerun_data(observation=obs_processed, action=action_values) + + dt_s = time.perf_counter() - start_loop_t + sleep_time_s = control_interval - dt_s + if sleep_time_s < 0: + logging.warning( + f"Evaluate loop is running slower ({1 / dt_s:.1f} Hz) than the target FPS ({FPS} Hz)." + ) + precise_sleep(max(sleep_time_s, 0.0)) + timestamp = time.perf_counter() - start_episode_t # Reset the environment if not stopping or re-recording if not events["stop_recording"] and ( (episode_idx < NUM_EPISODES - 1) or events["rerecord_episode"] ): log_say("Reset the environment") - record_loop( - robot=robot, - events=events, - fps=FPS, - control_time_s=EPISODE_TIME_SEC, - single_task=TASK_DESCRIPTION, - display_data=True, - teleop_action_processor=make_default_teleop_action_processor(), - robot_action_processor=robot_ee_to_joints_processor, - robot_observation_processor=robot_joints_to_ee_pose_processor, - ) + log_say("Waiting for environment reset, press right arrow key when ready...") if events["rerecord_episode"]: log_say("Re-record episode") @@ -190,7 +222,6 @@ def main(): # Save episode dataset.save_episode() - episode_idx += 1 finally: # Clean up log_say("Stop recording") diff --git a/examples/so100_to_so100_EE/record.py b/examples/so100_to_so100_EE/record.py index a7ac5bb80..a0b92da3b 100644 --- a/examples/so100_to_so100_EE/record.py +++ b/examples/so100_to_so100_EE/record.py @@ -62,21 +62,20 @@ def main(): follower = SO100Follower(follower_config) leader = SO100Leader(leader_config) - # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf + # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: + # 
https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf follower_kinematics_solver = RobotKinematics( urdf_path="./SO101/so101_new_calib.urdf", target_frame_name="gripper_frame_link", joint_names=list(follower.bus.motors.keys()), ) - - # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf leader_kinematics_solver = RobotKinematics( urdf_path="./SO101/so101_new_calib.urdf", target_frame_name="gripper_frame_link", joint_names=list(leader.bus.motors.keys()), ) - # Build pipeline to convert follower joints to EE observation + # Build pipeline to convert follower joints to EE observation. follower_joints_to_ee = RobotProcessorPipeline[RobotObservation, RobotObservation]( steps=[ ForwardKinematicsJointsToEE( @@ -87,7 +86,7 @@ def main(): to_output=transition_to_observation, ) - # Build pipeline to convert leader joints to EE action + # Build pipeline to convert leader joints to EE action. leader_joints_to_ee = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( steps=[ ForwardKinematicsJointsToEE( @@ -98,9 +97,9 @@ def main(): to_output=transition_to_robot_action, ) - # Build pipeline to convert EE action to follower joints + # Build pipeline to convert EE action to follower joints (with safety bounds). ee_to_follower_joints = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( - [ + steps=[ EEBoundsAndSafety( end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}, max_ee_step_m=0.10, @@ -115,13 +114,12 @@ def main(): to_output=transition_to_robot_action, ) - # Create the dataset + # Create the dataset, deriving features from the pipelines so the on-disk schema + # matches exactly what the pipelines produce at runtime. 
dataset = LeRobotDataset.create( repo_id=HF_REPO_ID, fps=FPS, features=combine_feature_dicts( - # Run the feature contract of the pipelines - # This tells you how the features would look like after the pipeline steps aggregate_pipeline_dataset_features( pipeline=leader_joints_to_ee, initial_features=create_initial_features(action=leader.action_features), @@ -144,7 +142,7 @@ def main(): # Initialize the keyboard listener and rerun visualization listener, events = init_keyboard_listener() - init_rerun(session_name="recording_phone") + init_rerun(session_name="recording_so100_ee") try: if not leader.is_connected or not follower.is_connected: @@ -160,14 +158,14 @@ def main(): robot=follower, events=events, fps=FPS, + teleop_action_processor=leader_joints_to_ee, + robot_action_processor=ee_to_follower_joints, + robot_observation_processor=follower_joints_to_ee, teleop=leader, dataset=dataset, control_time_s=EPISODE_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=leader_joints_to_ee, - robot_action_processor=ee_to_follower_joints, - robot_observation_processor=follower_joints_to_ee, ) # Reset the environment if not stopping or re-recording @@ -179,13 +177,13 @@ def main(): robot=follower, events=events, fps=FPS, + teleop_action_processor=leader_joints_to_ee, + robot_action_processor=ee_to_follower_joints, + robot_observation_processor=follower_joints_to_ee, teleop=leader, control_time_s=RESET_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, - teleop_action_processor=leader_joints_to_ee, - robot_action_processor=ee_to_follower_joints, - robot_observation_processor=follower_joints_to_ee, ) if events["rerecord_episode"]: diff --git a/examples/so100_to_so100_EE/rollout.py b/examples/so100_to_so100_EE/rollout.py new file mode 100644 index 000000000..d608bfab2 --- /dev/null +++ b/examples/so100_to_so100_EE/rollout.py @@ -0,0 +1,134 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run a trained EE-space policy on SO100 without recording (base rollout). + +Uses the rollout engine's :class:`BaseStrategy` (autonomous execution, +no dataset) with :class:`SyncInferenceConfig` (inline policy call per +control tick). The custom observation/action processors convert between +joint space (robot hardware) and end-effector space (policy I/O) via +forward/inverse kinematics. +""" + +from lerobot.cameras.opencv import OpenCVCameraConfig +from lerobot.configs import PreTrainedConfig +from lerobot.model.kinematics import RobotKinematics +from lerobot.processor import ( + RobotProcessorPipeline, + observation_to_transition, + robot_action_observation_to_transition, + transition_to_observation, + transition_to_robot_action, +) +from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig +from lerobot.robots.so_follower.robot_kinematic_processor import ( + ForwardKinematicsJointsToEE, + InverseKinematicsEEToJoints, +) +from lerobot.rollout import BaseStrategyConfig, RolloutConfig, build_rollout_context +from lerobot.rollout.inference import SyncInferenceConfig +from lerobot.rollout.strategies import BaseStrategy +from lerobot.types import RobotAction, RobotObservation +from lerobot.utils.process import ProcessSignalHandler +from lerobot.utils.utils import init_logging + +FPS = 30 +DURATION_SEC = 60 +TASK_DESCRIPTION = "My task description" +HF_MODEL_ID = "/" + + +def main(): + init_logging() + + 
# Robot configuration — the rollout engine will connect it inside build_rollout_context. + camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} + robot_config = SO100FollowerConfig( + port="/dev/tty.usbmodem5A460814411", + id="my_awesome_follower_arm", + cameras=camera_config, + use_degrees=True, + ) + + # Kinematic solver: we need the motor-name list, so peek at the robot once. + # (The rollout engine owns the connected instance; we only use this for introspection.) + temp_robot = SO100Follower(robot_config) + motor_names = list(temp_robot.bus.motors.keys()) + + # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: + # https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf + kinematics_solver = RobotKinematics( + urdf_path="./SO101/so101_new_calib.urdf", + target_frame_name="gripper_frame_link", + joint_names=motor_names, + ) + + # Joint-space observation → EE-space observation (consumed by the policy). + robot_joints_to_ee_pose_processor = RobotProcessorPipeline[RobotObservation, RobotObservation]( + steps=[ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=motor_names)], + to_transition=observation_to_transition, + to_output=transition_to_observation, + ) + + # EE-space action (produced by the policy) → joint-space action (sent to robot). + robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( + steps=[ + InverseKinematicsEEToJoints( + kinematics=kinematics_solver, + motor_names=motor_names, + initial_guess_current_joints=True, + ), + ], + to_transition=robot_action_observation_to_transition, + to_output=transition_to_robot_action, + ) + + # Policy config (full model is loaded inside build_rollout_context). 
+ policy_config = PreTrainedConfig.from_pretrained(HF_MODEL_ID) + policy_config.pretrained_path = HF_MODEL_ID + + cfg = RolloutConfig( + robot=robot_config, + policy=policy_config, + strategy=BaseStrategyConfig(), + inference=SyncInferenceConfig(), + fps=FPS, + duration=DURATION_SEC, + task=TASK_DESCRIPTION, + ) + + signal_handler = ProcessSignalHandler(use_threads=True) + + # Pass the EE kinematic processors via kwargs; the defaults (identity) would + # otherwise skip the joint↔EE conversion and the policy would receive the + # wrong observation/action space. + ctx = build_rollout_context( + cfg, + signal_handler.shutdown_event, + robot_action_processor=robot_ee_to_joints_processor, + robot_observation_processor=robot_joints_to_ee_pose_processor, + ) + + strategy = BaseStrategy(cfg.strategy) + try: + strategy.setup(ctx) + strategy.run(ctx) + finally: + strategy.teardown(ctx) + + +if __name__ == "__main__": + main() diff --git a/examples/tutorial/rl/hilserl_example.py b/examples/tutorial/rl/hilserl_example.py index 8a08d6d56..71b50e97c 100644 --- a/examples/tutorial/rl/hilserl_example.py +++ b/examples/tutorial/rl/hilserl_example.py @@ -10,7 +10,7 @@ from lerobot.datasets import LeRobotDataset from lerobot.envs.configs import HILSerlProcessorConfig, HILSerlRobotEnvConfig from lerobot.policies import SACConfig from lerobot.policies.sac.modeling_sac import SACPolicy -from lerobot.policies.sac.reward_model.modeling_classifier import Classifier +from lerobot.rewards.classifier.modeling_classifier import Classifier from lerobot.rl.buffer import ReplayBuffer from lerobot.rl.gym_manipulator import make_robot_env from lerobot.robots.so_follower import SO100FollowerConfig diff --git a/examples/tutorial/rl/reward_classifier_example.py b/examples/tutorial/rl/reward_classifier_example.py index b386bf4db..ddecfbcfc 100644 --- a/examples/tutorial/rl/reward_classifier_example.py +++ b/examples/tutorial/rl/reward_classifier_example.py @@ -1,7 +1,7 @@ import torch from 
lerobot.datasets import LeRobotDataset -from lerobot.policies import RewardClassifierConfig, make_policy, make_pre_post_processors +from lerobot.rewards import RewardClassifierConfig, make_reward_model, make_reward_pre_post_processors def main(): @@ -22,10 +22,10 @@ def main(): model_name="microsoft/resnet-18", ) - # Make policy, preprocessor, and optimizer - policy = make_policy(config, ds_meta=dataset.meta) - optimizer = config.get_optimizer_preset().build(policy.parameters()) - preprocessor, _ = make_pre_post_processors(policy_cfg=config, dataset_stats=dataset.meta.stats) + # Make reward model, preprocessor, and optimizer + reward_model = make_reward_model(config, dataset_stats=dataset.meta.stats) + optimizer = config.get_optimizer_preset().build(reward_model.parameters()) + preprocessor, _ = make_reward_pre_post_processors(config, dataset_stats=dataset.meta.stats) classifier_id = "/reward_classifier_hil_serl_example" @@ -42,7 +42,7 @@ def main(): batch = preprocessor(batch) # Forward pass - loss, output_dict = policy.forward(batch) + loss, output_dict = reward_model.forward(batch) # Backward pass and optimization optimizer.zero_grad() @@ -58,8 +58,8 @@ def main(): print("Training finished!") - # You can now save the trained policy. - policy.push_to_hub(classifier_id) + # You can now save the trained reward model. 
+ reward_model.push_to_hub(classifier_id) if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index 0790db6fb..331dff929 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -128,7 +128,7 @@ dataset_viz = ["lerobot[dataset]", "lerobot[viz]"] av-dep = ["av>=15.0.0,<16.0.0"] pygame-dep = ["pygame>=2.5.1,<2.7.0"] placo-dep = ["placo>=0.9.6,<0.9.17"] -transformers-dep = ["transformers==5.3.0"] # TODO(Steven): https://github.com/huggingface/lerobot/pull/3249 +transformers-dep = ["transformers>=5.4.0,<5.6.0"] grpcio-dep = ["grpcio==1.73.1", "protobuf>=6.31.1,<6.32.0"] can-dep = ["python-can>=4.2.0,<5.0.0"] peft-dep = ["peft>=0.18.0,<1.0.0"] @@ -289,6 +289,7 @@ lerobot-find-joint-limits="lerobot.scripts.lerobot_find_joint_limits:main" lerobot-imgtransform-viz="lerobot.scripts.lerobot_imgtransform_viz:main" lerobot-edit-dataset="lerobot.scripts.lerobot_edit_dataset:main" lerobot-setup-can="lerobot.scripts.lerobot_setup_can:main" +lerobot-rollout="lerobot.scripts.lerobot_rollout:main" # ---------------- Tool Configurations ---------------- [tool.setuptools.package-data] diff --git a/src/lerobot/cameras/realsense/camera_realsense.py b/src/lerobot/cameras/realsense/camera_realsense.py index 6363cc0bc..e156e6d14 100644 --- a/src/lerobot/cameras/realsense/camera_realsense.py +++ b/src/lerobot/cameras/realsense/camera_realsense.py @@ -17,6 +17,7 @@ Provides the RealSenseCamera class for capturing frames from Intel RealSense cam """ import logging +import sys import time from threading import Event, Lock, Thread from typing import TYPE_CHECKING, Any @@ -41,6 +42,7 @@ from ..utils import get_cv2_rotation from .configuration_realsense import RealSenseCameraConfig logger = logging.getLogger(__name__) +pkg_name = "pyrealsense2-macosx" if sys.platform == "darwin" else "pyrealsense2" class RealSenseCamera(Camera): @@ -114,7 +116,7 @@ class RealSenseCamera(Camera): Args: config: The configuration settings for the camera. 
""" - require_package("pyrealsense2", extra="intelrealsense") + require_package(pkg_name, extra="intelrealsense", import_name="pyrealsense2") super().__init__(config) self.config = config diff --git a/src/lerobot/common/wandb_utils.py b/src/lerobot/common/wandb_utils.py index e3190b6ce..b782cd751 100644 --- a/src/lerobot/common/wandb_utils.py +++ b/src/lerobot/common/wandb_utils.py @@ -41,8 +41,12 @@ def cfg_to_group( return tag return tag[:max_tag_length] + if cfg.is_reward_model_training: + trainable_tag = f"reward_model:{cfg.reward_model.type}" + else: + trainable_tag = f"policy:{cfg.policy.type}" lst = [ - f"policy:{cfg.policy.type}", + trainable_tag, f"seed:{cfg.seed}", ] if cfg.dataset is not None: diff --git a/src/lerobot/configs/__init__.py b/src/lerobot/configs/__init__.py index a2cb1d72d..d1d08ba01 100644 --- a/src/lerobot/configs/__init__.py +++ b/src/lerobot/configs/__init__.py @@ -21,6 +21,7 @@ are intentionally NOT re-exported here to avoid circular dependencies Import them directly: ``from lerobot.configs.train import TrainPipelineConfig`` """ +from .dataset import DatasetRecordConfig from .default import DatasetConfig, EvalConfig, PeftConfig, WandBConfig from .policies import PreTrainedConfig from .recipe import MessageTurn, TrainingRecipe, load_recipe @@ -40,6 +41,7 @@ __all__ = [ "PolicyFeature", "RTCAttentionSchedule", # Config classes + "DatasetRecordConfig", "DatasetConfig", "EvalConfig", "MessageTurn", diff --git a/src/lerobot/configs/dataset.py b/src/lerobot/configs/dataset.py new file mode 100644 index 000000000..e3e17e62b --- /dev/null +++ b/src/lerobot/configs/dataset.py @@ -0,0 +1,80 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared dataset recording configuration used by both ``lerobot-record`` and ``lerobot-rollout``.""" + +from dataclasses import dataclass +from datetime import datetime +from pathlib import Path + + +@dataclass +class DatasetRecordConfig: + # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`). + repo_id: str = "" + # A short but accurate description of the task performed during the recording (e.g. "Pick the Lego block and drop it in the box on the right.") + single_task: str = "" + # Root directory where the dataset will be stored (e.g. 'dataset/path'). If None, defaults to $HF_LEROBOT_HOME/repo_id. + root: str | Path | None = None + # Limit the frames per second. + fps: int = 30 + # Number of seconds for data recording for each episode. + episode_time_s: int | float = 60 + # Number of seconds for resetting the environment after each episode. + reset_time_s: int | float = 60 + # Number of episodes to record. + num_episodes: int = 50 + # Encode frames in the dataset into video + video: bool = True + # Upload dataset to Hugging Face hub. + push_to_hub: bool = True + # Upload on private repository on the Hugging Face hub. + private: bool = False + # Add tags to your dataset on the hub. + tags: list[str] | None = None + # Number of subprocesses handling the saving of frames as PNG. Set to 0 to use threads only; + # set to ≥1 to use subprocesses, each using threads to write images. The best number of processes + # and threads depends on your system. We recommend 4 threads per camera with 0 processes. 
+ # If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses. + num_image_writer_processes: int = 0 + # Number of threads writing the frames as png images on disk, per camera. + # Too many threads might cause unstable teleoperation fps due to main thread being blocked. + # Not enough threads might cause low camera fps. + num_image_writer_threads_per_camera: int = 4 + # Number of episodes to record before batch encoding videos + # Set to 1 for immediate encoding (default behavior), or higher for batched encoding + video_encoding_batch_size: int = 1 + # Video codec for encoding videos. Options: 'h264', 'hevc', 'libsvtav1', 'auto', + # or hardware-specific: 'h264_videotoolbox', 'h264_nvenc', 'h264_vaapi', 'h264_qsv'. + # Use 'auto' to auto-detect the best available hardware encoder. + vcodec: str = "libsvtav1" + # Enable streaming video encoding: encode frames in real-time during capture instead + # of writing PNG images first. Makes save_episode() near-instant. More info in the documentation: https://huggingface.co/docs/lerobot/streaming_video_encoding + streaming_encoding: bool = False + # Maximum number of frames to buffer per camera when using streaming encoding. + # ~1s buffer at 30fps. Provides backpressure if the encoder can't keep up. + encoder_queue_maxsize: int = 30 + # Number of threads per encoder instance. None = auto (codec default). + # Lower values reduce CPU usage, maps to 'lp' (via svtav1-params) for libsvtav1 and 'threads' for h264/hevc.. + encoder_threads: int | None = None + + def stamp_repo_id(self) -> None: + """Append a date-time tag to ``repo_id`` so each recording session gets a unique name. + + Must be called explicitly at dataset *creation* time — not on resume, + where the existing ``repo_id`` (already stamped) must be preserved. 
+ """ + if self.repo_id: + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + self.repo_id = f"{self.repo_id}_{timestamp}" diff --git a/src/lerobot/configs/recipes/pi05_hirobot.yaml b/src/lerobot/configs/recipes/pi05_hirobot.yaml deleted file mode 100644 index 7cd6b009f..000000000 --- a/src/lerobot/configs/recipes/pi05_hirobot.yaml +++ /dev/null @@ -1,74 +0,0 @@ -blend: - - memory_update: - weight: 0.10 - bindings: - prior_memory: "nth_prev(style=memory, offset=1)" - current_memory: "emitted_at(t, style=memory)" - completed_subtask: "nth_prev(style=subtask, offset=1)" - messages: - - {role: user, content: "${task}", stream: high_level} - - {role: assistant, content: "Previous memory: ${prior_memory}", stream: high_level, if_present: prior_memory} - - {role: user, content: "Completed subtask: ${completed_subtask}", stream: high_level, if_present: completed_subtask} - - {role: assistant, content: "${current_memory}", stream: high_level, target: true, if_present: current_memory} - - user_interjection_response: - weight: 0.16 - bindings: - prior_plan: "nth_prev(style=plan, offset=1)" - current_plan: "emitted_at(t, style=plan)" - interjection: "emitted_at(t, style=interjection)" - speech: "emitted_at(t, role=assistant, tool_name=say)" - messages: - - {role: user, content: "${task}", stream: high_level} - - {role: assistant, content: "Previous plan:\n${prior_plan}", stream: high_level, if_present: prior_plan} - - {role: user, content: "${interjection}", stream: high_level, if_present: interjection} - - {role: assistant, content: "${current_plan}", stream: high_level, target: true, if_present: current_plan, tool_calls_from: speech} - - high_level_subtask: - weight: 0.15 - bindings: - next_subtask: "nth_next(style=subtask, offset=1)" - messages: - - {role: user, content: "${task}\nPlan: ${plan}\nMemory: ${memory}", stream: high_level} - - {role: user, content: "Current subtask: ${subtask}", stream: high_level, if_present: subtask} - - {role: assistant, content: 
"${next_subtask}", stream: high_level, target: true} - - low_level_execution: - weight: 0.35 - messages: - - {role: user, content: "${task}\nPlan: ${plan}\nMemory: ${memory}", stream: high_level} - - {role: assistant, content: "${subtask}", stream: low_level, target: true} - - # VQA is view-dependent: bbox / keypoint / count answers only make sense for - # the camera they were grounded against. Each camera gets its own sub-recipe - # so the resolver can disambiguate via `camera=...` and the user-turn carries - # the matching image block. Adjust the camera keys (and add more sub-recipes) - # to match the cameras present on your dataset. - ask_vqa_top: - weight: 0.10 - bindings: - vqa_query: "emitted_at(t, style=vqa, role=user, camera=observation.images.top)" - vqa: "emitted_at(t, style=vqa, role=assistant, camera=observation.images.top)" - messages: - - role: user - stream: high_level - if_present: vqa_query - content: - - {type: image, feature: observation.images.top} - - {type: text, text: "${vqa_query}"} - - {role: assistant, content: "${vqa}", stream: high_level, target: true, if_present: vqa} - - ask_vqa_wrist: - weight: 0.10 - bindings: - vqa_query: "emitted_at(t, style=vqa, role=user, camera=observation.images.wrist)" - vqa: "emitted_at(t, style=vqa, role=assistant, camera=observation.images.wrist)" - messages: - - role: user - stream: high_level - if_present: vqa_query - content: - - {type: image, feature: observation.images.wrist} - - {type: text, text: "${vqa_query}"} - - {role: assistant, content: "${vqa}", stream: high_level, target: true, if_present: vqa} diff --git a/src/lerobot/configs/rewards.py b/src/lerobot/configs/rewards.py new file mode 100644 index 000000000..d495160bf --- /dev/null +++ b/src/lerobot/configs/rewards.py @@ -0,0 +1,163 @@ +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import builtins +import json +import logging +import os +import tempfile +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, TypeVar + +import draccus +from huggingface_hub import hf_hub_download +from huggingface_hub.constants import CONFIG_NAME +from huggingface_hub.errors import HfHubHTTPError + +from lerobot.configs.types import PolicyFeature +from lerobot.optim.optimizers import OptimizerConfig +from lerobot.optim.schedulers import LRSchedulerConfig +from lerobot.utils.device_utils import auto_select_torch_device, is_torch_device_available +from lerobot.utils.hub import HubMixin + +T = TypeVar("T", bound="RewardModelConfig") +logger = logging.getLogger(__name__) + + +@dataclass +class RewardModelConfig(draccus.ChoiceRegistry, HubMixin, abc.ABC): + """Base configuration for reward models. + + Args: + input_features: A dictionary defining the PolicyFeature of the input data for the reward. The key represents + the input data name, and the value is PolicyFeature, which consists of FeatureType and shape attributes. + output_features: A dictionary defining the PolicyFeature of the output data for the reward. The key represents + the output data name, and the value is PolicyFeature, which consists of FeatureType and shape attributes. 
+ """ + + # Reuses PolicyFeature + input_features: dict[str, PolicyFeature] = field(default_factory=dict) + output_features: dict[str, PolicyFeature] = field(default_factory=dict) + + device: str | None = None + + pretrained_path: str | None = None + + push_to_hub: bool = False + repo_id: str | None = None + + # Hub metadata + license: str | None = None + tags: list[str] | None = None + private: bool | None = None + + def __post_init__(self) -> None: + if not self.device or not is_torch_device_available(self.device): + auto_device = auto_select_torch_device() + logger.warning(f"Device '{self.device}' is not available. Switching to '{auto_device}'.") + self.device = auto_device.type + + @property + def type(self) -> str: + choice_name = self.get_choice_name(self.__class__) + if not isinstance(choice_name, str): + raise TypeError(f"Expected string from get_choice_name, got {type(choice_name)}") + return choice_name + + @property + def observation_delta_indices(self) -> list | None: # type: ignore[type-arg] + return None + + @property + def action_delta_indices(self) -> list | None: # type: ignore[type-arg] + return None + + @property + def reward_delta_indices(self) -> list | None: # type: ignore[type-arg] + return None + + @abc.abstractmethod + def get_optimizer_preset(self) -> OptimizerConfig: + raise NotImplementedError + + def get_scheduler_preset(self) -> LRSchedulerConfig | None: + return None + + def validate_features(self) -> None: + pass + + def _save_pretrained(self, save_directory: Path) -> None: + with open(save_directory / CONFIG_NAME, "w") as f, draccus.config_type("json"): + draccus.dump(self, f, indent=4) + + @classmethod + def from_pretrained( + cls: builtins.type[T], + pretrained_name_or_path: str | Path, + *, + force_download: bool = False, + resume_download: bool | None = None, + proxies: dict[Any, Any] | None = None, + token: str | bool | None = None, + cache_dir: str | Path | None = None, + local_files_only: bool = False, + revision: str | None 
= None, + **reward_kwargs: Any, + ) -> T: + model_id = str(pretrained_name_or_path) + config_file: str | None = None + if Path(model_id).is_dir(): + if CONFIG_NAME in os.listdir(model_id): + config_file = os.path.join(model_id, CONFIG_NAME) + else: + logger.error(f"{CONFIG_NAME} not found in {Path(model_id).resolve()}") + else: + try: + config_file = hf_hub_download( + repo_id=model_id, + filename=CONFIG_NAME, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + except HfHubHTTPError as e: + raise FileNotFoundError( + f"{CONFIG_NAME} not found on the HuggingFace Hub in {model_id}" + ) from e + + if config_file is None: + raise FileNotFoundError(f"{CONFIG_NAME} not found in {model_id}") + + # HACK: Parse the original config to get the config subclass, so that we can + # apply cli overrides. + with draccus.config_type("json"): + orig_config = draccus.parse(cls, config_file, args=[]) + + with open(config_file) as f: + config = json.load(f) + + config.pop("type", None) + with tempfile.NamedTemporaryFile("w+", delete=False, suffix=".json") as f: + json.dump(config, f) + config_file = f.name + + cli_overrides = reward_kwargs.pop("cli_overrides", []) + with draccus.config_type("json"): + return draccus.parse(orig_config.__class__, config_file, args=cli_overrides) diff --git a/src/lerobot/configs/train.py b/src/lerobot/configs/train.py index 924bcf5bb..b2b3cd7a0 100644 --- a/src/lerobot/configs/train.py +++ b/src/lerobot/configs/train.py @@ -13,7 +13,9 @@ # limitations under the License. 
import builtins import datetime as dt +import json import os +import tempfile from dataclasses import dataclass, field from pathlib import Path from typing import Any @@ -26,18 +28,57 @@ from lerobot import envs from lerobot.configs import parser from lerobot.optim import LRSchedulerConfig, OptimizerConfig from lerobot.utils.hub import HubMixin +from lerobot.utils.sample_weighting import SampleWeightingConfig from .default import DatasetConfig, EvalConfig, PeftConfig, WandBConfig from .policies import PreTrainedConfig +from .rewards import RewardModelConfig TRAIN_CONFIG_NAME = "train_config.json" +def _migrate_legacy_rabc_fields(config: dict[str, Any]) -> dict[str, Any] | None: + """Return migrated payload for legacy RA-BC fields, or None when no migration is needed.""" + legacy_fields = ( + "use_rabc", + "rabc_progress_path", + "rabc_kappa", + "rabc_epsilon", + "rabc_head_mode", + ) + if not any(key in config for key in legacy_fields): + return None + + migrated_config = dict(config) + use_rabc = bool(migrated_config.pop("use_rabc", False)) + rabc_progress_path = migrated_config.pop("rabc_progress_path", None) + rabc_kappa = migrated_config.pop("rabc_kappa", None) + rabc_epsilon = migrated_config.pop("rabc_epsilon", None) + rabc_head_mode = migrated_config.pop("rabc_head_mode", None) + + # New configs may already define sample_weighting explicitly. In that case, + # legacy fields are ignored after being stripped from the payload. 
+ if migrated_config.get("sample_weighting") is None and use_rabc: + sample_weighting: dict[str, Any] = {"type": "rabc"} + if rabc_progress_path is not None: + sample_weighting["progress_path"] = rabc_progress_path + if rabc_kappa is not None: + sample_weighting["kappa"] = rabc_kappa + if rabc_epsilon is not None: + sample_weighting["epsilon"] = rabc_epsilon + if rabc_head_mode is not None: + sample_weighting["head_mode"] = rabc_head_mode + migrated_config["sample_weighting"] = sample_weighting + + return migrated_config + + @dataclass class TrainPipelineConfig(HubMixin): dataset: DatasetConfig env: envs.EnvConfig | None = None policy: PreTrainedConfig | None = None + reward_model: RewardModelConfig | None = None # Set `dir` to where you would like to save all of the run outputs. If you run another training session # with the same value for `dir` its contents will be overwritten unless you set `resume` to true. output_dir: Path | None = None @@ -72,27 +113,41 @@ class TrainPipelineConfig(HubMixin): wandb: WandBConfig = field(default_factory=WandBConfig) peft: PeftConfig | None = None - # RA-BC (Reward-Aligned Behavior Cloning) parameters - use_rabc: bool = False # Enable reward-weighted training - rabc_progress_path: str | None = None # Path to precomputed SARM progress parquet file - rabc_kappa: float = 0.01 # Hard threshold for high-quality samples - rabc_epsilon: float = 1e-6 # Small constant for numerical stability - rabc_head_mode: str | None = "sparse" # For dual-head models: "sparse" or "dense" + # Sample weighting configuration (e.g., for RA-BC training) + sample_weighting: SampleWeightingConfig | None = None # Rename map for the observation to override the image and state keys rename_map: dict[str, str] = field(default_factory=dict) checkpoint_path: Path | None = field(init=False, default=None) + @property + def is_reward_model_training(self) -> bool: + """True when the config targets a reward model rather than a policy.""" + return self.reward_model is 
not None + + @property + def trainable_config(self) -> PreTrainedConfig | RewardModelConfig: + """Return whichever config (policy or reward_model) is active.""" + if self.is_reward_model_training: + return self.reward_model # type: ignore[return-value] + return self.policy # type: ignore[return-value] + def validate(self) -> None: # HACK: We parse again the cli args here to get the pretrained paths if there was some. policy_path = parser.get_path_arg("policy") - if policy_path: - # Only load the policy config + reward_model_path = parser.get_path_arg("reward_model") + + if reward_model_path: + cli_overrides = parser.get_cli_overrides("reward_model") + self.reward_model = RewardModelConfig.from_pretrained( + reward_model_path, cli_overrides=cli_overrides + ) + self.reward_model.pretrained_path = str(Path(reward_model_path)) + elif policy_path: cli_overrides = parser.get_cli_overrides("policy") self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = Path(policy_path) elif self.resume: - # The entire train config is already loaded, we just need to get the checkpoint dir config_path = parser.parse_arg("config_path") if not config_path: raise ValueError( @@ -108,18 +163,22 @@ class TrainPipelineConfig(HubMixin): policy_dir = Path(config_path).parent if self.policy is not None: self.policy.pretrained_path = policy_dir + if self.reward_model is not None: + self.reward_model.pretrained_path = str(policy_dir) self.checkpoint_path = policy_dir.parent - if self.policy is None: + if self.policy is None and self.reward_model is None: raise ValueError( - "Policy is not configured. Please specify a pretrained policy with `--policy.path`." + "Neither policy nor reward_model is configured. " + "Please specify one with `--policy.path` or `--reward_model.path`." 
) + active_cfg = self.trainable_config if not self.job_name: if self.env is None: - self.job_name = f"{self.policy.type}" + self.job_name = f"{active_cfg.type}" else: - self.job_name = f"{self.env.type}_{self.policy.type}" + self.job_name = f"{self.env.type}_{active_cfg.type}" if not self.resume and isinstance(self.output_dir, Path) and self.output_dir.is_dir(): raise FileExistsError( @@ -137,26 +196,16 @@ class TrainPipelineConfig(HubMixin): if not self.use_policy_training_preset and (self.optimizer is None or self.scheduler is None): raise ValueError("Optimizer and Scheduler must be set when the policy presets are not used.") elif self.use_policy_training_preset and not self.resume: - self.optimizer = self.policy.get_optimizer_preset() - self.scheduler = self.policy.get_scheduler_preset() + self.optimizer = active_cfg.get_optimizer_preset() + self.scheduler = active_cfg.get_scheduler_preset() - if self.policy.push_to_hub and not self.policy.repo_id: - raise ValueError( - "'policy.repo_id' argument missing. Please specify it to push the model to the hub." - ) - - if self.use_rabc and not self.rabc_progress_path: - # Auto-detect from dataset path - repo_id = self.dataset.repo_id - if self.dataset.root: - self.rabc_progress_path = str(Path(self.dataset.root) / "sarm_progress.parquet") - else: - self.rabc_progress_path = f"hf://datasets/{repo_id}/sarm_progress.parquet" + if hasattr(active_cfg, "push_to_hub") and active_cfg.push_to_hub and not active_cfg.repo_id: + raise ValueError("'repo_id' argument missing. 
Please specify it to push the model to the hub.") @classmethod def __get_path_fields__(cls) -> list[str]: - """This enables the parser to load config from the policy using `--policy.path=local/dir`""" - return ["policy"] + """Keys for draccus pretrained-path loading.""" + return ["policy", "reward_model"] def to_dict(self) -> dict[str, Any]: return draccus.encode(self) # type: ignore[no-any-return] # because of the third-party library draccus uses Any as the return type @@ -207,6 +256,15 @@ class TrainPipelineConfig(HubMixin): ) from e cli_args = kwargs.pop("cli_args", []) + if config_file is not None: + with open(config_file) as f: + config = json.load(f) + migrated_config = _migrate_legacy_rabc_fields(config) + if migrated_config is not None: + with tempfile.NamedTemporaryFile("w+", delete=False, suffix=".json") as f: + json.dump(migrated_config, f) + config_file = f.name + with draccus.config_type("json"): return draccus.parse(cls, config_file, args=cli_args) diff --git a/src/lerobot/datasets/aggregate.py b/src/lerobot/datasets/aggregate.py index 0da1da964..90fc8f583 100644 --- a/src/lerobot/datasets/aggregate.py +++ b/src/lerobot/datasets/aggregate.py @@ -97,8 +97,8 @@ def update_data_df(df, src_meta, dst_meta): pd.DataFrame: Updated DataFrame with adjusted indices. 
""" - df["episode_index"] = df["episode_index"] + dst_meta.info["total_episodes"] - df["index"] = df["index"] + dst_meta.info["total_frames"] + df["episode_index"] = df["episode_index"] + dst_meta.info.total_episodes + df["index"] = df["index"] + dst_meta.info.total_frames src_task_names = src_meta.tasks.index.take(df["task_index"].to_numpy()) df["task_index"] = dst_meta.tasks.loc[src_task_names, "task_index"].to_numpy() @@ -225,9 +225,9 @@ def update_meta_data( # Clean up temporary columns df = df.drop(columns=["_orig_chunk", "_orig_file"]) - df["dataset_from_index"] = df["dataset_from_index"] + dst_meta.info["total_frames"] - df["dataset_to_index"] = df["dataset_to_index"] + dst_meta.info["total_frames"] - df["episode_index"] = df["episode_index"] + dst_meta.info["total_episodes"] + df["dataset_from_index"] = df["dataset_from_index"] + dst_meta.info.total_frames + df["dataset_to_index"] = df["dataset_to_index"] + dst_meta.info.total_frames + df["episode_index"] = df["episode_index"] + dst_meta.info.total_episodes return df @@ -237,8 +237,8 @@ def aggregate_datasets( aggr_repo_id: str, roots: list[Path] | None = None, aggr_root: Path | None = None, - data_files_size_in_mb: float | None = None, - video_files_size_in_mb: float | None = None, + data_files_size_in_mb: int | None = None, + video_files_size_in_mb: int | None = None, chunk_size: int | None = None, ): """Aggregates multiple LeRobot datasets into a single unified dataset. 
@@ -313,8 +313,8 @@ def aggregate_datasets( # to avoid interference between different source datasets data_idx.pop("src_to_dst", None) - dst_meta.info["total_episodes"] += src_meta.total_episodes - dst_meta.info["total_frames"] += src_meta.total_frames + dst_meta.info.total_episodes += src_meta.total_episodes + dst_meta.info.total_frames += src_meta.total_frames finalize_aggregation(dst_meta, all_metadata) logging.info("Aggregation complete.") @@ -640,14 +640,10 @@ def finalize_aggregation(aggr_meta, all_metadata): write_tasks(aggr_meta.tasks, aggr_meta.root) logging.info("write info") - aggr_meta.info.update( - { - "total_tasks": len(aggr_meta.tasks), - "total_episodes": sum(m.total_episodes for m in all_metadata), - "total_frames": sum(m.total_frames for m in all_metadata), - "splits": {"train": f"0:{sum(m.total_episodes for m in all_metadata)}"}, - } - ) + aggr_meta.info.total_tasks = len(aggr_meta.tasks) + aggr_meta.info.total_episodes = sum(m.total_episodes for m in all_metadata) + aggr_meta.info.total_frames = sum(m.total_frames for m in all_metadata) + aggr_meta.info.splits = {"train": f"0:{sum(m.total_episodes for m in all_metadata)}"} write_info(aggr_meta.info, aggr_meta.root) logging.info("write stats") diff --git a/src/lerobot/datasets/dataset_metadata.py b/src/lerobot/datasets/dataset_metadata.py index 959018dd3..99aec7a05 100644 --- a/src/lerobot/datasets/dataset_metadata.py +++ b/src/lerobot/datasets/dataset_metadata.py @@ -36,13 +36,11 @@ from .io_utils import ( load_stats, load_tasks, write_info, - write_json, write_stats, write_tasks, ) from .utils import ( DEFAULT_EPISODES_PATH, - INFO_PATH, check_version_compatibility, get_safe_version, has_legacy_hub_download_metadata, @@ -226,7 +224,7 @@ class LeRobotDatasetMetadata: @property def _version(self) -> packaging.version.Version: """Codebase version used to create this dataset.""" - return packaging.version.parse(self.info["codebase_version"]) + return 
packaging.version.parse(self.info.codebase_version) def get_data_file_path(self, ep_index: int) -> Path: """Return the relative parquet file path for the given episode index. @@ -281,27 +279,27 @@ class LeRobotDatasetMetadata: @property def data_path(self) -> str: """Formattable string for the parquet files.""" - return self.info["data_path"] + return self.info.data_path @property def video_path(self) -> str | None: """Formattable string for the video files.""" - return self.info["video_path"] + return self.info.video_path @property def robot_type(self) -> str | None: """Robot type used in recording this dataset.""" - return self.info["robot_type"] + return self.info.robot_type @property def fps(self) -> int: """Frames per second used during data collection.""" - return self.info["fps"] + return self.info.fps @property def features(self) -> dict[str, dict]: """All features contained in the dataset.""" - return self.info["features"] + return self.info.features @property def image_keys(self) -> list[str]: @@ -353,32 +351,32 @@ class LeRobotDatasetMetadata: @property def total_episodes(self) -> int: """Total number of episodes available.""" - return self.info["total_episodes"] + return self.info.total_episodes @property def total_frames(self) -> int: """Total number of frames saved in this dataset.""" - return self.info["total_frames"] + return self.info.total_frames @property def total_tasks(self) -> int: """Total number of different tasks performed in this dataset.""" - return self.info["total_tasks"] + return self.info.total_tasks @property def chunks_size(self) -> int: """Max number of files per chunk.""" - return self.info["chunks_size"] + return self.info.chunks_size @property def data_files_size_in_mb(self) -> int: """Max size of data file in mega bytes.""" - return self.info["data_files_size_in_mb"] + return self.info.data_files_size_in_mb @property def video_files_size_in_mb(self) -> int: """Max size of video file in mega bytes.""" - return 
self.info["video_files_size_in_mb"] + return self.info.video_files_size_in_mb def get_task_index(self, task: str) -> int | None: """ @@ -522,10 +520,10 @@ class LeRobotDatasetMetadata: self._save_episode_metadata(episode_dict) # Update info - self.info["total_episodes"] += 1 - self.info["total_frames"] += episode_length - self.info["total_tasks"] = len(self.tasks) - self.info["splits"] = {"train": f"0:{self.info['total_episodes']}"} + self.info.total_episodes += 1 + self.info.total_frames += episode_length + self.info.total_tasks = len(self.tasks) + self.info.splits = {"train": f"0:{self.info.total_episodes}"} write_info(self.info, self.root) @@ -544,7 +542,7 @@ class LeRobotDatasetMetadata: for key in video_keys: if not self.features[key].get("info", None): video_path = self.root / self.video_path.format(video_key=key, chunk_index=0, file_index=0) - self.info["features"][key]["info"] = get_video_info(video_path) + self.info.features[key]["info"] = get_video_info(video_path) def update_chunk_settings( self, @@ -566,17 +564,17 @@ class LeRobotDatasetMetadata: if chunks_size is not None: if chunks_size <= 0: raise ValueError(f"chunks_size must be positive, got {chunks_size}") - self.info["chunks_size"] = chunks_size + self.info.chunks_size = chunks_size if data_files_size_in_mb is not None: if data_files_size_in_mb <= 0: raise ValueError(f"data_files_size_in_mb must be positive, got {data_files_size_in_mb}") - self.info["data_files_size_in_mb"] = data_files_size_in_mb + self.info.data_files_size_in_mb = data_files_size_in_mb if video_files_size_in_mb is not None: if video_files_size_in_mb <= 0: raise ValueError(f"video_files_size_in_mb must be positive, got {video_files_size_in_mb}") - self.info["video_files_size_in_mb"] = video_files_size_in_mb + self.info.video_files_size_in_mb = video_files_size_in_mb # Update the info file on disk write_info(self.info, self.root) @@ -672,7 +670,7 @@ class LeRobotDatasetMetadata: f"Features contain video keys {obj.video_keys}, but 
'use_videos' is set to False. " "Either remove video features from the features dict, or set 'use_videos=True'." ) - write_json(obj.info, obj.root / INFO_PATH) + write_info(obj.info, obj.root) obj.revision = None obj._pq_writer = None obj.latest_episode = None diff --git a/src/lerobot/datasets/dataset_tools.py b/src/lerobot/datasets/dataset_tools.py index cbf4e5c49..46dd9bff2 100644 --- a/src/lerobot/datasets/dataset_tools.py +++ b/src/lerobot/datasets/dataset_tools.py @@ -897,14 +897,10 @@ def _copy_and_reindex_episodes_metadata( dst_meta.finalize() - dst_meta.info.update( - { - "total_episodes": len(episode_mapping), - "total_frames": total_frames, - "total_tasks": len(dst_meta.tasks) if dst_meta.tasks is not None else 0, - "splits": {"train": f"0:{len(episode_mapping)}"}, - } - ) + dst_meta.info.total_episodes = len(episode_mapping) + dst_meta.info.total_frames = total_frames + dst_meta.info.total_tasks = len(dst_meta.tasks) if dst_meta.tasks is not None else 0 + dst_meta.info.splits = {"train": f"0:{len(episode_mapping)}"} write_info(dst_meta.info, dst_meta.root) if not all_stats: @@ -1069,21 +1065,20 @@ def _copy_episodes_metadata_and_stats( if episodes_dir.exists(): shutil.copytree(episodes_dir, dst_episodes_dir, dirs_exist_ok=True) - dst_meta.info.update( - { - "total_episodes": src_dataset.meta.total_episodes, - "total_frames": src_dataset.meta.total_frames, - "total_tasks": src_dataset.meta.total_tasks, - "splits": src_dataset.meta.info.get("splits", {"train": f"0:{src_dataset.meta.total_episodes}"}), - } + dst_meta.info.total_episodes = src_dataset.meta.total_episodes + dst_meta.info.total_frames = src_dataset.meta.total_frames + dst_meta.info.total_tasks = src_dataset.meta.total_tasks + # Preserve original splits if available, otherwise create default + dst_meta.info.splits = ( + src_dataset.meta.info.splits + if src_dataset.meta.info.splits + else {"train": f"0:{src_dataset.meta.total_episodes}"} ) if dst_meta.video_keys and src_dataset.meta.video_keys: 
for key in dst_meta.video_keys: if key in src_dataset.meta.features: - dst_meta.info["features"][key]["info"] = src_dataset.meta.info["features"][key].get( - "info", {} - ) + dst_meta.info.features[key]["info"] = src_dataset.meta.info.features[key].get("info", {}) write_info(dst_meta.info, dst_meta.root) @@ -1525,7 +1520,7 @@ def modify_tasks( write_tasks(new_task_df, root) # Update info.json - dataset.meta.info["total_tasks"] = len(unique_tasks) + dataset.meta.info.total_tasks = len(unique_tasks) write_info(dataset.meta.info, root) # Reload metadata to reflect changes @@ -1858,10 +1853,10 @@ def convert_image_to_video_dataset( episodes_df.to_parquet(episodes_path, index=False) # Update metadata info - new_meta.info["total_episodes"] = len(episode_indices) - new_meta.info["total_frames"] = sum(ep["length"] for ep in all_episode_metadata.values()) - new_meta.info["total_tasks"] = dataset.meta.total_tasks - new_meta.info["splits"] = {"train": f"0:{len(episode_indices)}"} + new_meta.info.total_episodes = len(episode_indices) + new_meta.info.total_frames = sum(ep["length"] for ep in all_episode_metadata.values()) + new_meta.info.total_tasks = dataset.meta.total_tasks + new_meta.info.splits = {"train": f"0:{len(episode_indices)}"} # Update video info for all image keys (now videos) # We need to manually set video info since update_video_info() checks video_keys first @@ -1870,7 +1865,7 @@ def convert_image_to_video_dataset( video_path = new_meta.root / new_meta.video_path.format( video_key=img_key, chunk_index=0, file_index=0 ) - new_meta.info["features"][img_key]["info"] = get_video_info(video_path) + new_meta.info.features[img_key]["info"] = get_video_info(video_path) write_info(new_meta.info, new_meta.root) diff --git a/src/lerobot/datasets/factory.py b/src/lerobot/datasets/factory.py index 73df3f04b..cbbe83dc8 100644 --- a/src/lerobot/datasets/factory.py +++ b/src/lerobot/datasets/factory.py @@ -19,6 +19,7 @@ from pprint import pformat import torch from 
lerobot.configs import PreTrainedConfig +from lerobot.configs.rewards import RewardModelConfig from lerobot.configs.train import TrainPipelineConfig from lerobot.transforms import ImageTransforms from lerobot.utils.constants import ACTION, IMAGENET_STATS, OBS_PREFIX, REWARD @@ -30,12 +31,14 @@ from .streaming_dataset import StreamingLeRobotDataset def resolve_delta_timestamps( - cfg: PreTrainedConfig, ds_meta: LeRobotDatasetMetadata + cfg: PreTrainedConfig | RewardModelConfig, ds_meta: LeRobotDatasetMetadata ) -> dict[str, list] | None: - """Resolves delta_timestamps by reading from the 'delta_indices' properties of the PreTrainedConfig. + """Resolves delta_timestamps by reading from the 'delta_indices' properties of the config. Args: - cfg (PreTrainedConfig): The PreTrainedConfig to read delta_indices from. + cfg (PreTrainedConfig | RewardModelConfig): The config to read delta_indices from. Both + ``PreTrainedConfig`` and concrete ``RewardModelConfig`` subclasses expose the + ``{observation,action,reward}_delta_indices`` properties used below. ds_meta (LeRobotDatasetMetadata): The dataset from which features and fps are used to build delta_timestamps against. 
@@ -82,7 +85,7 @@ def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDatas ds_meta = LeRobotDatasetMetadata( cfg.dataset.repo_id, root=cfg.dataset.root, revision=cfg.dataset.revision ) - delta_timestamps = resolve_delta_timestamps(cfg.policy, ds_meta) + delta_timestamps = resolve_delta_timestamps(cfg.trainable_config, ds_meta) if not cfg.dataset.streaming: dataset = LeRobotDataset( cfg.dataset.repo_id, diff --git a/src/lerobot/datasets/feature_utils.py b/src/lerobot/datasets/feature_utils.py index 4faaa3d48..a1993bc8c 100644 --- a/src/lerobot/datasets/feature_utils.py +++ b/src/lerobot/datasets/feature_utils.py @@ -34,6 +34,7 @@ from .utils import ( DEFAULT_DATA_PATH, DEFAULT_VIDEO_FILE_SIZE_IN_MB, DEFAULT_VIDEO_PATH, + DatasetInfo, ) @@ -90,8 +91,8 @@ def create_empty_dataset_info( chunks_size: int | None = None, data_files_size_in_mb: int | None = None, video_files_size_in_mb: int | None = None, -) -> dict: - """Create a template dictionary for a new dataset's `info.json`. +) -> DatasetInfo: + """Create a template ``DatasetInfo`` object for a new dataset's ``meta/info.json``. Args: codebase_version (str): The version of the LeRobot codebase. @@ -99,25 +100,24 @@ def create_empty_dataset_info( features (dict): The LeRobot features dictionary for the dataset. use_videos (bool): Whether the dataset will store videos. robot_type (str | None): The type of robot used, if any. + chunks_size (int | None): Max files per chunk directory. Defaults to ``DEFAULT_CHUNK_SIZE``. + data_files_size_in_mb (int | None): Max parquet file size in MB. Defaults to ``DEFAULT_DATA_FILE_SIZE_IN_MB``. + video_files_size_in_mb (int | None): Max video file size in MB. Defaults to ``DEFAULT_VIDEO_FILE_SIZE_IN_MB``. Returns: - dict: A dictionary with the initial dataset metadata. + DatasetInfo: A typed dataset information object with initial metadata. 
""" - return { - "codebase_version": codebase_version, - "robot_type": robot_type, - "total_episodes": 0, - "total_frames": 0, - "total_tasks": 0, - "chunks_size": chunks_size or DEFAULT_CHUNK_SIZE, - "data_files_size_in_mb": data_files_size_in_mb or DEFAULT_DATA_FILE_SIZE_IN_MB, - "video_files_size_in_mb": video_files_size_in_mb or DEFAULT_VIDEO_FILE_SIZE_IN_MB, - "fps": fps, - "splits": {}, - "data_path": DEFAULT_DATA_PATH, - "video_path": DEFAULT_VIDEO_PATH if use_videos else None, - "features": features, - } + return DatasetInfo( + codebase_version=codebase_version, + fps=fps, + features=features, + robot_type=robot_type, + chunks_size=chunks_size or DEFAULT_CHUNK_SIZE, + data_files_size_in_mb=data_files_size_in_mb or DEFAULT_DATA_FILE_SIZE_IN_MB, + video_files_size_in_mb=video_files_size_in_mb or DEFAULT_VIDEO_FILE_SIZE_IN_MB, + data_path=DEFAULT_DATA_PATH, + video_path=DEFAULT_VIDEO_PATH if use_videos else None, + ) def check_delta_timestamps( diff --git a/src/lerobot/datasets/io_utils.py b/src/lerobot/datasets/io_utils.py index f416cf46b..f4552c8b9 100644 --- a/src/lerobot/datasets/io_utils.py +++ b/src/lerobot/datasets/io_utils.py @@ -38,6 +38,7 @@ from .utils import ( EPISODES_DIR, INFO_PATH, STATS_PATH, + DatasetInfo, serialize_dict, ) @@ -114,25 +115,21 @@ def embed_images(dataset: datasets.Dataset) -> datasets.Dataset: return dataset -def write_info(info: dict, local_dir: Path) -> None: - write_json(info, local_dir / INFO_PATH) +def write_info(info: DatasetInfo, local_dir: Path) -> None: + write_json(info.to_dict(), local_dir / INFO_PATH) -def load_info(local_dir: Path) -> dict: +def load_info(local_dir: Path) -> DatasetInfo: """Load dataset info metadata from its standard file path. - Also converts shape lists to tuples for consistency. - Args: local_dir (Path): The root directory of the dataset. Returns: - dict: The dataset information dictionary. + DatasetInfo: The typed dataset information object. 
""" - info = load_json(local_dir / INFO_PATH) - for ft in info["features"].values(): - ft["shape"] = tuple(ft["shape"]) - return info + raw = load_json(local_dir / INFO_PATH) + return DatasetInfo.from_dict(raw) def write_stats(stats: dict, local_dir: Path) -> None: diff --git a/src/lerobot/datasets/lerobot_dataset.py b/src/lerobot/datasets/lerobot_dataset.py index 644ce14db..b6ab0f5f0 100644 --- a/src/lerobot/datasets/lerobot_dataset.py +++ b/src/lerobot/datasets/lerobot_dataset.py @@ -630,6 +630,8 @@ class LeRobotDataset(torch.utils.data.Dataset): streaming_encoding: bool = False, encoder_queue_maxsize: int = 30, encoder_threads: int | None = None, + video_files_size_in_mb: int | None = None, + data_files_size_in_mb: int | None = None, ) -> "LeRobotDataset": """Create a new LeRobotDataset from scratch for recording data. @@ -677,6 +679,8 @@ class LeRobotDataset(torch.utils.data.Dataset): root=root, use_videos=use_videos, metadata_buffer_size=metadata_buffer_size, + video_files_size_in_mb=video_files_size_in_mb, + data_files_size_in_mb=data_files_size_in_mb, ) obj.repo_id = obj.meta.repo_id obj._requested_root = obj.meta.root diff --git a/src/lerobot/datasets/multi_dataset.py b/src/lerobot/datasets/multi_dataset.py index b4b7a941d..b955c1114 100644 --- a/src/lerobot/datasets/multi_dataset.py +++ b/src/lerobot/datasets/multi_dataset.py @@ -123,7 +123,7 @@ class MultiLeRobotDataset(torch.utils.data.Dataset): NOTE: Fow now, this relies on a check in __init__ to make sure all sub-datasets have the same info. """ - return self._datasets[0].meta.info["fps"] + return self._datasets[0].meta.info.fps @property def video(self) -> bool: @@ -133,7 +133,7 @@ class MultiLeRobotDataset(torch.utils.data.Dataset): NOTE: Fow now, this relies on a check in __init__ to make sure all sub-datasets have the same info. 
""" - return self._datasets[0].meta.info.get("video", False) + return len(self._datasets[0].meta.video_keys) > 0 @property def features(self) -> datasets.Features: diff --git a/src/lerobot/datasets/streaming_dataset.py b/src/lerobot/datasets/streaming_dataset.py index 4de2ed69c..3c1e4a73c 100644 --- a/src/lerobot/datasets/streaming_dataset.py +++ b/src/lerobot/datasets/streaming_dataset.py @@ -434,7 +434,7 @@ class StreamingLeRobotDataset(torch.utils.data.IterableDataset): def _make_padding_camera_frame(self, camera_key: str): """Variable-shape padding frame for given camera keys, given in (H, W, C)""" - return torch.zeros(self.meta.info["features"][camera_key]["shape"]).permute(-1, 0, 1) + return torch.zeros(self.meta.info.features[camera_key]["shape"]).permute(-1, 0, 1) def _get_video_frame_padding_mask( self, diff --git a/src/lerobot/datasets/utils.py b/src/lerobot/datasets/utils.py index 922a2d3ce..efccbf23e 100644 --- a/src/lerobot/datasets/utils.py +++ b/src/lerobot/datasets/utils.py @@ -14,9 +14,11 @@ # See the License for the specific language governing permissions and # limitations under the License. import contextlib +import dataclasses import importlib.resources import json import logging +from dataclasses import dataclass, field from pathlib import Path import datasets @@ -70,6 +72,9 @@ class ForwardCompatibilityError(CompatibilityError): super().__init__(message) +logger = logging.getLogger(__name__) + + DEFAULT_CHUNK_SIZE = 1000 # Max number of files per chunk DEFAULT_DATA_FILE_SIZE_IN_MB = 100 # Max size per file DEFAULT_VIDEO_FILE_SIZE_IN_MB = 200 # Max size per file @@ -93,6 +98,123 @@ LEGACY_EPISODES_STATS_PATH = "meta/episodes_stats.jsonl" LEGACY_TASKS_PATH = "meta/tasks.jsonl" +@dataclass +class DatasetInfo: + """Typed representation of the ``meta/info.json`` file for a LeRobot dataset. + + Replaces the previously untyped ``dict`` returned by ``load_info()`` and + created by ``create_empty_dataset_info()``. 
Using a dataclass provides + explicit field definitions, IDE auto-completion, and validation at + construction time. + """ + + codebase_version: str + fps: int + features: dict[str, dict] + + # Episode / frame counters — start at zero for new datasets + total_episodes: int = 0 + total_frames: int = 0 + total_tasks: int = 0 + + # Storage settings + chunks_size: int = field(default=DEFAULT_CHUNK_SIZE) + data_files_size_in_mb: int = field(default=DEFAULT_DATA_FILE_SIZE_IN_MB) + video_files_size_in_mb: int = field(default=DEFAULT_VIDEO_FILE_SIZE_IN_MB) + + # File path templates + data_path: str = field(default=DEFAULT_DATA_PATH) + video_path: str | None = field(default=DEFAULT_VIDEO_PATH) + + # Optional metadata + robot_type: str | None = None + splits: dict[str, str] = field(default_factory=dict) + + def __post_init__(self) -> None: + # Coerce feature shapes from list to tuple — JSON deserialisation + # returns lists, but the rest of the codebase expects tuples. + for ft in self.features.values(): + if isinstance(ft.get("shape"), list): + ft["shape"] = tuple(ft["shape"]) + + if self.fps <= 0: + raise ValueError(f"fps must be positive, got {self.fps}") + if self.chunks_size <= 0: + raise ValueError(f"chunks_size must be positive, got {self.chunks_size}") + if self.data_files_size_in_mb <= 0: + raise ValueError(f"data_files_size_in_mb must be positive, got {self.data_files_size_in_mb}") + if self.video_files_size_in_mb <= 0: + raise ValueError(f"video_files_size_in_mb must be positive, got {self.video_files_size_in_mb}") + + def to_dict(self) -> dict: + """Return a JSON-serialisable dict. + + Converts tuple shapes back to lists so ``json.dump`` can handle them. + """ + d = dataclasses.asdict(self) + for ft in d["features"].values(): + if isinstance(ft.get("shape"), tuple): + ft["shape"] = list(ft["shape"]) + return d + + @classmethod + def from_dict(cls, data: dict) -> "DatasetInfo": + """Construct from a raw dict (e.g. loaded directly from JSON). 
+ + Unknown keys are ignored for forward compatibility with datasets that + carry additional fields (e.g. ``total_videos`` from v2.x). A warning is + logged when such fields are present. + """ + known = {f.name for f in dataclasses.fields(cls)} + unknown = sorted(k for k in data if k not in known) + if unknown: + logger.warning(f"Unknown fields in DatasetInfo: {unknown}. These will be ignored.") + return cls(**{k: v for k, v in data.items() if k in known}) + + # --------------------------------------------------------------------------- + # Temporary dict-style compatibility layer + # Allows existing ``info["key"]`` call-sites to keep working without changes. + # Once all callers have been migrated to attribute access, remove these. + # --------------------------------------------------------------------------- + def __getitem__(self, key: str): + import warnings + + warnings.warn( + f"Accessing DatasetInfo with dict-style syntax info['{key}'] is deprecated. " + f"Use attribute access info.{key} instead.", + DeprecationWarning, + stacklevel=2, + ) + try: + return getattr(self, key) + except AttributeError as err: + raise KeyError(key) from err + + def __setitem__(self, key: str, value) -> None: + import warnings + + warnings.warn( + f"Setting DatasetInfo with dict-style syntax info['{key}'] = ... is deprecated. " + f"Use attribute assignment info.{key} = ... 
instead.", + DeprecationWarning, + stacklevel=2, + ) + if not hasattr(self, key): + raise KeyError(f"DatasetInfo has no field '{key}'") + setattr(self, key, value) + + def __contains__(self, key: str) -> bool: + """Check if a field exists (dict-like interface).""" + return hasattr(self, key) + + def get(self, key: str, default=None): + """Get attribute value with default fallback (dict-like interface).""" + try: + return getattr(self, key) + except AttributeError: + return default + + def has_legacy_hub_download_metadata(root: Path) -> bool: """Return ``True`` when *root* looks like a legacy Hub ``local_dir`` mirror. @@ -293,7 +415,7 @@ def create_branch(repo_id: str, *, branch: str, repo_type: str | None = None) -> def create_lerobot_dataset_card( tags: list | None = None, - dataset_info: dict | None = None, + dataset_info: DatasetInfo | None = None, **kwargs, ) -> DatasetCard: """Create a `DatasetCard` for a LeRobot dataset. @@ -304,7 +426,7 @@ def create_lerobot_dataset_card( Args: tags (list | None): A list of tags to add to the dataset card. - dataset_info (dict | None): The dataset's info dictionary, which will + dataset_info (DatasetInfo | None): The dataset's info object, which will be displayed on the card. **kwargs: Additional keyword arguments to populate the card template. 
@@ -317,7 +439,7 @@ def create_lerobot_dataset_card( card_tags += tags if dataset_info: dataset_structure = "[meta/info.json](meta/info.json):\n" - dataset_structure += f"```json\n{json.dumps(dataset_info, indent=4)}\n```\n" + dataset_structure += f"```json\n{json.dumps(dataset_info.to_dict(), indent=4)}\n```\n" kwargs = {**kwargs, "dataset_structure": dataset_structure} card_data = DatasetCardData( license=kwargs.get("license"), diff --git a/src/lerobot/policies/__init__.py b/src/lerobot/policies/__init__.py index e138a84d9..e811eef28 100644 --- a/src/lerobot/policies/__init__.py +++ b/src/lerobot/policies/__init__.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from lerobot.utils.action_interpolator import ActionInterpolator as ActionInterpolator + from .act.configuration_act import ACTConfig as ACTConfig from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig from .factory import get_policy_class, make_policy, make_policy_config, make_pre_post_processors @@ -21,10 +23,7 @@ from .pi0.configuration_pi0 import PI0Config as PI0Config from .pi0_fast.configuration_pi0_fast import PI0FastConfig as PI0FastConfig from .pi05.configuration_pi05 import PI05Config as PI05Config from .pretrained import PreTrainedPolicy as PreTrainedPolicy -from .rtc import ActionInterpolator as ActionInterpolator from .sac.configuration_sac import SACConfig as SACConfig -from .sac.reward_model.configuration_classifier import RewardClassifierConfig as RewardClassifierConfig -from .sarm.configuration_sarm import SARMConfig as SARMConfig from .smolvla.configuration_smolvla import SmolVLAConfig as SmolVLAConfig from .tdmpc.configuration_tdmpc import TDMPCConfig as TDMPCConfig from .utils import make_robot_action, prepare_observation_for_inference @@ -45,9 +44,7 @@ __all__ = [ "PI0Config", "PI0FastConfig", "PI05Config", - "RewardClassifierConfig", "SACConfig", - "SARMConfig", "SmolVLAConfig", 
"TDMPCConfig", "VQBeTConfig", diff --git a/src/lerobot/policies/act/modeling_act.py b/src/lerobot/policies/act/modeling_act.py index 0120258ee..5651fbfb1 100644 --- a/src/lerobot/policies/act/modeling_act.py +++ b/src/lerobot/policies/act/modeling_act.py @@ -142,9 +142,10 @@ class ACTPolicy(PreTrainedPolicy): actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch) - l1_loss = ( - F.l1_loss(batch[ACTION], actions_hat, reduction="none") * ~batch["action_is_pad"].unsqueeze(-1) - ).mean() + abs_err = F.l1_loss(batch[ACTION], actions_hat, reduction="none") + valid_mask = ~batch["action_is_pad"].unsqueeze(-1) + num_valid = valid_mask.sum() * abs_err.shape[-1] + l1_loss = (abs_err * valid_mask).sum() / num_valid.clamp_min(1) loss_dict = {"l1_loss": l1_loss.item()} if self.config.use_vae: diff --git a/src/lerobot/policies/diffusion/modeling_diffusion.py b/src/lerobot/policies/diffusion/modeling_diffusion.py index 03203ffc8..9fbe1f703 100644 --- a/src/lerobot/policies/diffusion/modeling_diffusion.py +++ b/src/lerobot/policies/diffusion/modeling_diffusion.py @@ -380,7 +380,9 @@ class DiffusionModel(nn.Module): f"{self.config.do_mask_loss_for_padding=}." 
) in_episode_bound = ~batch["action_is_pad"] - loss = loss * in_episode_bound.unsqueeze(-1) + mask = in_episode_bound.unsqueeze(-1) + num_valid = mask.sum() * loss.shape[-1] + return (loss * mask).sum() / num_valid.clamp_min(1) return loss.mean() diff --git a/src/lerobot/policies/factory.py b/src/lerobot/policies/factory.py index 611a6e9bc..754e0c857 100644 --- a/src/lerobot/policies/factory.py +++ b/src/lerobot/policies/factory.py @@ -52,8 +52,6 @@ from .pi0.configuration_pi0 import PI0Config from .pi05.configuration_pi05 import PI05Config from .pretrained import PreTrainedPolicy from .sac.configuration_sac import SACConfig -from .sac.reward_model.configuration_classifier import RewardClassifierConfig -from .sarm.configuration_sarm import SARMConfig from .smolvla.configuration_smolvla import SmolVLAConfig from .tdmpc.configuration_tdmpc import TDMPCConfig from .utils import validate_visual_features_consistency @@ -89,7 +87,7 @@ def get_policy_class(name: str) -> type[PreTrainedPolicy]: Args: name: The name of the policy. Supported names are "tdmpc", "diffusion", "act", - "multi_task_dit", "vqbet", "pi0", "pi05", "sac", "reward_classifier", "smolvla", "wall_x". + "multi_task_dit", "vqbet", "pi0", "pi05", "sac", "smolvla", "wall_x". Returns: The policy class corresponding to the given name. @@ -132,18 +130,10 @@ def get_policy_class(name: str) -> type[PreTrainedPolicy]: from .sac.modeling_sac import SACPolicy return SACPolicy - elif name == "reward_classifier": - from .sac.reward_model.modeling_classifier import Classifier - - return Classifier elif name == "smolvla": from .smolvla.modeling_smolvla import SmolVLAPolicy return SmolVLAPolicy - elif name == "sarm": - from .sarm.modeling_sarm import SARMRewardModel - - return SARMRewardModel elif name == "groot": from .groot.modeling_groot import GrootPolicy @@ -173,7 +163,7 @@ def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig: Args: policy_type: The type of the policy. 
Supported types include "tdmpc", "multi_task_dit", "diffusion", "act", "vqbet", "pi0", "pi05", "sac", - "smolvla", "reward_classifier", "wall_x". + "smolvla", "wall_x". **kwargs: Keyword arguments to be passed to the configuration class constructor. Returns: @@ -200,8 +190,6 @@ def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig: return SACConfig(**kwargs) elif policy_type == "smolvla": return SmolVLAConfig(**kwargs) - elif policy_type == "reward_classifier": - return RewardClassifierConfig(**kwargs) elif policy_type == "groot": return GrootConfig(**kwargs) elif policy_type == "xvla": @@ -378,14 +366,6 @@ def make_pre_post_processors( dataset_stats=kwargs.get("dataset_stats"), ) - elif isinstance(policy_cfg, RewardClassifierConfig): - from .sac.reward_model.processor_classifier import make_classifier_processor - - processors = make_classifier_processor( - config=policy_cfg, - dataset_stats=kwargs.get("dataset_stats"), - ) - elif isinstance(policy_cfg, SmolVLAConfig): from .smolvla.processor_smolvla import make_smolvla_pre_post_processors @@ -394,14 +374,6 @@ def make_pre_post_processors( dataset_stats=kwargs.get("dataset_stats"), ) - elif isinstance(policy_cfg, SARMConfig): - from .sarm.processor_sarm import make_sarm_pre_post_processors - - processors = make_sarm_pre_post_processors( - config=policy_cfg, - dataset_stats=kwargs.get("dataset_stats"), - dataset_meta=kwargs.get("dataset_meta"), - ) elif isinstance(policy_cfg, GrootConfig): from .groot.processor_groot import make_groot_pre_post_processors @@ -542,7 +514,7 @@ def make_policy( logging.info("Loading policy's PEFT adapter.") - peft_pretrained_path = cfg.pretrained_path + peft_pretrained_path = str(cfg.pretrained_path) peft_config = PeftConfig.from_pretrained(peft_pretrained_path) kwargs["pretrained_name_or_path"] = peft_config.base_model_name_or_path @@ -555,7 +527,9 @@ def make_policy( ) policy = policy_cls.from_pretrained(**kwargs) - policy = PeftModel.from_pretrained(policy, 
peft_pretrained_path, config=peft_config) + policy = PeftModel.from_pretrained( + policy, peft_pretrained_path, config=peft_config, is_trainable=True + ) else: # Make a fresh policy. diff --git a/src/lerobot/policies/groot/action_head/flow_matching_action_head.py b/src/lerobot/policies/groot/action_head/flow_matching_action_head.py index 2c1ca6014..986820670 100644 --- a/src/lerobot/policies/groot/action_head/flow_matching_action_head.py +++ b/src/lerobot/policies/groot/action_head/flow_matching_action_head.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from dataclasses import dataclass, field +from dataclasses import field from typing import TYPE_CHECKING import torch @@ -109,7 +109,6 @@ class MultiEmbodimentActionEncoder(nn.Module): return x -@dataclass class FlowmatchingActionHeadConfig(PretrainedConfig): """NOTE: N1.5 uses XEmbFlowmatchingPolicyHeadConfig as action head""" diff --git a/src/lerobot/policies/groot/groot_n1.py b/src/lerobot/policies/groot/groot_n1.py index abcbb8a8c..381c5fbd6 100644 --- a/src/lerobot/policies/groot/groot_n1.py +++ b/src/lerobot/policies/groot/groot_n1.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from dataclasses import dataclass, field from pathlib import Path from typing import TYPE_CHECKING @@ -174,17 +173,14 @@ N_COLOR_CHANNELS = 3 # config -@dataclass class GR00TN15Config(PretrainedConfig): model_type = "gr00t_n1_5" - backbone_cfg: dict = field(init=False, metadata={"help": "Backbone configuration."}) - action_head_cfg: dict = field(init=False, metadata={"help": "Action head configuration."}) - - action_horizon: int = field(init=False, metadata={"help": "Action horizon."}) - - action_dim: int = field(init=False, metadata={"help": "Action dimension."}) - compute_dtype: str = field(default="float32", metadata={"help": "Compute dtype."}) + backbone_cfg: dict + action_head_cfg: dict + action_horizon: int + action_dim: int + compute_dtype: str = "float32" def __init__(self, **kwargs): super().__init__(**kwargs) diff --git a/src/lerobot/policies/multi_task_dit/modeling_multi_task_dit.py b/src/lerobot/policies/multi_task_dit/modeling_multi_task_dit.py index 366b271c0..ceb4e211c 100644 --- a/src/lerobot/policies/multi_task_dit/modeling_multi_task_dit.py +++ b/src/lerobot/policies/multi_task_dit/modeling_multi_task_dit.py @@ -688,8 +688,9 @@ class DiffusionObjective(nn.Module): loss = F.mse_loss(predicted, target, reduction="none") if self.do_mask_loss_for_padding and "action_is_pad" in batch: - valid_actions = ~batch["action_is_pad"] - loss = loss * valid_actions.unsqueeze(-1) + mask = ~batch["action_is_pad"].unsqueeze(-1) + num_valid = mask.sum() * loss.shape[-1] + return (loss * mask).sum() / num_valid.clamp_min(1) return loss.mean() @@ -752,8 +753,9 @@ class FlowMatchingObjective(nn.Module): loss = F.mse_loss(predicted_velocity, target_velocity, reduction="none") if self.do_mask_loss_for_padding and "action_is_pad" in batch: - valid_mask = ~batch["action_is_pad"] - loss = loss * valid_mask.unsqueeze(-1) + mask = ~batch["action_is_pad"].unsqueeze(-1) + num_valid = mask.sum() * loss.shape[-1] + return (loss * mask).sum() / num_valid.clamp_min(1) return 
loss.mean() diff --git a/src/lerobot/policies/pi0/modeling_pi0.py b/src/lerobot/policies/pi0/modeling_pi0.py index 3534c7ae8..510af0796 100644 --- a/src/lerobot/policies/pi0/modeling_pi0.py +++ b/src/lerobot/policies/pi0/modeling_pi0.py @@ -444,13 +444,13 @@ class PaliGemmaWithExpertModel( if image.dtype != torch.float32: image = image.to(torch.float32) image_outputs = self.paligemma.model.get_image_features(image) - features = image_outputs.pooler_output * self.paligemma.config.text_config.hidden_size**0.5 + features = image_outputs.pooler_output if features.dtype != out_dtype: features = features.to(out_dtype) return features def embed_language_tokens(self, tokens: torch.Tensor): - return self.paligemma.model.language_model.embed_tokens(tokens) + return self.paligemma.model.language_model.get_input_embeddings()(tokens) def forward( self, @@ -666,8 +666,7 @@ class PI0Pytorch(nn.Module): # see openpi `PI0Pytorch` # Process language tokens def lang_embed_func(lang_tokens): lang_emb = self.paligemma_with_expert.embed_language_tokens(lang_tokens) - lang_emb_dim = lang_emb.shape[-1] - return lang_emb * math.sqrt(lang_emb_dim) + return lang_emb lang_emb = self._apply_checkpoint(lang_embed_func, lang_tokens) embs.append(lang_emb) @@ -748,16 +747,8 @@ class PI0Pytorch(nn.Module): # see openpi `PI0Pytorch` return embs, pad_masks, att_masks, adarms_cond - def forward( - self, images, img_masks, lang_tokens, lang_masks, state, actions, noise=None, time=None - ) -> Tensor: + def forward(self, images, img_masks, lang_tokens, lang_masks, state, actions, noise, time) -> Tensor: """Do a full training forward pass and compute the loss.""" - if noise is None: - noise = self.sample_noise(actions.shape, actions.device) - - if time is None: - time = self.sample_time(actions.shape[0], actions.device) - time_expanded = time[:, None, None] x_t = time_expanded * noise + (1 - time_expanded) * actions u_t = noise - actions @@ -1292,8 +1283,11 @@ class PI0Policy(PreTrainedPolicy): state = 
self.prepare_state(batch) actions = self.prepare_action(batch) + noise = self.model.sample_noise(actions.shape, actions.device) + time = self.model.sample_time(actions.shape[0], actions.device) + # Compute loss - losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions) + losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions, noise, time) # Truncate losses to actual action dimensions original_action_dim = self.config.output_features[ACTION].shape[0] diff --git a/src/lerobot/policies/pi05/modeling_pi05.py b/src/lerobot/policies/pi05/modeling_pi05.py index 56786fbcd..bb206d608 100644 --- a/src/lerobot/policies/pi05/modeling_pi05.py +++ b/src/lerobot/policies/pi05/modeling_pi05.py @@ -728,14 +728,8 @@ class PI05Pytorch(nn.Module): # see openpi `PI0Pytorch` return embs, pad_masks, att_masks, adarms_cond - def forward(self, images, img_masks, tokens, masks, actions, noise=None, time=None) -> Tensor: + def forward(self, images, img_masks, tokens, masks, actions, noise, time) -> Tensor: """Do a full training forward pass and compute the loss.""" - if noise is None: - noise = self.sample_noise(actions.shape, actions.device) - - if time is None: - time = self.sample_time(actions.shape[0], actions.device) - time_expanded = time[:, None, None] x_t = time_expanded * noise + (1 - time_expanded) * actions u_t = noise - actions @@ -1262,8 +1256,11 @@ class PI05Policy(PreTrainedPolicy): actions = self.prepare_action(batch) + noise = self.model.sample_noise(actions.shape, actions.device) + time = self.model.sample_time(actions.shape[0], actions.device) + # Compute loss (no separate state needed for PI05) - losses = self.model.forward(images, img_masks, tokens, masks, actions) + losses = self.model.forward(images, img_masks, tokens, masks, actions, noise, time) # Truncate losses to actual action dimensions original_action_dim = self.config.output_features[ACTION].shape[0] diff --git 
a/src/lerobot/policies/pi0_fast/modeling_pi0_fast.py b/src/lerobot/policies/pi0_fast/modeling_pi0_fast.py index a49828ad1..d9342eb24 100644 --- a/src/lerobot/policies/pi0_fast/modeling_pi0_fast.py +++ b/src/lerobot/policies/pi0_fast/modeling_pi0_fast.py @@ -16,7 +16,6 @@ import builtins import logging -import math from collections import deque from pathlib import Path from typing import TYPE_CHECKING, Literal, TypedDict, Unpack @@ -227,6 +226,7 @@ class PI0FastPaliGemma(nn.Module): # forward(..., adarms_cond=...) is supported (same as pi0/pi05). if use_adarms[0]: text_config = self.paligemma.config.text_config + del self.paligemma.model.language_model self.paligemma.model.language_model = PiGemmaModel(text_config) self.to_bfloat16_for_selected_params(precision) @@ -260,13 +260,13 @@ class PI0FastPaliGemma(nn.Module): if image.dtype != torch.float32: image = image.to(torch.float32) image_outputs = self.paligemma.model.get_image_features(image) - features = image_outputs.pooler_output * self.paligemma.config.text_config.hidden_size**0.5 + features = image_outputs.pooler_output if features.dtype != out_dtype: features = features.to(out_dtype) return features def embed_language_tokens(self, tokens: torch.Tensor): - return self.paligemma.model.language_model.embed_tokens(tokens) + return self.paligemma.model.language_model.get_input_embeddings()(tokens) def forward( self, @@ -416,8 +418,7 @@ class PI0FastPytorch(nn.Module): # see openpi `PI0Pytorch` # Process language instruction tokens def lang_embed_func(tokens): lang_emb = self.paligemma_with_expert.embed_language_tokens(tokens) - lang_emb_dim = lang_emb.shape[-1] - return lang_emb * math.sqrt(lang_emb_dim) + return lang_emb lang_emb = self._apply_checkpoint(lang_embed_func, tokens) embs.append(lang_emb) @@ -431,8 +432,7 @@ class PI0FastPytorch(nn.Module): # see openpi `PI0Pytorch` def fast_action_embed_func(fast_action_tokens): fast_emb = 
self.paligemma_with_expert.embed_language_tokens(fast_action_tokens) - fast_emb_dim = fast_emb.shape[-1] - return fast_emb * math.sqrt(fast_emb_dim) + return fast_emb fast_action_emb = self._apply_checkpoint(fast_action_embed_func, fast_action_tokens) embs.append(fast_action_emb) @@ -665,7 +665,6 @@ class PI0FastPytorch(nn.Module): # see openpi `PI0Pytorch` if t < max_decoding_steps - 1: # embed the newly generated token next_token_emb = self.paligemma_with_expert.embed_language_tokens(next_token) - next_token_emb = next_token_emb * math.sqrt(next_token_emb.shape[-1]) if prefix_embs.dtype == torch.bfloat16: next_token_emb = next_token_emb.to(dtype=torch.bfloat16) @@ -770,7 +769,6 @@ class PI0FastPytorch(nn.Module): # see openpi `PI0Pytorch` # Embed the single previous token # We use embed_language_tokens directly to avoid overhead of full prefix embedding next_token_emb = self.paligemma_with_expert.embed_language_tokens(next_token) - next_token_emb = next_token_emb * math.sqrt(next_token_emb.shape[-1]) if prefix_embs.dtype == torch.bfloat16: next_token_emb = next_token_emb.to(dtype=torch.bfloat16) diff --git a/src/lerobot/policies/pi_gemma.py b/src/lerobot/policies/pi_gemma.py index 05f031d08..9986f9b79 100644 --- a/src/lerobot/policies/pi_gemma.py +++ b/src/lerobot/policies/pi_gemma.py @@ -197,6 +197,9 @@ class PiGemmaModel(GemmaModel): # type: ignore[misc] def __init__(self, config: GemmaConfig, **kwargs): super().__init__(config, **kwargs) + # Free parent-allocated layers/norm before replacing to avoid ~2x peak memory. 
+ del self.layers + del self.norm # if not getattr(config, "use_adarms", False): # return cond_dim = getattr(config, "adarms_cond_dim", None) @@ -328,6 +331,7 @@ class PiGemmaForCausalLM(GemmaForCausalLM): # type: ignore[misc] def __init__(self, config: GemmaConfig, **kwargs): super().__init__(config, **kwargs) + del self.model self.model = PiGemmaModel(config) @@ -336,6 +340,7 @@ class PaliGemmaModelWithPiGemma(PaliGemmaModel): def __init__(self, config): super().__init__(config) + del self.language_model self.language_model = PiGemmaModel(config.text_config) @@ -344,6 +349,7 @@ class PaliGemmaForConditionalGenerationWithPiGemma(PaliGemmaForConditionalGenera def __init__(self, config): super().__init__(config) + del self.model self.model = PaliGemmaModelWithPiGemma(config) # Make modules available through conditional class for BC diff --git a/src/lerobot/policies/rtc/__init__.py b/src/lerobot/policies/rtc/__init__.py index 7a29dcac0..16417b3cd 100644 --- a/src/lerobot/policies/rtc/__init__.py +++ b/src/lerobot/policies/rtc/__init__.py @@ -19,6 +19,7 @@ from .action_queue import ActionQueue from .configuration_rtc import RTCConfig from .latency_tracker import LatencyTracker from .modeling_rtc import RTCProcessor +from .relative import reanchor_relative_rtc_prefix __all__ = [ "ActionInterpolator", @@ -26,4 +27,5 @@ __all__ = [ "LatencyTracker", "RTCConfig", "RTCProcessor", + "reanchor_relative_rtc_prefix", ] diff --git a/src/lerobot/policies/rtc/action_interpolator.py b/src/lerobot/policies/rtc/action_interpolator.py index 222dc33b5..c30481d3b 100644 --- a/src/lerobot/policies/rtc/action_interpolator.py +++ b/src/lerobot/policies/rtc/action_interpolator.py @@ -1,116 +1,4 @@ -# Copyright 2025 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Moved to lerobot.utils.action_interpolator — re-exported for backwards compatibility. +from lerobot.utils.action_interpolator import ActionInterpolator -"""Action interpolation for smoother robot control. - -Provides configurable Nx control rate by interpolating between consecutive actions. -Useful with RTC and action-chunking policies to reduce jerkiness. -""" - -from torch import Tensor - - -class ActionInterpolator: - """Interpolates between consecutive actions for smoother control. - - When enabled with multiplier N, produces N actions per policy action - by linearly interpolating between the previous and current action. - - Example with multiplier=3: - prev_action -> [1/3 interpolated, 2/3 interpolated, current_action] - - This effectively multiplies the control rate for smoother motion. - - Usage: - interpolator = ActionInterpolator(multiplier=2) # 2x control rate - - # In control loop: - if interpolator.needs_new_action(): - new_action = queue.get() - if new_action: - interpolator.add(new_action.cpu()) - - action = interpolator.get() - if action: - robot.send_action(action) - """ - - def __init__(self, multiplier: int = 1): - """Initialize the interpolator. - - Args: - multiplier: Control rate multiplier (1 = no interpolation, 2 = 2x, 3 = 3x, etc.) 
- """ - if multiplier < 1: - raise ValueError(f"multiplier must be >= 1, got {multiplier}") - self.multiplier = multiplier - self._prev: Tensor | None = None - self._buffer: list[Tensor] = [] - self._idx = 0 - - @property - def enabled(self) -> bool: - """Whether interpolation is active (multiplier > 1).""" - return self.multiplier > 1 - - def reset(self): - """Reset interpolation state (call between episodes).""" - self._prev = None - self._buffer = [] - self._idx = 0 - - def needs_new_action(self) -> bool: - """Check if a new action is needed from the queue.""" - return self._idx >= len(self._buffer) - - def add(self, action: Tensor) -> None: - """Add a new action and compute interpolated sequence. - - Args: - action: New action tensor from policy/queue (already on CPU). - """ - if self.multiplier > 1 and self._prev is not None: - self._buffer = [] - for i in range(1, self.multiplier + 1): - t = i / self.multiplier - interp = self._prev + t * (action - self._prev) - self._buffer.append(interp) - else: - # First step: no previous action yet, so run at base FPS without interpolation. - self._buffer = [action.clone()] - self._prev = action.clone() - self._idx = 0 - - def get(self) -> Tensor | None: - """Get the next interpolated action. - - Returns: - Next action tensor, or None if buffer is exhausted. - """ - if self._idx >= len(self._buffer): - return None - action = self._buffer[self._idx] - self._idx += 1 - return action - - def get_control_interval(self, fps: float) -> float: - """Get the control interval based on interpolation multiplier. - - Args: - fps: Base frames per second. - - Returns: - Control interval in seconds (divided by multiplier). 
- """ - return 1.0 / (fps * self.multiplier) +__all__ = ["ActionInterpolator"] diff --git a/src/lerobot/policies/rtc/action_queue.py b/src/lerobot/policies/rtc/action_queue.py index dbbdc41df..199257b12 100644 --- a/src/lerobot/policies/rtc/action_queue.py +++ b/src/lerobot/policies/rtc/action_queue.py @@ -92,10 +92,10 @@ class ActionQueue: Returns: int: Number of unconsumed actions. """ - if self.queue is None: - return 0 - length = len(self.queue) - return length - self.last_index + with self.lock: + if self.queue is None: + return 0 + return len(self.queue) - self.last_index def empty(self) -> bool: """Check if the queue is empty. @@ -103,11 +103,10 @@ class ActionQueue: Returns: bool: True if no actions remain, False otherwise. """ - if self.queue is None: - return True - - length = len(self.queue) - return length - self.last_index <= 0 + with self.lock: + if self.queue is None: + return True + return len(self.queue) - self.last_index <= 0 def get_action_index(self) -> int: """Get the current action consumption index. @@ -115,7 +114,8 @@ class ActionQueue: Returns: int: Index of the next action to be consumed. """ - return self.last_index + with self.lock: + return self.last_index def get_left_over(self) -> Tensor | None: """Get leftover original actions for RTC prev_chunk_left_over. diff --git a/src/lerobot/policies/rtc/configuration_rtc.py b/src/lerobot/policies/rtc/configuration_rtc.py index c70fe3de0..3d71edf26 100644 --- a/src/lerobot/policies/rtc/configuration_rtc.py +++ b/src/lerobot/policies/rtc/configuration_rtc.py @@ -35,7 +35,7 @@ class RTCConfig: """ # Infrastructure - enabled: bool = False + enabled: bool = True # Core RTC settings # Todo change to exp diff --git a/src/lerobot/policies/rtc/relative.py b/src/lerobot/policies/rtc/relative.py new file mode 100644 index 000000000..61063f3e2 --- /dev/null +++ b/src/lerobot/policies/rtc/relative.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Relative-action helpers for Real-Time Chunking (RTC).""" + +from __future__ import annotations + +import torch + +from lerobot.processor import ( + NormalizerProcessorStep, + RelativeActionsProcessorStep, + TransitionKey, + create_transition, + to_relative_actions, +) + + +def reanchor_relative_rtc_prefix( + prev_actions_absolute: torch.Tensor, + current_state: torch.Tensor, + relative_step: RelativeActionsProcessorStep, + normalizer_step: NormalizerProcessorStep | None, + policy_device: torch.device | str, +) -> torch.Tensor: + """Convert absolute leftover actions into model-space for relative-action RTC policies. + + When using relative actions, the RTC prefix (previous chunk's unexecuted tail) + is stored in absolute coordinates. Before feeding it back to the policy, this + helper re-expresses those actions relative to the robot's current joint state + and optionally normalizes them so the policy receives correctly scaled inputs. 
+ """ + state = current_state.detach().cpu() + if state.dim() == 1: + state = state.unsqueeze(0) + + action_cpu = prev_actions_absolute.detach().cpu() + mask = relative_step._build_mask(action_cpu.shape[-1]) + relative_actions = to_relative_actions(action_cpu, state, mask) + + transition = create_transition(action=relative_actions) + if normalizer_step is not None: + transition = normalizer_step(transition) + + return transition[TransitionKey.ACTION].to(policy_device) diff --git a/src/lerobot/policies/sarm/README.md b/src/lerobot/policies/sarm/README.md deleted file mode 120000 index 18495860d..000000000 --- a/src/lerobot/policies/sarm/README.md +++ /dev/null @@ -1 +0,0 @@ -../../../../docs/source/policy_sarm_README.md \ No newline at end of file diff --git a/src/lerobot/policies/smolvla/modeling_smolvla.py b/src/lerobot/policies/smolvla/modeling_smolvla.py index 8ddb023da..8fd60c1fc 100644 --- a/src/lerobot/policies/smolvla/modeling_smolvla.py +++ b/src/lerobot/policies/smolvla/modeling_smolvla.py @@ -394,13 +394,21 @@ class SmolVLAPolicy(PreTrainedPolicy): loss_dict["losses_after_rm_padding"] = losses.clone().mean().item() if reduction == "none": - # Return per-sample losses (B,) by averaging over time and action dims - per_sample_loss = losses.mean(dim=(1, 2)) + # Return per-sample losses (B,) by averaging over valid (time, action) entries + if actions_is_pad is None: + per_sample_loss = losses.mean(dim=(1, 2)) + else: + num_valid = ((~actions_is_pad).sum(dim=1) * losses.shape[-1]).clamp_min(1) + per_sample_loss = losses.sum(dim=(1, 2)) / num_valid loss_dict["loss"] = per_sample_loss.mean().item() return per_sample_loss, loss_dict else: - # Default: return scalar mean loss - loss = losses.mean() + # Default: return scalar mean loss over valid (time, action) entries + if actions_is_pad is None: + loss = losses.mean() + else: + num_valid = ((~actions_is_pad).sum() * losses.shape[-1]).clamp_min(1) + loss = losses.sum() / num_valid loss_dict["loss"] = loss.item() 
return loss, loss_dict diff --git a/src/lerobot/policies/wall_x/qwen_model/qwen2_5_vl_moe.py b/src/lerobot/policies/wall_x/qwen_model/qwen2_5_vl_moe.py index ecf3eb371..a80096514 100644 --- a/src/lerobot/policies/wall_x/qwen_model/qwen2_5_vl_moe.py +++ b/src/lerobot/policies/wall_x/qwen_model/qwen2_5_vl_moe.py @@ -22,7 +22,7 @@ from transformers.utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, - is_flash_attn_greater_or_equal_2_10, + is_flash_attn_greater_or_equal, is_torchdynamo_compiling, logging, replace_return_docstrings, @@ -890,7 +890,7 @@ class Qwen2_5_VLFlashAttention2(Qwen2_5_VLAttention): # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). - self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal("2.1.0") def forward( self, diff --git a/src/lerobot/policies/xvla/modeling_florence2.py b/src/lerobot/policies/xvla/modeling_florence2.py index e33efe5c3..81f9c8234 100644 --- a/src/lerobot/policies/xvla/modeling_florence2.py +++ b/src/lerobot/policies/xvla/modeling_florence2.py @@ -45,7 +45,7 @@ from transformers.utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, - is_flash_attn_greater_or_equal_2_10, + is_flash_attn_greater_or_equal, logging, replace_return_docstrings, ) @@ -909,7 +909,7 @@ class Florence2FlashAttention2(Florence2Attention): # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. 
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). - self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal("2.1.0") def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) diff --git a/src/lerobot/processor/hil_processor.py b/src/lerobot/processor/hil_processor.py index c6f98c689..49dbb8106 100644 --- a/src/lerobot/processor/hil_processor.py +++ b/src/lerobot/processor/hil_processor.py @@ -557,7 +557,7 @@ class RewardClassifierProcessorStep(ProcessorStep): def __post_init__(self): """Initializes the reward classifier model after the dataclass is created.""" if self.pretrained_path is not None: - from lerobot.policies.sac.reward_model.modeling_classifier import Classifier + from lerobot.rewards.classifier.modeling_classifier import Classifier self.reward_classifier = Classifier.from_pretrained(self.pretrained_path) self.reward_classifier.to(self.device) diff --git a/src/lerobot/processor/relative_action_processor.py b/src/lerobot/processor/relative_action_processor.py index d9f97f2c6..e1e65acb1 100644 --- a/src/lerobot/processor/relative_action_processor.py +++ b/src/lerobot/processor/relative_action_processor.py @@ -142,6 +142,10 @@ class RelativeActionsProcessorStep(ProcessorStep): new_transition[TransitionKey.ACTION] = to_relative_actions(action, state, mask) return new_transition + def get_cached_state(self) -> torch.Tensor | None: + """Return the cached ``observation.state`` used as the reference point for relative/absolute action 
conversions.""" + return self._last_state + def get_config(self) -> dict[str, Any]: return { "enabled": self.enabled, @@ -182,7 +186,8 @@ class AbsoluteActionsProcessorStep(ProcessorStep): "but relative_step is None. Ensure relative_step is set when constructing the postprocessor." ) - if self.relative_step._last_state is None: + cached_state = self.relative_step.get_cached_state() + if cached_state is None: raise RuntimeError( "AbsoluteActionsProcessorStep requires state from RelativeActionsProcessorStep " "but no state has been cached. Ensure the preprocessor runs before the postprocessor." @@ -194,9 +199,7 @@ class AbsoluteActionsProcessorStep(ProcessorStep): return new_transition mask = self.relative_step._build_mask(action.shape[-1]) - new_transition[TransitionKey.ACTION] = to_absolute_actions( - action, self.relative_step._last_state, mask - ) + new_transition[TransitionKey.ACTION] = to_absolute_actions(action, cached_state, mask) return new_transition def get_config(self) -> dict[str, Any]: diff --git a/src/lerobot/rewards/__init__.py b/src/lerobot/rewards/__init__.py new file mode 100644 index 000000000..203fe2ee1 --- /dev/null +++ b/src/lerobot/rewards/__init__.py @@ -0,0 +1,36 @@ +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .classifier.configuration_classifier import RewardClassifierConfig as RewardClassifierConfig +from .factory import ( + get_reward_model_class as get_reward_model_class, + make_reward_model as make_reward_model, + make_reward_model_config as make_reward_model_config, + make_reward_pre_post_processors as make_reward_pre_post_processors, +) +from .pretrained import PreTrainedRewardModel as PreTrainedRewardModel +from .sarm.configuration_sarm import SARMConfig as SARMConfig + +__all__ = [ + # Configuration classes + "RewardClassifierConfig", + "SARMConfig", + # Base class + "PreTrainedRewardModel", + # Factory functions + "get_reward_model_class", + "make_reward_model", + "make_reward_model_config", + "make_reward_pre_post_processors", +] diff --git a/src/lerobot/policies/sac/reward_model/__init__.py b/src/lerobot/rewards/classifier/__init__.py similarity index 100% rename from src/lerobot/policies/sac/reward_model/__init__.py rename to src/lerobot/rewards/classifier/__init__.py diff --git a/src/lerobot/policies/sac/reward_model/configuration_classifier.py b/src/lerobot/rewards/classifier/configuration_classifier.py similarity index 92% rename from src/lerobot/policies/sac/reward_model/configuration_classifier.py rename to src/lerobot/rewards/classifier/configuration_classifier.py index 3a5bfa424..a618a2cf7 100644 --- a/src/lerobot/policies/sac/reward_model/configuration_classifier.py +++ b/src/lerobot/rewards/classifier/configuration_classifier.py @@ -1,5 +1,3 @@ -# !/usr/bin/env python - # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,14 +13,15 @@ # limitations under the License. 
from dataclasses import dataclass, field -from lerobot.configs import NormalizationMode, PreTrainedConfig +from lerobot.configs import NormalizationMode +from lerobot.configs.rewards import RewardModelConfig from lerobot.optim import AdamWConfig, LRSchedulerConfig, OptimizerConfig from lerobot.utils.constants import OBS_IMAGE -@PreTrainedConfig.register_subclass(name="reward_classifier") +@RewardModelConfig.register_subclass(name="reward_classifier") @dataclass -class RewardClassifierConfig(PreTrainedConfig): +class RewardClassifierConfig(RewardModelConfig): """Configuration for the Reward Classifier model.""" name: str = "reward_classifier" diff --git a/src/lerobot/policies/sac/reward_model/modeling_classifier.py b/src/lerobot/rewards/classifier/modeling_classifier.py similarity index 88% rename from src/lerobot/policies/sac/reward_model/modeling_classifier.py rename to src/lerobot/rewards/classifier/modeling_classifier.py index c8b7efe58..bedfffbe9 100644 --- a/src/lerobot/policies/sac/reward_model/modeling_classifier.py +++ b/src/lerobot/rewards/classifier/modeling_classifier.py @@ -1,5 +1,3 @@ -# !/usr/bin/env python - # Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,11 +17,10 @@ import logging import torch from torch import Tensor, nn +from lerobot.rewards.classifier.configuration_classifier import RewardClassifierConfig +from lerobot.rewards.pretrained import PreTrainedRewardModel from lerobot.utils.constants import OBS_IMAGE, REWARD -from ...pretrained import PreTrainedPolicy -from .configuration_classifier import RewardClassifierConfig - class ClassifierOutput: """Wrapper for classifier outputs with additional metadata.""" @@ -99,7 +96,7 @@ class SpatialLearnedEmbeddings(nn.Module): return output -class Classifier(PreTrainedPolicy): +class Classifier(PreTrainedRewardModel): """Image classifier built on top of a pre-trained encoder.""" name = "reward_classifier" @@ -235,6 +232,16 @@ class Classifier(PreTrainedPolicy): return ClassifierOutput(logits=logits, probabilities=probabilities, hidden_states=encoder_outputs) + def compute_reward(self, batch: dict[str, Tensor]) -> Tensor: + """Returns 1.0 for success, 0.0 for failure based on image observations.""" + images = [batch[key] for key in self.config.input_features if key.startswith(OBS_IMAGE)] + output = self.predict(images) + + if self.config.num_classes == 2: + return (output.probabilities > 0.5).float() + else: + return torch.argmax(output.probabilities, dim=1).float() + def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict[str, Tensor]]: """Standard forward pass for training compatible with train.py.""" # Extract images and labels @@ -269,10 +276,6 @@ class Classifier(PreTrainedPolicy): def predict_reward(self, batch, threshold=0.5): """Eval method. 
Returns predicted reward with the decision threshold as argument.""" - # Check for both OBS_IMAGE and OBS_IMAGES prefixes - batch = self.normalize_inputs(batch) - batch = self.normalize_targets(batch) - # Extract images from batch dict images = [batch[key] for key in self.config.input_features if key.startswith(OBS_IMAGE)] @@ -282,28 +285,3 @@ class Classifier(PreTrainedPolicy): return (probs > threshold).float() else: return torch.argmax(self.predict(images).probabilities, dim=1) - - def get_optim_params(self): - """Return optimizer parameters for the policy.""" - return self.parameters() - - def select_action(self, batch: dict[str, Tensor]) -> Tensor: - """ - This method is required by PreTrainedPolicy but not used for reward classifiers. - The reward classifier is not an actor and does not select actions. - """ - raise NotImplementedError("Reward classifiers do not select actions") - - def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: - """ - This method is required by PreTrainedPolicy but not used for reward classifiers. - The reward classifier is not an actor and does not produce action chunks. - """ - raise NotImplementedError("Reward classifiers do not predict action chunks") - - def reset(self): - """ - This method is required by PreTrainedPolicy but not used for reward classifiers. - The reward classifier is not an actor and does not select actions. - """ - pass diff --git a/src/lerobot/policies/sac/reward_model/processor_classifier.py b/src/lerobot/rewards/classifier/processor_classifier.py similarity index 91% rename from src/lerobot/policies/sac/reward_model/processor_classifier.py rename to src/lerobot/rewards/classifier/processor_classifier.py index 1f7a66e58..056d7e91b 100644 --- a/src/lerobot/policies/sac/reward_model/processor_classifier.py +++ b/src/lerobot/rewards/classifier/processor_classifier.py @@ -1,5 +1,3 @@ -# !/usr/bin/env python - # Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,8 +25,7 @@ from lerobot.processor import ( policy_action_to_transition, transition_to_policy_action, ) - -from .configuration_classifier import RewardClassifierConfig +from lerobot.rewards.classifier.configuration_classifier import RewardClassifierConfig def make_classifier_processor( @@ -52,8 +49,6 @@ def make_classifier_processor( Args: config: The configuration object for the RewardClassifier. dataset_stats: A dictionary of statistics for normalization. - preprocessor_kwargs: Additional arguments for the pre-processor pipeline. - postprocessor_kwargs: Additional arguments for the post-processor pipeline. Returns: A tuple containing the configured pre-processor and post-processor pipelines. diff --git a/src/lerobot/rewards/factory.py b/src/lerobot/rewards/factory.py new file mode 100644 index 000000000..f6716f3fb --- /dev/null +++ b/src/lerobot/rewards/factory.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python + +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import importlib +import logging +from typing import Any + +import torch + +from lerobot.configs.rewards import RewardModelConfig +from lerobot.processor import PolicyAction, PolicyProcessorPipeline +from lerobot.rewards.classifier.configuration_classifier import RewardClassifierConfig +from lerobot.rewards.pretrained import PreTrainedRewardModel +from lerobot.rewards.sarm.configuration_sarm import SARMConfig + + +def get_reward_model_class(name: str) -> type[PreTrainedRewardModel]: + """ + Retrieves a reward model class by its registered name. + + This function uses dynamic imports to avoid loading all reward model classes into + memory at once, improving startup time and reducing dependencies. + + Args: + name: The name of the reward model. Supported names are "reward_classifier", + "sarm". + + Returns: + The reward model class corresponding to the given name. + + Raises: + ValueError: If the reward model name is not recognized. + """ + if name == "reward_classifier": + from lerobot.rewards.classifier.modeling_classifier import Classifier + + return Classifier + elif name == "sarm": + from lerobot.rewards.sarm.modeling_sarm import SARMRewardModel + + return SARMRewardModel + else: + try: + return _get_reward_model_cls_from_name(name=name) + except Exception as e: + raise ValueError(f"Reward model type '{name}' is not available.") from e + + +def make_reward_model_config(reward_type: str, **kwargs) -> RewardModelConfig: + """ + Instantiates a reward model configuration object based on the reward type. + + This factory function simplifies the creation of reward model configuration objects + by mapping a string identifier to the corresponding config class. + + Args: + reward_type: The type of the reward model. Supported types include + "reward_classifier", "sarm". + **kwargs: Keyword arguments to be passed to the configuration class constructor. + + Returns: + An instance of a `RewardModelConfig` subclass. 
+ + Raises: + ValueError: If the `reward_type` is not recognized. + """ + if reward_type == "reward_classifier": + return RewardClassifierConfig(**kwargs) + elif reward_type == "sarm": + return SARMConfig(**kwargs) + else: + try: + config_cls = RewardModelConfig.get_choice_class(reward_type) + return config_cls(**kwargs) + except Exception as e: + raise ValueError(f"Reward model type '{reward_type}' is not available.") from e + + +def make_reward_model(cfg: RewardModelConfig, **kwargs) -> PreTrainedRewardModel: + """ + Instantiate a reward model from its configuration. + + Args: + cfg: The configuration for the reward model to be created. If + `cfg.pretrained_path` is set, the model will be loaded with weights + from that path. + **kwargs: Additional keyword arguments forwarded to the model constructor + (e.g., ``dataset_stats``, ``dataset_meta``). + + Returns: + An instantiated and device-placed reward model. + """ + reward_cls = get_reward_model_class(cfg.type) + + kwargs["config"] = cfg + + if cfg.pretrained_path: + kwargs["pretrained_name_or_path"] = cfg.pretrained_path + reward_model = reward_cls.from_pretrained(**kwargs) + else: + reward_model = reward_cls(**kwargs) + + reward_model.to(cfg.device) + assert isinstance(reward_model, torch.nn.Module) + + return reward_model + + +def make_reward_pre_post_processors( + reward_cfg: RewardModelConfig, + **kwargs, +) -> tuple[ + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], + PolicyProcessorPipeline[PolicyAction, PolicyAction], +]: + """ + Create pre- and post-processor pipelines for a given reward model. + + Each reward model type has a dedicated factory function for its processors. + + Args: + reward_cfg: The configuration of the reward model for which to create processors. + **kwargs: Additional keyword arguments passed to the processor factory + (e.g., ``dataset_stats``, ``dataset_meta``). + + Returns: + A tuple containing the input (pre-processor) and output (post-processor) pipelines. 
+ + Raises: + ValueError: If a processor factory is not implemented for the given reward + model configuration type. + """ + # Create a new processor based on reward model type + if isinstance(reward_cfg, RewardClassifierConfig): + from lerobot.rewards.classifier.processor_classifier import make_classifier_processor + + return make_classifier_processor( + config=reward_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + + elif isinstance(reward_cfg, SARMConfig): + from lerobot.rewards.sarm.processor_sarm import make_sarm_pre_post_processors + + return make_sarm_pre_post_processors( + config=reward_cfg, + dataset_stats=kwargs.get("dataset_stats"), + dataset_meta=kwargs.get("dataset_meta"), + ) + + else: + try: + processors = _make_processors_from_reward_model_config( + config=reward_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + except Exception as e: + raise ValueError( + f"Processor for reward model type '{reward_cfg.type}' is not implemented." + ) from e + return processors + + +def _get_reward_model_cls_from_name(name: str) -> type[PreTrainedRewardModel]: + """Get reward model class from its registered name using dynamic imports. + + This is used as a helper function to import reward models from 3rd party lerobot + plugins. + + Args: + name: The name of the reward model. + + Returns: + The reward model class corresponding to the given name. + """ + if name not in RewardModelConfig.get_known_choices(): + raise ValueError( + f"Unknown reward model name '{name}'. " + f"Available reward models: {RewardModelConfig.get_known_choices()}" + ) + + config_cls = RewardModelConfig.get_choice_class(name) + config_cls_name = config_cls.__name__ + + model_name = config_cls_name.removesuffix("Config") + if model_name == config_cls_name: + raise ValueError( + f"The config class name '{config_cls_name}' does not follow the expected naming convention. " + f"Make sure it ends with 'Config'!" 
+ ) + + cls_name = model_name + "RewardModel" + module_path = config_cls.__module__.replace("configuration_", "modeling_") + + module = importlib.import_module(module_path) + reward_cls = getattr(module, cls_name) + return reward_cls + + +def _make_processors_from_reward_model_config( + config: RewardModelConfig, + dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None, +) -> tuple[Any, Any]: + """Create pre- and post-processors from a reward model configuration using dynamic imports. + + This is used as a helper function to import processor factories from 3rd party + lerobot reward model plugins. + + Args: + config: The reward model configuration object. + dataset_stats: Dataset statistics for normalization. + + Returns: + A tuple containing the input (pre-processor) and output (post-processor) pipelines. + """ + reward_type = config.type + function_name = f"make_{reward_type}_pre_post_processors" + module_path = config.__class__.__module__.replace("configuration_", "processor_") + logging.debug( + f"Instantiating reward pre/post processors using function '{function_name}' " + f"from module '{module_path}'" + ) + module = importlib.import_module(module_path) + function = getattr(module, function_name) + return function(config, dataset_stats=dataset_stats) diff --git a/src/lerobot/rewards/pretrained.py b/src/lerobot/rewards/pretrained.py new file mode 100644 index 000000000..d44b31733 --- /dev/null +++ b/src/lerobot/rewards/pretrained.py @@ -0,0 +1,244 @@ +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import builtins +import logging +import os +from importlib.resources import files +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import TYPE_CHECKING, Any, TypeVar + +import packaging +import safetensors +from huggingface_hub import HfApi, ModelCard, ModelCardData, hf_hub_download +from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE +from huggingface_hub.errors import HfHubHTTPError +from safetensors.torch import load_model as load_model_as_safetensor, save_model as save_model_as_safetensor +from torch import Tensor, nn + +from lerobot.configs.rewards import RewardModelConfig +from lerobot.utils.hub import HubMixin + +if TYPE_CHECKING: + from lerobot.configs.train import TrainPipelineConfig + +T = TypeVar("T", bound="PreTrainedRewardModel") + + +class PreTrainedRewardModel(nn.Module, HubMixin, abc.ABC): + """Base class for reward models.""" + + config_class: None + name: None + + def __init__(self, config: RewardModelConfig, *inputs, **kwargs): + super().__init__() + if not isinstance(config, RewardModelConfig): + raise ValueError( + f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " + "`RewardModelConfig`. 
To create a model from a pretrained model use " + f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + self.config = config + + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + if not getattr(cls, "config_class", None): + raise TypeError(f"Class {cls.__name__} must define 'config_class'") + if not getattr(cls, "name", None): + raise TypeError(f"Class {cls.__name__} must define 'name'") + + def _save_pretrained(self, save_directory: Path) -> None: + self.config._save_pretrained(save_directory) + model_to_save = self.module if hasattr(self, "module") else self + save_model_as_safetensor(model_to_save, str(save_directory / SAFETENSORS_SINGLE_FILE)) + + @classmethod + def from_pretrained( + cls: builtins.type[T], + pretrained_name_or_path: str | Path, + *, + config: RewardModelConfig | None = None, + force_download: bool = False, + resume_download: bool | None = None, + proxies: dict | None = None, + token: str | bool | None = None, + cache_dir: str | Path | None = None, + local_files_only: bool = False, + revision: str | None = None, + strict: bool = False, + **kwargs, + ) -> T: + """ + The reward model is set in evaluation mode by default using `reward.eval()` (dropout modules are + deactivated). To train it, you should first set it back in training mode with `reward.train()`. 
+ """ + if config is None: + config = RewardModelConfig.from_pretrained( + pretrained_name_or_path=pretrained_name_or_path, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + cache_dir=cache_dir, + local_files_only=local_files_only, + revision=revision, + **kwargs, + ) + model_id = str(pretrained_name_or_path) + instance = cls(config, **kwargs) + if os.path.isdir(model_id): + print("Loading weights from local directory") + model_file = os.path.join(model_id, SAFETENSORS_SINGLE_FILE) + reward = cls._load_as_safetensor(instance, model_file, config.device or "cpu", strict) + else: + try: + model_file = hf_hub_download( + repo_id=model_id, + filename=SAFETENSORS_SINGLE_FILE, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + reward = cls._load_as_safetensor(instance, model_file, config.device or "cpu", strict) + except HfHubHTTPError as e: + raise FileNotFoundError( + f"{SAFETENSORS_SINGLE_FILE} not found on the HuggingFace Hub in {model_id}" + ) from e + + reward.to(config.device) + reward.eval() + return reward + + @classmethod + def _load_as_safetensor(cls, model: T, model_file: str, map_location: str, strict: bool) -> T: + # Create base kwargs + kwargs = {"strict": strict} + + # Add device parameter for newer versions that support it + if packaging.version.parse(safetensors.__version__) >= packaging.version.parse("0.4.3"): + kwargs["device"] = map_location + + # Load the model with appropriate kwargs + missing_keys, unexpected_keys = load_model_as_safetensor(model, model_file, **kwargs) + if missing_keys: + logging.warning(f"Missing key(s) when loading model: {missing_keys}") + if unexpected_keys: + logging.warning(f"Unexpected key(s) when loading model: {unexpected_keys}") + + # For older versions, manually move to device if needed + if "device" not in kwargs and map_location 
!= "cpu": + logging.warning( + "Loading model weights on other devices than 'cpu' is not supported natively in your version of safetensors." + " This means that the model is loaded on 'cpu' first and then copied to the device." + " This leads to a slower loading time." + " Please update safetensors to version 0.4.3 or above for improved performance." + ) + model.to(map_location) + return model + + def get_optim_params(self): + """ + Returns the reward-model-specific parameters dict to be passed on to the optimizer. + """ + return self.parameters() + + def reset(self) -> None: + """Reset any internal state.""" + pass + + @abc.abstractmethod + def compute_reward(self, batch: dict[str, Tensor]) -> Tensor: + """Compute a scalar reward signal for a batch of observations. + + Args: + batch: Dictionary containing at minimum observation tensors. + May also contain "action", "next_observation.*", etc. + + Returns: + Tensor of shape ``(batch_size,)`` with reward values. + """ + ... + + def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict[str, Any]]: + """Training forward pass — override for trainable reward models.""" + raise NotImplementedError( + f"{self.__class__.__name__} is not trainable. Only use compute_reward() for inference." + ) + + @property + def is_trainable(self) -> bool: + """Whether this reward model can be trained via ``lerobot-train``. + + Trainable reward models override :meth:`forward`; zero-shot models + inherit the base implementation that raises ``NotImplementedError``. 
+ """ + return type(self).forward is not PreTrainedRewardModel.forward + + def push_model_to_hub(self, cfg: "TrainPipelineConfig"): + api = HfApi() + repo_id = api.create_repo( + repo_id=self.config.repo_id, private=self.config.private, exist_ok=True + ).repo_id + + # Push the files to the repo in a single commit + with TemporaryDirectory(ignore_cleanup_errors=True) as tmp: + saved_path = Path(tmp) / repo_id + + self.save_pretrained(saved_path) # Calls _save_pretrained and stores model tensors + + card = self.generate_model_card( + cfg.dataset.repo_id, self.config.type, self.config.license, self.config.tags + ) + card.save(str(saved_path / "README.md")) + + cfg.save_pretrained(saved_path) # Calls _save_pretrained and stores train config + + commit_info = api.upload_folder( + repo_id=repo_id, + repo_type="model", + folder_path=saved_path, + commit_message="Upload reward model weights, train config and readme", + allow_patterns=["*.safetensors", "*.json", "*.yaml", "*.md"], + ignore_patterns=["*.tmp", "*.log"], + ) + + logging.info(f"Model pushed to {commit_info.repo_url.url}") + + def generate_model_card( + self, dataset_repo_id: str, model_type: str, license: str | None, tags: list[str] | None + ) -> ModelCard: + card_data = ModelCardData( + license=license or "apache-2.0", + library_name="lerobot", + pipeline_tag="robotics", + tags=list(set(tags or []).union({"robotics", "lerobot", "reward-model", model_type})), + model_name=model_type, + datasets=dataset_repo_id, + ) + + template_card = ( + files("lerobot.templates") + .joinpath("lerobot_rewardmodel_modelcard_template.md") + .read_text(encoding="utf-8") + ) + card = ModelCard.from_template(card_data, template_str=template_card) + card.validate() + return card diff --git a/src/lerobot/policies/sarm/__init__.py b/src/lerobot/rewards/sarm/__init__.py similarity index 76% rename from src/lerobot/policies/sarm/__init__.py rename to src/lerobot/rewards/sarm/__init__.py index b164c87ef..5d5afcfb2 100644 --- 
a/src/lerobot/policies/sarm/__init__.py +++ b/src/lerobot/rewards/sarm/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,5 +14,6 @@ from .configuration_sarm import SARMConfig from .modeling_sarm import SARMRewardModel +from .processor_sarm import make_sarm_pre_post_processors -__all__ = ["SARMConfig", "SARMRewardModel"] +__all__ = ["SARMConfig", "SARMRewardModel", "make_sarm_pre_post_processors"] diff --git a/src/lerobot/policies/sarm/compute_rabc_weights.py b/src/lerobot/rewards/sarm/compute_rabc_weights.py similarity index 98% rename from src/lerobot/policies/sarm/compute_rabc_weights.py rename to src/lerobot/rewards/sarm/compute_rabc_weights.py index 07d0780b5..b1bf2e1f5 100644 --- a/src/lerobot/policies/sarm/compute_rabc_weights.py +++ b/src/lerobot/rewards/sarm/compute_rabc_weights.py @@ -25,18 +25,18 @@ need ~num_frames/30 queries instead of one per frame (~30x speedup). 
Usage: # Full RA-BC computation with visualizations - python src/lerobot/policies/sarm/compute_rabc_weights.py \\ + python src/lerobot/rewards/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ --reward-model-path /sarm_single_uni4 # Faster computation with stride (compute every 5 frames, interpolate the rest) - python src/lerobot/policies/sarm/compute_rabc_weights.py \\ + python src/lerobot/rewards/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ --reward-model-path /sarm_single_uni4 \\ --stride 5 # Visualize predictions only (no RA-BC computation) - python src/lerobot/policies/sarm/compute_rabc_weights.py \\ + python src/lerobot/rewards/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ --reward-model-path /sarm_single_uni4 \\ --visualize-only \\ @@ -58,10 +58,9 @@ import torch from tqdm import tqdm from lerobot.datasets import LeRobotDataset - -from .modeling_sarm import SARMRewardModel -from .processor_sarm import make_sarm_pre_post_processors -from .sarm_utils import normalize_stage_tau +from lerobot.rewards.sarm.modeling_sarm import SARMRewardModel +from lerobot.rewards.sarm.processor_sarm import make_sarm_pre_post_processors +from lerobot.rewards.sarm.sarm_utils import normalize_stage_tau def get_reward_model_path_from_parquet(parquet_path: Path) -> str | None: @@ -713,12 +712,12 @@ def main(): epilog=""" Examples: # Full RA-BC computation with visualizations - python src/lerobot/policies/sarm/compute_rabc_weights.py \\ + python src/lerobot/rewards/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ --reward-model-path /sarm_single_uni4 # Visualize predictions only (no RA-BC computation) - python src/lerobot/policies/sarm/compute_rabc_weights.py \\ + python src/lerobot/rewards/sarm/compute_rabc_weights.py \\ --dataset-repo-id lerobot/aloha_sim_insertion_human \\ --reward-model-path /sarm_single_uni4 \\ 
--visualize-only \\ diff --git a/src/lerobot/policies/sarm/configuration_sarm.py b/src/lerobot/rewards/sarm/configuration_sarm.py similarity index 98% rename from src/lerobot/policies/sarm/configuration_sarm.py rename to src/lerobot/rewards/sarm/configuration_sarm.py index fc8daa055..0d1f727f7 100644 --- a/src/lerobot/policies/sarm/configuration_sarm.py +++ b/src/lerobot/rewards/sarm/configuration_sarm.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - # Copyright 2025 Qianzhong Chen, Justin Yu, Mac Schwager, Pieter Abbeel, Yide Shentu, Philipp Wu # and The HuggingFace Inc. team. All rights reserved. # @@ -22,14 +20,15 @@ Paper: https://arxiv.org/abs/2509.25358 from dataclasses import dataclass, field -from lerobot.configs import FeatureType, NormalizationMode, PolicyFeature, PreTrainedConfig +from lerobot.configs import FeatureType, NormalizationMode, PolicyFeature +from lerobot.configs.rewards import RewardModelConfig from lerobot.optim import AdamWConfig, CosineDecayWithWarmupSchedulerConfig from lerobot.utils.constants import OBS_IMAGES, OBS_STATE -@PreTrainedConfig.register_subclass("sarm") +@RewardModelConfig.register_subclass("sarm") @dataclass -class SARMConfig(PreTrainedConfig): +class SARMConfig(RewardModelConfig): """Configuration class for SARM (Stage-Aware Reward Modeling). 
Supports three annotation modes: @@ -110,7 +109,6 @@ class SARMConfig(PreTrainedConfig): def __post_init__(self): super().__post_init__() - if self.annotation_mode not in ["single_stage", "dense_only", "dual"]: raise ValueError( f"annotation_mode must be 'single_stage', 'dense_only', or 'dual', got {self.annotation_mode}" diff --git a/src/lerobot/policies/sarm/modeling_sarm.py b/src/lerobot/rewards/sarm/modeling_sarm.py similarity index 96% rename from src/lerobot/policies/sarm/modeling_sarm.py rename to src/lerobot/rewards/sarm/modeling_sarm.py index 710554e4b..365f519b2 100644 --- a/src/lerobot/policies/sarm/modeling_sarm.py +++ b/src/lerobot/rewards/sarm/modeling_sarm.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - # Copyright 2025 Qianzhong Chen, Justin Yu, Mac Schwager, Pieter Abbeel, Yide Shentu, Philipp Wu # and The HuggingFace Inc. team. All rights reserved. # @@ -34,14 +32,13 @@ import torch.nn as nn import torch.nn.functional as F # noqa: N812 from torch import Tensor -from lerobot.utils.constants import OBS_STR - -from ..pretrained import PreTrainedPolicy -from .configuration_sarm import SARMConfig -from .sarm_utils import ( +from lerobot.rewards.pretrained import PreTrainedRewardModel +from lerobot.rewards.sarm.configuration_sarm import SARMConfig +from lerobot.rewards.sarm.sarm_utils import ( normalize_stage_tau, pad_state_to_max_dim, ) +from lerobot.utils.constants import OBS_STR class StageTransformer(nn.Module): @@ -353,7 +350,7 @@ def gen_stage_emb(num_classes: int, targets: torch.Tensor) -> torch.Tensor: return stage_onehot -class SARMRewardModel(PreTrainedPolicy): +class SARMRewardModel(PreTrainedRewardModel): """ SARM Reward Model for stage-aware task completion rewards. @@ -471,6 +468,23 @@ class SARMRewardModel(PreTrainedPolicy): self.subtask_model.to(device) return self + def compute_reward(self, batch: dict[str, Tensor]) -> Tensor: + """Compute dense progress reward in [0, 1] from batch. 
+ + Expects batch to contain: + - "observation_features" or video embeddings: (B, T, 512) + - "language_embedding" or text embeddings: (B, 512) + - optionally "observation.state": (B, T, state_dim) + """ + text_emb = batch.get("language_embedding", batch.get("text_features")) + video_emb = batch.get("observation_features", batch.get("video_features")) + state = batch.get("observation.state", batch.get("state_features")) + + rewards = self.calculate_rewards(text_emb, video_emb, state) + if isinstance(rewards, np.ndarray): + rewards = torch.from_numpy(rewards).float() + return rewards + @torch.no_grad() def calculate_rewards( self, @@ -631,17 +645,9 @@ class SARMRewardModel(PreTrainedPolicy): return self.parameters() def reset(self): - """Required by PreTrainedPolicy but not used for reward models.""" + """SARM has no episode-level state to reset.""" pass - def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: - """Required by PreTrainedPolicy but not used for reward models.""" - raise NotImplementedError("SARM model does not predict action chunks") - - def select_action(self, batch: dict[str, Tensor]) -> Tensor: - """Required by PreTrainedPolicy but not used for SARM.""" - raise NotImplementedError("SARM model does not select actions") - def _train_step( self, img_emb: torch.Tensor, # (B, N, T, D) diff --git a/src/lerobot/policies/sarm/processor_sarm.py b/src/lerobot/rewards/sarm/processor_sarm.py similarity index 95% rename from src/lerobot/policies/sarm/processor_sarm.py rename to src/lerobot/rewards/sarm/processor_sarm.py index e939b3485..eaa5f66f5 100644 --- a/src/lerobot/policies/sarm/processor_sarm.py +++ b/src/lerobot/rewards/sarm/processor_sarm.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - # Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -60,16 +58,15 @@ from lerobot.processor import ( policy_action_to_transition, transition_to_policy_action, ) -from lerobot.types import EnvTransition, PolicyAction, TransitionKey -from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME - -from .configuration_sarm import SARMConfig -from .sarm_utils import ( +from lerobot.rewards.sarm.configuration_sarm import SARMConfig +from lerobot.rewards.sarm.sarm_utils import ( apply_rewind_augmentation, compute_absolute_indices, find_stage_and_tau, pad_state_to_max_dim, ) +from lerobot.types import EnvTransition, PolicyAction, TransitionKey +from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME class SARMEncodingProcessorStep(ProcessorStep): @@ -455,7 +452,13 @@ class SARMEncodingProcessorStep(ProcessorStep): inputs = {k: v.to(self.device) for k, v in inputs.items()} # Get image embeddings - embeddings = self.clip_model.get_image_features(**inputs).detach().cpu() + # transformers 5.x returns BaseModelOutputWithPooling instead of a plain tensor + output = self.clip_model.get_image_features(**inputs) + if not isinstance(output, torch.Tensor): + output = output.pooler_output + if output is None: + raise ValueError("pooler_output should not be None for CLIP models.") + embeddings = output.detach().cpu() # Handle single frame case if embeddings.dim() == 1: @@ -482,7 +485,13 @@ class SARMEncodingProcessorStep(ProcessorStep): inputs = self.clip_processor.tokenizer([text], return_tensors="pt", padding=True, truncation=True) inputs = {k: v.to(self.device) for k, v in inputs.items()} - text_embedding = self.clip_model.get_text_features(**inputs).detach().cpu() + # transformers 5.x returns BaseModelOutputWithPooling instead of a plain tensor + output = self.clip_model.get_text_features(**inputs) + if not isinstance(output, torch.Tensor): + output = output.pooler_output + 
if output is None: + raise ValueError("pooler_output should not be None for CLIP models.") + text_embedding = output.detach().cpu() text_embedding = text_embedding.expand(batch_size, -1) return text_embedding diff --git a/src/lerobot/utils/rabc.py b/src/lerobot/rewards/sarm/rabc.py similarity index 79% rename from src/lerobot/utils/rabc.py rename to src/lerobot/rewards/sarm/rabc.py index dc0c61c69..8d7ce6bde 100644 --- a/src/lerobot/utils/rabc.py +++ b/src/lerobot/rewards/sarm/rabc.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,14 +12,38 @@ # See the License for the specific language governing permissions and # limitations under the License. +""" +RA-BC (Reward-Aligned Behavior Cloning) sample weighting implementation. + +This module implements the SampleWeighter protocol for RA-BC training, +which weights training samples based on their task progress as measured +by the SARM reward model. + +The weights are computed based on progress deltas: + delta = progress[t + chunk_size] - progress[t] + +High-quality samples (positive progress) get higher weights, while +samples with negative progress (going backwards) get zero weight. + +See: https://arxiv.org/abs/2509.25358 for the SARM paper. +""" + import logging from pathlib import Path +from typing import TYPE_CHECKING import numpy as np -import pandas as pd import torch from huggingface_hub import hf_hub_download +from lerobot.utils.import_utils import _pandas_available +from lerobot.utils.sample_weighting import SampleWeighter + +if TYPE_CHECKING or _pandas_available: + import pandas as pd +else: + pd = None # type: ignore[assignment] + def resolve_hf_path(path: str | Path) -> Path: """Resolve a path that may be a HuggingFace URL (hf://datasets/...) 
to a local path.""" @@ -34,23 +56,27 @@ def resolve_hf_path(path: str | Path) -> Path: return Path(path) -class RABCWeights: +class RABCWeights(SampleWeighter): """ Load precomputed SARM progress values and compute RA-BC weights during training. + This class implements the SampleWeighter ABC for use with the generic + sample weighting infrastructure in lerobot. + Progress values are loaded from a parquet file (generated by compute_rabc_weights.py). During training, computes: - progress_delta = progress[t + chunk_size] - progress[t] - rabc_weight based on the delta (paper Eq. 8-9) Args: - progress_path: Path to parquet file with precomputed progress values - chunk_size: Number of frames ahead for computing progress delta - head_mode: Which SARM head to use ("sparse" or "dense") - kappa: Hard threshold for high-quality samples (default: 0.01) - epsilon: Small constant for numerical stability (default: 1e-6) - fallback_weight: Weight to use for frames without valid delta (default: 1.0) - device: Device to return tensors on + progress_path: Path to parquet file with precomputed progress values. + Supports HuggingFace URLs (hf://datasets/...). + chunk_size: Number of frames ahead for computing progress delta. + head_mode: Which SARM head to use ("sparse" or "dense"). + kappa: Hard threshold for high-quality samples (default: 0.01). + epsilon: Small constant for numerical stability (default: 1e-6). + fallback_weight: Weight to use for frames without valid delta (default: 1.0). + device: Device to return tensors on. 
""" def __init__( @@ -61,7 +87,7 @@ class RABCWeights: kappa: float = 0.01, epsilon: float = 1e-6, fallback_weight: float = 1.0, - device: torch.device = None, + device: torch.device | None = None, ): self.progress_path = resolve_hf_path(progress_path) self.chunk_size = chunk_size @@ -87,8 +113,8 @@ class RABCWeights: logging.info(f"Using progress column: {self.progress_column}") - self.progress_lookup = {} - self.episode_lookup = {} + self.progress_lookup: dict[int, float] = {} + self.episode_lookup: dict[int, int] = {} for _, row in self.df.iterrows(): global_idx = int(row["index"]) @@ -100,7 +126,7 @@ class RABCWeights: self.episode_lookup[global_idx] = episode_idx # Build episode boundaries for delta computation - self.episode_boundaries = {} + self.episode_boundaries: dict[int, dict[str, int]] = {} for episode_idx in self.df["episode_index"].unique(): ep_df = self.df[self.df["episode_index"] == episode_idx] self.episode_boundaries[int(episode_idx)] = { @@ -114,7 +140,7 @@ class RABCWeights: # Compute global statistics for weight computation self._compute_global_stats() - def _compute_global_stats(self): + def _compute_global_stats(self) -> None: """Compute global mean and std of progress deltas for weight calculation.""" all_deltas = [] @@ -138,8 +164,8 @@ class RABCWeights: all_deltas.append(delta) if all_deltas: - self.delta_mean = max(np.mean(all_deltas), 0.0) - self.delta_std = max(np.std(all_deltas), self.epsilon) + self.delta_mean = max(float(np.mean(all_deltas)), 0.0) + self.delta_std = max(float(np.std(all_deltas)), self.epsilon) logging.info(f"Progress delta stats: mean={self.delta_mean:.4f}, std={self.delta_std:.4f}") else: self.delta_mean = 0.0 @@ -157,18 +183,19 @@ class RABCWeights: 4. Compute weight using paper Eq. 8-9 Args: - batch: Training batch containing "index" key with global frame indices + batch: Training batch containing "index" key with global frame indices. 
Returns: Tuple of: - - Weights tensor (batch_size,) normalized to sum to batch_size - - Stats dict with raw_mean_weight, num_zero_weight, num_full_weight + - Weights tensor (batch_size,) normalized to sum to batch_size. + - Stats dict with weighting statistics for logging. """ indices = batch.get("index") if indices is None: logging.warning("RA-BC: Batch missing 'index' key, using uniform weights") batch_size = self._get_batch_size(batch) - return torch.ones(batch_size, device=self.device), {"raw_mean_weight": 1.0} + stats = {"mean_weight": 1.0, "num_zero_weight": 0, "num_full_weight": batch_size} + return torch.ones(batch_size, device=self.device), stats # Convert to list of ints if isinstance(indices, torch.Tensor): @@ -183,29 +210,29 @@ class RABCWeights: delta = self._compute_delta(idx) deltas.append(delta) - deltas = np.array(deltas, dtype=np.float32) + deltas_array = np.array(deltas, dtype=np.float32) # Compute weights from deltas - weights = self._compute_weights(deltas) + weights = self._compute_weights(deltas_array) # Compute stats before normalization for logging raw_mean_weight = float(np.nanmean(weights)) num_zero_weight = int(np.sum(weights == 0)) num_full_weight = int(np.sum(weights == 1.0)) batch_stats = { - "raw_mean_weight": raw_mean_weight, + "mean_weight": raw_mean_weight, "num_zero_weight": num_zero_weight, "num_full_weight": num_full_weight, } - weights = torch.tensor(weights, device=self.device, dtype=torch.float32) + weights_tensor = torch.tensor(weights, device=self.device, dtype=torch.float32) # Normalize to sum to batch_size - batch_size = len(weights) - weight_sum = weights.sum() + self.epsilon - weights = weights * batch_size / weight_sum + batch_size = len(weights_tensor) + weight_sum = weights_tensor.sum() + self.epsilon + weights_tensor = weights_tensor * batch_size / weight_sum - return weights, batch_stats + return weights_tensor, batch_stats def _compute_delta(self, global_idx: int) -> float: """Compute progress delta for a single 
frame.""" @@ -241,7 +268,7 @@ class RABCWeights: - Final weight: wi = 1{ri > κ} + 1{0 ≤ ri ≤ κ}˜wi Returns: - Array of weights + Array of weights. """ valid_mask = ~np.isnan(deltas) @@ -273,12 +300,13 @@ class RABCWeights: if key in batch: val = batch[key] if isinstance(val, (torch.Tensor, np.ndarray)): - return val.shape[0] + return int(val.shape[0]) return 1 def get_stats(self) -> dict: - """Get statistics.""" + """Get global statistics about the RA-BC weighting.""" return { + "type": "rabc", "num_frames": len(self.progress_lookup), "chunk_size": self.chunk_size, "head_mode": self.head_mode, diff --git a/src/lerobot/policies/sarm/sarm_utils.py b/src/lerobot/rewards/sarm/sarm_utils.py similarity index 99% rename from src/lerobot/policies/sarm/sarm_utils.py rename to src/lerobot/rewards/sarm/sarm_utils.py index 5b6955d38..d2cd92cff 100644 --- a/src/lerobot/policies/sarm/sarm_utils.py +++ b/src/lerobot/rewards/sarm/sarm_utils.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - # Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/lerobot/rl/actor.py b/src/lerobot/rl/actor.py index 588adffac..eab527250 100644 --- a/src/lerobot/rl/actor.py +++ b/src/lerobot/rl/actor.py @@ -76,6 +76,7 @@ from lerobot.transport.utils import ( ) from lerobot.types import TransitionKey from lerobot.utils.device_utils import get_safe_torch_device +from lerobot.utils.process import ProcessSignalHandler from lerobot.utils.random_utils import set_seed from lerobot.utils.robot_utils import precise_sleep from lerobot.utils.transition import ( @@ -94,7 +95,6 @@ from .gym_manipulator import ( make_robot_env, step_env_and_process_transition, ) -from .process import ProcessSignalHandler from .queue import get_last_item_from_queue # Main entry point diff --git a/src/lerobot/rl/crop_dataset_roi.py b/src/lerobot/rl/crop_dataset_roi.py index b6bde2273..cc808bcb0 100644 --- a/src/lerobot/rl/crop_dataset_roi.py +++ b/src/lerobot/rl/crop_dataset_roi.py @@ -193,15 +193,15 @@ def convert_lerobot_dataset_to_cropped_lerobot_dataset( fps=int(original_dataset.fps), root=new_dataset_root, robot_type=original_dataset.meta.robot_type, - features=original_dataset.meta.info["features"], + features=original_dataset.meta.info.features, use_videos=len(original_dataset.meta.video_keys) > 0, ) # Update the metadata for every image key that will be cropped: # (Here we simply set the shape to be the final resize_size.) 
for key in crop_params_dict: - if key in new_dataset.meta.info["features"]: - new_dataset.meta.info["features"][key]["shape"] = [3] + list(resize_size) + if key in new_dataset.meta.info.features: + new_dataset.meta.info.features[key]["shape"] = (3, *resize_size) # TODO: Directly modify the mp4 video + meta info features, instead of recreating a dataset prev_episode_index = 0 diff --git a/src/lerobot/rl/learner.py b/src/lerobot/rl/learner.py index d1207421b..14542576d 100644 --- a/src/lerobot/rl/learner.py +++ b/src/lerobot/rl/learner.py @@ -90,6 +90,7 @@ from lerobot.utils.constants import ( TRAINING_STATE_DIR, ) from lerobot.utils.device_utils import get_safe_torch_device +from lerobot.utils.process import ProcessSignalHandler from lerobot.utils.random_utils import set_seed from lerobot.utils.transition import move_state_dict_to_device, move_transition_to_device from lerobot.utils.utils import ( @@ -99,7 +100,6 @@ from lerobot.utils.utils import ( from .buffer import ReplayBuffer, concatenate_batch_transitions from .learner_service import MAX_WORKERS, SHUTDOWN_TIMEOUT, LearnerService -from .process import ProcessSignalHandler @parser.wrap() diff --git a/src/lerobot/rollout/__init__.py b/src/lerobot/rollout/__init__.py new file mode 100644 index 000000000..a4de8ee6c --- /dev/null +++ b/src/lerobot/rollout/__init__.py @@ -0,0 +1,87 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Policy deployment engine with pluggable rollout strategies.""" + +from lerobot.utils.import_utils import require_package + +require_package("datasets", extra="dataset") + +from .configs import ( + BaseStrategyConfig, + DAggerKeyboardConfig, + DAggerPedalConfig, + DAggerStrategyConfig, + HighlightStrategyConfig, + RolloutConfig, + RolloutStrategyConfig, + SentryStrategyConfig, +) +from .context import ( + DatasetContext, + HardwareContext, + PolicyContext, + ProcessorContext, + RolloutContext, + RuntimeContext, + build_rollout_context, +) +from .inference import ( + InferenceEngine, + InferenceEngineConfig, + RTCInferenceConfig, + RTCInferenceEngine, + SyncInferenceConfig, + SyncInferenceEngine, + create_inference_engine, +) +from .strategies import ( + BaseStrategy, + DAggerStrategy, + HighlightStrategy, + RolloutStrategy, + SentryStrategy, + create_strategy, +) + +__all__ = [ + "BaseStrategy", + "BaseStrategyConfig", + "DAggerKeyboardConfig", + "DAggerPedalConfig", + "DAggerStrategy", + "DAggerStrategyConfig", + "DatasetContext", + "HardwareContext", + "HighlightStrategy", + "HighlightStrategyConfig", + "InferenceEngine", + "InferenceEngineConfig", + "PolicyContext", + "ProcessorContext", + "RTCInferenceConfig", + "RTCInferenceEngine", + "RolloutConfig", + "RolloutContext", + "RolloutStrategy", + "RolloutStrategyConfig", + "RuntimeContext", + "SentryStrategy", + "SentryStrategyConfig", + "SyncInferenceConfig", + "SyncInferenceEngine", + "build_rollout_context", + "create_inference_engine", + "create_strategy", +] diff --git a/src/lerobot/rollout/configs.py b/src/lerobot/rollout/configs.py new file mode 100644 index 000000000..9d019c887 --- /dev/null +++ b/src/lerobot/rollout/configs.py @@ -0,0 +1,323 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Configuration dataclasses for the rollout deployment engine.""" + +from __future__ import annotations + +import abc +import logging +from dataclasses import dataclass, field + +import draccus + +from lerobot.configs import PreTrainedConfig, parser +from lerobot.configs.dataset import DatasetRecordConfig +from lerobot.robots.config import RobotConfig +from lerobot.teleoperators.config import TeleoperatorConfig +from lerobot.utils.device_utils import auto_select_torch_device, is_torch_device_available + +from .inference import InferenceEngineConfig, SyncInferenceConfig + +logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# Strategy configs (polymorphic dispatch via draccus ChoiceRegistry) +# --------------------------------------------------------------------------- + + +@dataclass +class RolloutStrategyConfig(draccus.ChoiceRegistry, abc.ABC): + """Abstract base for rollout strategy configurations. + + Use ``--strategy.type=`` on the CLI to select a strategy. + """ + + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + +@RolloutStrategyConfig.register_subclass("base") +@dataclass +class BaseStrategyConfig(RolloutStrategyConfig): + """Autonomous rollout with no data recording.""" + + pass + + +@RolloutStrategyConfig.register_subclass("sentry") +@dataclass +class SentryStrategyConfig(RolloutStrategyConfig): + """Continuous autonomous rollout with always-on recording. 
+ + Episode duration is derived from camera resolution, FPS, and + ``target_video_file_size_mb`` so that each saved episode produces a + video file that has crossed the target size. This aligns episode + boundaries with the dataset's video file chunking, so each + ``push_to_hub`` call uploads complete video files rather than + re-uploading a growing file that hasn't crossed the chunk boundary. + """ + + upload_every_n_episodes: int = 5 + # Target video file size in MB for episode rotation. Episodes are + # saved once the estimated video duration would exceed this limit. + # Defaults to DEFAULT_VIDEO_FILE_SIZE_IN_MB when set to None. + target_video_file_size_mb: int | None = None + + +@RolloutStrategyConfig.register_subclass("highlight") +@dataclass +class HighlightStrategyConfig(RolloutStrategyConfig): + """Autonomous rollout with on-demand recording via ring buffer. + + A memory-bounded ring buffer continuously captures telemetry. When + the user presses the save key, the buffer contents are flushed to + the dataset and live recording continues until the key is pressed + again. + """ + + ring_buffer_seconds: float = 10.0 + ring_buffer_max_memory_mb: int = 1024 + save_key: str = "s" + push_key: str = "h" + + +@dataclass +class DAggerKeyboardConfig: + """Keyboard key bindings for DAgger controls. + + Keys are specified as single characters (e.g. ``"c"``, ``"h"``) or + special key names (``"space"``). + """ + + pause_resume: str = "space" + correction: str = "tab" + upload: str = "enter" + + +@dataclass +class DAggerPedalConfig: + """Foot pedal configuration for DAgger controls. + + Pedal codes are evdev key code strings (e.g. ``"KEY_A"``). 
+ """ + + device_path: str = "/dev/input/by-id/usb-PCsensor_FootSwitch-event-kbd" + pause_resume: str = "KEY_A" + correction: str = "KEY_B" + upload: str = "KEY_C" + + +@RolloutStrategyConfig.register_subclass("dagger") +@dataclass +class DAggerStrategyConfig(RolloutStrategyConfig): + """Human-in-the-loop data collection (DAgger / RaC). + + Alternates between autonomous policy execution and human intervention. + Intervention frames are tagged with ``intervention=True``. + + Input is controlled via either a keyboard or foot pedal, selected by + ``input_device``. Each device exposes three actions: + + 1. **pause_resume** — toggle policy execution on/off. + 2. **correction** — toggle human correction recording. + 3. **upload** — push dataset to hub on demand (corrections-only mode). + + When ``record_autonomous=False`` (default) only human-correction windows + are recorded — each correction becomes its own episode. Set to ``True`` + to record both autonomous and correction frames with size-based episode + rotation (same as Sentry) and background uploading. ``push_to_hub`` is + blocked while a correction is in progress. + """ + + # Number of correction episodes to collect (corrections-only mode). + # When None, falls back to ``--dataset.num_episodes``. + num_episodes: int | None = None + record_autonomous: bool = False + upload_every_n_episodes: int = 5 + # Target video file size in MB for episode rotation (record_autonomous + # mode only). Defaults to DEFAULT_VIDEO_FILE_SIZE_IN_MB when None. 
+ target_video_file_size_mb: int | None = None + input_device: str = "keyboard" + keyboard: DAggerKeyboardConfig = field(default_factory=DAggerKeyboardConfig) + pedal: DAggerPedalConfig = field(default_factory=DAggerPedalConfig) + + def __post_init__(self): + if self.input_device not in ("keyboard", "pedal"): + raise ValueError(f"DAgger input_device must be 'keyboard' or 'pedal', got '{self.input_device}'") + + +# --------------------------------------------------------------------------- +# Top-level rollout config +# --------------------------------------------------------------------------- + + +@dataclass +class RolloutConfig: + """Top-level configuration for the ``lerobot-rollout`` CLI. + + Combines hardware, policy, strategy, and runtime settings. The + ``__post_init__`` method performs fail-fast validation to reject + invalid flag combinations early. + """ + + # Hardware + robot: RobotConfig | None = None + teleop: TeleoperatorConfig | None = None + + # Policy (loaded from --policy.path via __post_init__) + policy: PreTrainedConfig | None = None + + # Strategy (polymorphic: --strategy.type=base|sentry|highlight|dagger) + strategy: RolloutStrategyConfig = field(default_factory=BaseStrategyConfig) + + # Inference backend (polymorphic: --inference.type=sync|rtc) + inference: InferenceEngineConfig = field(default_factory=SyncInferenceConfig) + + # Dataset (required for sentry, highlight, dagger; None for base) + dataset: DatasetRecordConfig | None = None + + # Runtime + fps: float = 30.0 + duration: float = 0.0 # 0 = infinite (24/7 mode) + interpolation_multiplier: int = 1 + device: str | None = None + task: str = "" + display_data: bool = False + # Display data on a remote Rerun server + display_ip: str | None = None + # Port of the remote Rerun server + display_port: int | None = None + # Whether to display compressed images in Rerun + display_compressed_images: bool = False + # Use vocal synthesis to read events + play_sounds: bool = True + resume: bool = 
False + # Rename map for mapping robot/dataset observation keys to policy keys + rename_map: dict[str, str] = field(default_factory=dict) + + # Hardware teardown + # When True (default), smoothly interpolate the robot back to the joint + # positions captured at startup before disconnecting. Set to False to + # leave the robot in its final achieved pose at shutdown. + return_to_initial_position: bool = True + + # Torch compile + use_torch_compile: bool = False + torch_compile_backend: str = "inductor" + torch_compile_mode: str = "default" + compile_warmup_inferences: int = 2 + + def __post_init__(self): + """Validate config invariants and load the policy config from ``--policy.path``.""" + # --- Strategy-specific validation --- + if isinstance(self.strategy, DAggerStrategyConfig) and self.teleop is None: + raise ValueError("DAgger strategy requires --teleop.type to be set") + + # TODO(Steven): DAgger shouldn't require a dataset (user may want to just rollout+intervene without recording), but for now we require it to simplify the implementation. + needs_dataset = isinstance( + self.strategy, (SentryStrategyConfig, HighlightStrategyConfig, DAggerStrategyConfig) + ) + if needs_dataset and (self.dataset is None or not self.dataset.repo_id): + raise ValueError(f"{self.strategy.type} strategy requires --dataset.repo_id to be set") + + if isinstance(self.strategy, BaseStrategyConfig) and self.dataset is not None: + raise ValueError( + "Base strategy does not record data. Use sentry, highlight, or dagger for recording." + ) + + # Sentry MUST use streaming encoding to avoid disk I/O blocking the control loop + if ( + isinstance(self.strategy, SentryStrategyConfig) + and self.dataset is not None + and not self.dataset.streaming_encoding + ): + logger.warning("Sentry mode forces streaming_encoding=True") + self.dataset.streaming_encoding = True + + # Highlight writes frames while the policy is still running, so streaming is mandatory. 
+ if ( + isinstance(self.strategy, HighlightStrategyConfig) + and self.dataset is not None + and not self.dataset.streaming_encoding + ): + logger.warning("Highlight mode forces streaming_encoding=True") + self.dataset.streaming_encoding = True + + # DAgger: streaming is mandatory only when the autonomous phase is also recorded. + if isinstance(self.strategy, DAggerStrategyConfig) and self.dataset is not None: + if self.strategy.record_autonomous and not self.dataset.streaming_encoding: + logger.warning("DAgger with record_autonomous=True forces streaming_encoding=True") + self.dataset.streaming_encoding = True + elif not self.strategy.record_autonomous and not self.dataset.streaming_encoding: + logger.info( + "Streaming encoding is disabled for DAgger corrections-only mode. " + "Consider enabling it for faster episode saving: " + "--dataset.streaming_encoding=true --dataset.encoder_threads=2" + ) + + # DAgger: resolve num_episodes from dataset config when not explicitly set. + if isinstance(self.strategy, DAggerStrategyConfig) and self.strategy.num_episodes is None: + if self.dataset is not None: + self.strategy.num_episodes = self.dataset.num_episodes + logger.info( + "DAgger num_episodes not set — using --dataset.num_episodes=%d", + self.strategy.num_episodes, + ) + else: + raise ValueError( + "DAgger num_episodes must be set either via --strategy.num_episodes or --dataset.num_episodes" + ) + + # --- Policy loading --- + if self.robot is None: + raise ValueError("--robot.type is required for rollout") + + policy_path = parser.get_path_arg("policy") + if policy_path: + cli_overrides = parser.get_cli_overrides("policy") + self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) + self.policy.pretrained_path = policy_path + if self.policy is None: + raise ValueError("--policy.path is required for rollout") + + # --- Task resolution --- + # When any --dataset.* flag is passed, draccus creates a DatasetRecordConfig with single_task="". 
+ # If the user set the task via the top-level --task flag, propagate it so that all + # downstream consumers (inference engine, dataset frame builders) see it. + if self.dataset is not None and not self.dataset.single_task and self.task: + logger.info("Propagating top-level task '%s' to dataset config", self.task) + self.dataset.single_task = self.task + elif self.dataset is not None and self.dataset.single_task and not self.task: + logger.info("Propagating dataset single_task '%s' to top-level task", self.dataset.single_task) + self.task = self.dataset.single_task + + # --- Device resolution --- + # Resolve device from the policy config when not explicitly set so all + # components (policy.to, preprocessor, inference engine) use the same + # device string instead of inconsistent fallbacks. + if self.device is None or not is_torch_device_available(self.device): + resolved = self.policy.device + if resolved: + self.device = resolved + logger.info("Resolved device from policy config: %s", self.device) + else: + self.device = auto_select_torch_device().type + logger.info("No policy config to resolve device from; auto-selected device: %s", self.device) + + @classmethod + def __get_path_fields__(cls) -> list[str]: + return ["policy"] diff --git a/src/lerobot/rollout/context.py b/src/lerobot/rollout/context.py new file mode 100644 index 000000000..8804cd789 --- /dev/null +++ b/src/lerobot/rollout/context.py @@ -0,0 +1,454 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Rollout context: shared state created once before strategy dispatch. + +Grouped into five topical sub-contexts — :class:`RuntimeContext`, +:class:`HardwareContext`, :class:`PolicyContext`, :class:`ProcessorContext`, +and :class:`DatasetContext` — assembled into :class:`RolloutContext`. +""" + +from __future__ import annotations + +import logging +from dataclasses import dataclass, field +from threading import Event + +import torch + +from lerobot.configs import FeatureType +from lerobot.datasets import ( + LeRobotDataset, + aggregate_pipeline_dataset_features, + create_initial_features, +) +from lerobot.policies import get_policy_class, make_pre_post_processors +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.processor import ( + PolicyProcessorPipeline, + RobotAction, + RobotObservation, + RobotProcessorPipeline, + make_default_processors, + rename_stats, +) +from lerobot.processor.relative_action_processor import RelativeActionsProcessorStep +from lerobot.robots import make_robot_from_config +from lerobot.teleoperators import Teleoperator, make_teleoperator_from_config +from lerobot.utils.feature_utils import combine_feature_dicts, hw_to_dataset_features + +from .configs import BaseStrategyConfig, DAggerStrategyConfig, RolloutConfig +from .inference import ( + InferenceEngine, + RTCInferenceConfig, + SyncInferenceConfig, + create_inference_engine, +) +from .robot_wrapper import ThreadSafeRobot + +logger = logging.getLogger(__name__) + + +def _resolve_action_key_order( + policy_action_names: list[str] | None, dataset_action_names: list[str] +) -> list[str]: + """Choose action name ordering for mapping policy tensor outputs to robot action dicts.""" + if not policy_action_names: + return dataset_action_names + policy_action_names = list(policy_action_names) + if len(policy_action_names) != len(dataset_action_names): + logger.warning( 
+ "policy.action_feature_names length (%d) != dataset action dim (%d); using dataset order", + len(policy_action_names), + len(dataset_action_names), + ) + return dataset_action_names + if set(dataset_action_names) != set(policy_action_names): + logger.warning("policy.action_feature_names keys don't match dataset; using dataset order") + return dataset_action_names + return policy_action_names + + +# --------------------------------------------------------------------------- +# Sub-contexts +# --------------------------------------------------------------------------- + + +@dataclass +class RuntimeContext: + """Runtime knobs shared with every strategy.""" + + cfg: RolloutConfig + shutdown_event: Event + + +@dataclass +class HardwareContext: + """Connected hardware. + + The raw robot is available via ``robot_wrapper.inner`` when needed + (e.g. for disconnect); strategies should otherwise go through the + thread-safe wrapper. + + ``initial_position`` stores the robot's joint positions at connect + time. Strategies use it to return the robot to a safe pose before + shutting down. 
@dataclass
class PolicyContext:
    """Loaded policy and its inference engine."""

    policy: PreTrainedPolicy
    preprocessor: PolicyProcessorPipeline
    postprocessor: PolicyProcessorPipeline
    inference: InferenceEngine


@dataclass
class ProcessorContext:
    """Robot-side pipelines (run outside the policy)."""

    teleop_action_processor: RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]
    robot_action_processor: RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]
    robot_observation_processor: RobotProcessorPipeline[RobotObservation, RobotObservation]


@dataclass
class DatasetContext:
    """Dataset and feature bookkeeping."""

    dataset: LeRobotDataset | None
    dataset_features: dict = field(default_factory=dict)
    hw_features: dict = field(default_factory=dict)
    ordered_action_keys: list[str] = field(default_factory=list)


@dataclass
class RolloutContext:
    """Bundle of sub-contexts passed to every rollout strategy.

    Built once by :func:`build_rollout_context` before strategy dispatch.
    """

    runtime: RuntimeContext
    hardware: HardwareContext
    policy: PolicyContext
    processors: ProcessorContext
    data: DatasetContext


# ---------------------------------------------------------------------------
# Build
# ---------------------------------------------------------------------------


def build_rollout_context(
    cfg: RolloutConfig,
    shutdown_event: Event,
    teleop_action_processor: RobotProcessorPipeline | None = None,
    robot_action_processor: RobotProcessorPipeline | None = None,
    robot_observation_processor: RobotProcessorPipeline | None = None,
) -> RolloutContext:
    """Wire up policy, processors, hardware, dataset, and inference engine.

    The order is policy-first / hardware-last so a bad ``--policy.path``
    fails fast without touching the robot.

    Args:
        cfg: Full rollout configuration.
        shutdown_event: Process-wide shutdown signal, shared with the engine.
        teleop_action_processor: Optional override for the teleop action pipeline.
        robot_action_processor: Optional override for the robot action pipeline.
        robot_observation_processor: Optional override for the observation pipeline.

    Returns:
        A fully wired :class:`RolloutContext`.

    Raises:
        NotImplementedError: For unsupported policy/backend combinations.
        ValueError: On visual-feature mismatch or invalid dataset naming.
    """
    is_rtc = isinstance(cfg.inference, RTCInferenceConfig)

    # --- 1. Policy (heavy I/O, but no hardware yet) -------------------
    logger.info("Loading policy from '%s'...", cfg.policy.pretrained_path)
    policy_config = cfg.policy
    policy_class = get_policy_class(policy_config.type)

    if hasattr(policy_config, "compile_model"):
        policy_config.compile_model = cfg.use_torch_compile

    if policy_config.type == "vqbet" and cfg.device == "mps":
        raise NotImplementedError(
            "Current implementation of VQBeT does not support `mps` backend. "
            "Please use `cpu` or `cuda` backend."
        )

    if policy_config.use_peft:
        from peft import PeftConfig, PeftModel

        peft_path = policy_config.pretrained_path
        peft_config = PeftConfig.from_pretrained(peft_path)
        policy = policy_class.from_pretrained(
            pretrained_name_or_path=peft_config.base_model_name_or_path, config=policy_config
        )
        policy = PeftModel.from_pretrained(policy, peft_path, config=peft_config)
    else:
        policy = policy_class.from_pretrained(policy_config.pretrained_path, config=policy_config)

    if is_rtc:
        policy.config.rtc_config = cfg.inference.rtc
        if hasattr(policy, "init_rtc_processor"):
            policy.init_rtc_processor()

    policy = policy.to(cfg.device)
    policy.eval()
    logger.info("Policy loaded: type=%s, device=%s", policy_config.type, cfg.device)

    if cfg.use_torch_compile and policy.type not in ("pi0", "pi05"):
        try:
            if hasattr(torch, "compile"):
                compile_kwargs = {
                    "backend": cfg.torch_compile_backend,
                    "mode": cfg.torch_compile_mode,
                    "options": {"triton.cudagraphs": False},
                }
                policy.predict_action_chunk = torch.compile(policy.predict_action_chunk, **compile_kwargs)
                logger.info("torch.compile applied to predict_action_chunk")
        except Exception as e:
            # Best-effort: an uncompiled policy still works, just slower.
            logger.warning("Failed to apply torch.compile: %s", e)

    # --- 2. Robot-side processors (user-supplied or defaults) ---------
    if (
        teleop_action_processor is None
        or robot_action_processor is None
        or robot_observation_processor is None
    ):
        _t, _r, _o = make_default_processors()
        teleop_action_processor = teleop_action_processor or _t
        robot_action_processor = robot_action_processor or _r
        robot_observation_processor = robot_observation_processor or _o

    # --- 3. Hardware (heaviest side-effect, deferred) -----------------
    logger.info("Connecting robot (%s)...", cfg.robot.type if cfg.robot else "?")
    robot = make_robot_from_config(cfg.robot)
    robot.connect()
    logger.info("Robot connected: %s", robot.name)

    # Store the initial joint positions so we can return to a safe pose on shutdown.
    initial_obs = robot.get_observation()
    initial_position = {k: v for k, v in initial_obs.items() if k.endswith(".pos")}
    logger.info("Captured initial robot position (%d keys)", len(initial_position))

    robot_wrapper = ThreadSafeRobot(robot)

    teleop = None
    if cfg.teleop is not None:
        logger.info("Connecting teleoperator (%s)...", cfg.teleop.type if cfg.teleop else "?")
        teleop = make_teleoperator_from_config(cfg.teleop)
        teleop.connect()
        logger.info("Teleoperator connected")

    # TODO(Steven): once Teleoperator motor-control methods are standardised
    # (``enable_torque`` / ``disable_torque`` / ``write_goal_positions``), gate
    # the DAgger strategy on their presence here and fail fast with a helpful
    # message instead of relying on the operator to pre-align the leader by
    # hand. See :func:`DAggerStrategy._apply_transition` for the matching
    # disabled call sites.
    # if isinstance(cfg.strategy, DAggerStrategyConfig) and teleop is not None:
    #     required_teleop_methods = ("enable_torque", "disable_torque", "write_goal_positions")
    #     missing = [m for m in required_teleop_methods if not callable(getattr(teleop, m, None))]
    #     if missing:
    #         teleop.disconnect()
    #         raise ValueError(
    #             f"DAgger strategy requires a teleoperator with motor control methods "
    #             f"{required_teleop_methods}. '{type(teleop).__name__}' is missing: {missing}"
    #         )

    # --- 4. Features + action-key reconciliation ----------------------
    # TODO(Steven): Only ``.pos`` joint features are routed to the policy as state and as the
    # action target; velocity and torque channels (when present) are kept in
    # the raw observation but excluded from the policy-facing tensors.
    all_obs_features = robot.observation_features
    # ``observation_features`` values are either a tuple (camera shape) or the
    # ``float`` type itself used as a sentinel for scalar motor features —
    # see ``dict[str, type | tuple]`` annotation on ``Robot.observation_features``.
    observation_features_hw = {
        k: v
        for k, v in all_obs_features.items()
        if isinstance(v, tuple) or (v is float and k.endswith(".pos"))
    }
    action_features_hw = {k: v for k, v in robot.action_features.items() if k.endswith(".pos")}

    # The action side is always needed: sync inference reads action names from
    # ``dataset_features[ACTION]`` to map policy tensors back to robot actions.
    action_dataset_features = aggregate_pipeline_dataset_features(
        pipeline=teleop_action_processor,
        initial_features=create_initial_features(action=action_features_hw),
        use_videos=cfg.dataset.video if cfg.dataset else True,
    )
    # Observation-side aggregation is needed because of build_dataset_frame
    observation_dataset_features = aggregate_pipeline_dataset_features(
        pipeline=robot_observation_processor,
        initial_features=create_initial_features(observation=observation_features_hw),
        use_videos=cfg.dataset.video if cfg.dataset else True,
    )
    dataset_features = combine_feature_dicts(action_dataset_features, observation_dataset_features)
    hw_features = hw_to_dataset_features(observation_features_hw, "observation")
    raw_action_keys = list(action_features_hw.keys())
    policy_action_names = getattr(policy_config, "action_feature_names", None)
    ordered_action_keys = _resolve_action_key_order(
        list(policy_action_names) if policy_action_names else None,
        raw_action_keys,
    )

    # Validate visual features if no rename_map is active
    rename_map = cfg.rename_map
    if not rename_map:
        expected_visuals = {
            k for k, v in policy_config.input_features.items() if v.type == FeatureType.VISUAL
        }
        provided_visuals = {
            f"observation.images.{k}" for k, v in robot.observation_features.items() if isinstance(v, tuple)
        }
        # Either side may be a subset of the other (extra cameras or extra
        # policy inputs are tolerated); only a disjoint/partial overlap fails.
        policy_subset = expected_visuals.issubset(provided_visuals)
        hw_subset = provided_visuals.issubset(expected_visuals)
        if not (policy_subset or hw_subset):
            raise ValueError(
                f"Visual feature mismatch between policy and robot hardware.\n"
                f"Policy expects: {expected_visuals}\n"
                f"Robot provides: {provided_visuals}"
            )

    # --- 5. Dataset ---------------------------------------------------
    dataset = None
    # NOTE(review): dataset recording is skipped for the plain/base strategy —
    # confirm BaseStrategyConfig is the concrete "no recording" config, not
    # the parent of all strategy configs.
    if cfg.dataset is not None and not isinstance(cfg.strategy, BaseStrategyConfig):
        logger.info("Setting up dataset (repo_id=%s)...", cfg.dataset.repo_id)
        if cfg.resume:
            dataset = LeRobotDataset.resume(
                cfg.dataset.repo_id,
                root=cfg.dataset.root,
                batch_encoding_size=cfg.dataset.video_encoding_batch_size,
                vcodec=cfg.dataset.vcodec,
                streaming_encoding=cfg.dataset.streaming_encoding,
                encoder_queue_maxsize=cfg.dataset.encoder_queue_maxsize,
                encoder_threads=cfg.dataset.encoder_threads,
                image_writer_processes=cfg.dataset.num_image_writer_processes,
                image_writer_threads=cfg.dataset.num_image_writer_threads_per_camera
                * len(robot.cameras if hasattr(robot, "cameras") else []),
            )
        else:
            if isinstance(cfg.strategy, DAggerStrategyConfig):
                # DAgger records a per-frame human-intervention flag.
                dataset_features["intervention"] = {
                    "dtype": "bool",
                    "shape": (1,),
                    "names": None,
                }

            repo_name = cfg.dataset.repo_id.split("/", 1)[-1]
            if not repo_name.startswith("rollout_"):
                # Fix: placeholders restored in the hint (were stripped to
                # "--dataset.repo_id=/rollout_" in the previous revision).
                raise ValueError(
                    "Dataset names for rollout must start with 'rollout_'. "
                    "Use --dataset.repo_id=<user>/rollout_<name> for policy deployment datasets."
                )
            cfg.dataset.stamp_repo_id()
            target_video_mb = getattr(cfg.strategy, "target_video_file_size_mb", None)
            dataset = LeRobotDataset.create(
                cfg.dataset.repo_id,
                cfg.dataset.fps,
                root=cfg.dataset.root,
                robot_type=robot.name,
                features=dataset_features,
                use_videos=cfg.dataset.video,
                image_writer_processes=cfg.dataset.num_image_writer_processes,
                image_writer_threads=cfg.dataset.num_image_writer_threads_per_camera
                * len(robot.cameras if hasattr(robot, "cameras") else []),
                batch_encoding_size=cfg.dataset.video_encoding_batch_size,
                vcodec=cfg.dataset.vcodec,
                streaming_encoding=cfg.dataset.streaming_encoding,
                encoder_queue_maxsize=cfg.dataset.encoder_queue_maxsize,
                encoder_threads=cfg.dataset.encoder_threads,
                video_files_size_in_mb=target_video_mb,
            )

    if dataset is not None:
        logger.info("Dataset ready: %s (%d existing episodes)", dataset.repo_id, dataset.num_episodes)

    # --- 6. Policy pre/post processors (needs dataset stats if any) ---
    dataset_stats = None
    if dataset is not None:
        dataset_stats = rename_stats(
            dataset.meta.stats,
            cfg.rename_map,
        )

    preprocessor, postprocessor = make_pre_post_processors(
        policy_cfg=policy_config,
        pretrained_path=cfg.policy.pretrained_path,
        dataset_stats=dataset_stats,
        preprocessor_overrides={
            "device_processor": {"device": cfg.device},
            "rename_observations_processor": {"rename_map": cfg.rename_map},
        },
    )

    if isinstance(cfg.inference, SyncInferenceConfig) and any(
        isinstance(step, RelativeActionsProcessorStep) and step.enabled
        for step in getattr(preprocessor, "steps", ())
    ):
        # Fix: a separating space was missing between the two concatenated
        # message fragments ("…for now.Use --inference…").
        raise NotImplementedError(
            "SyncInferenceEngine does not support policies with relative actions for now. "
            "Use --inference.type=rtc or remove relative action processor steps from the policy pipeline."
        )

    # --- 7. Inference strategy (needs policy + pre/post + hardware) ---
    logger.info(
        "Creating inference engine (type=%s)...",
        cfg.inference.type if hasattr(cfg.inference, "type") else "sync",
    )
    task_str = cfg.dataset.single_task if cfg.dataset else cfg.task
    inference_strategy = create_inference_engine(
        cfg.inference,
        policy=policy,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        robot_wrapper=robot_wrapper,
        hw_features=hw_features,
        dataset_features=dataset_features,
        ordered_action_keys=ordered_action_keys,
        task=task_str,
        fps=cfg.fps,
        device=cfg.device,
        use_torch_compile=cfg.use_torch_compile,
        compile_warmup_inferences=cfg.compile_warmup_inferences,
        shutdown_event=shutdown_event,
    )

    # --- 8. Assemble ---------------------------------------------------
    logger.info("Rollout context assembled successfully")
    return RolloutContext(
        runtime=RuntimeContext(cfg=cfg, shutdown_event=shutdown_event),
        hardware=HardwareContext(
            robot_wrapper=robot_wrapper, teleop=teleop, initial_position=initial_position
        ),
        policy=PolicyContext(
            policy=policy,
            preprocessor=preprocessor,
            postprocessor=postprocessor,
            inference=inference_strategy,
        ),
        processors=ProcessorContext(
            teleop_action_processor=teleop_action_processor,
            robot_action_processor=robot_action_processor,
            robot_observation_processor=robot_observation_processor,
        ),
        data=DatasetContext(
            dataset=dataset,
            dataset_features=dataset_features,
            hw_features=hw_features,
            ordered_action_keys=ordered_action_keys,
        ),
    )
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Inference engine package — backend-agnostic action production. + +Concrete backends (``sync``, ``rtc``, ...) expose the same small interface so +rollout strategies never branch on which backend is in use. +""" + +from .base import InferenceEngine +from .factory import ( + InferenceEngineConfig, + RTCInferenceConfig, + SyncInferenceConfig, + create_inference_engine, +) +from .rtc import RTCInferenceEngine +from .sync import SyncInferenceEngine + +__all__ = [ + "InferenceEngine", + "InferenceEngineConfig", + "RTCInferenceConfig", + "RTCInferenceEngine", + "SyncInferenceConfig", + "SyncInferenceEngine", + "create_inference_engine", +] diff --git a/src/lerobot/rollout/inference/base.py b/src/lerobot/rollout/inference/base.py new file mode 100644 index 000000000..f269aa5fe --- /dev/null +++ b/src/lerobot/rollout/inference/base.py @@ -0,0 +1,89 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Inference engine ABC. 
+ +Rollout strategies consume actions through this small interface so they +do not need to know whether inference happens inline on the control thread +or asynchronously in a background thread (RTC). +""" + +from __future__ import annotations + +import abc + +import torch + + +class InferenceEngine(abc.ABC): + """Abstract backend for producing actions during rollout. + + Subclasses decide whether inference happens inline on the control + thread or asynchronously in a background thread. The contract is + minimal so additional backends can be plugged in without touching + rollout strategies. + + Lifecycle + --------- + ``start`` — prepare the backend (e.g. launch a background thread). + ``stop`` — shut the backend down cleanly. + ``reset`` — clear episode-scoped state (policy hidden state, queues…). + + Action production + ----------------- + ``get_action(obs_frame)`` — return the next action tensor, or + ``None`` if none is available (e.g. async queue empty). Sync + backends always compute from ``obs_frame``; async backends ignore + it (they receive observations via ``notify_observation``). + + Optional hooks + -------------- + ``notify_observation`` / ``pause`` / ``resume`` have a no-op default + so rollout strategies can invoke them unconditionally. + """ + + @abc.abstractmethod + def start(self) -> None: + """Initialise the backend.""" + + @abc.abstractmethod + def stop(self) -> None: + """Tear the backend down.""" + + @abc.abstractmethod + def reset(self) -> None: + """Clear episode-scoped state.""" + + @abc.abstractmethod + def get_action(self, obs_frame: dict | None) -> torch.Tensor | None: + """Return the next action tensor, or ``None`` if unavailable.""" + + def notify_observation(self, obs: dict) -> None: # noqa: B027 + """Publish the latest processed observation. Default: no-op.""" + + def pause(self) -> None: # noqa: B027 + """Pause background inference. Default: no-op.""" + + def resume(self) -> None: # noqa: B027 + """Resume background inference. 
Default: no-op.""" + + @property + def ready(self) -> bool: + """True once the backend can produce actions (e.g. warmup done).""" + return True + + @property + def failed(self) -> bool: + """True if an unrecoverable error occurred in the backend.""" + return False diff --git a/src/lerobot/rollout/inference/factory.py b/src/lerobot/rollout/inference/factory.py new file mode 100644 index 000000000..e600bed63 --- /dev/null +++ b/src/lerobot/rollout/inference/factory.py @@ -0,0 +1,128 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Inference engine configs and factory. + +Selection is explicit via ``--inference.type=sync|rtc``. Adding a new +backend requires registering its config subclass and dispatching it in +:func:`create_inference_engine`. 
+""" + +from __future__ import annotations + +import abc +import logging +from dataclasses import dataclass, field +from threading import Event + +import draccus + +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.policies.rtc.configuration_rtc import RTCConfig +from lerobot.processor import PolicyProcessorPipeline + +from ..robot_wrapper import ThreadSafeRobot +from .base import InferenceEngine +from .rtc import RTCInferenceEngine +from .sync import SyncInferenceEngine + +logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# Configs +# --------------------------------------------------------------------------- + + +@dataclass +class InferenceEngineConfig(draccus.ChoiceRegistry, abc.ABC): + """Abstract base for inference backend configuration. + + Use ``--inference.type=`` on the CLI to select a backend. + """ + + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + +@InferenceEngineConfig.register_subclass("sync") +@dataclass +class SyncInferenceConfig(InferenceEngineConfig): + """Inline synchronous inference (one policy call per control tick).""" + + +@InferenceEngineConfig.register_subclass("rtc") +@dataclass +class RTCInferenceConfig(InferenceEngineConfig): + """Real-Time Chunking: async policy inference in a background thread.""" + + # Eagerly constructed so draccus exposes nested fields directly on the CLI + # (e.g. ``--inference.rtc.execution_horizon=...``). 
+ rtc: RTCConfig = field(default_factory=RTCConfig) + queue_threshold: int = 30 + + +# --------------------------------------------------------------------------- +# Factory +# --------------------------------------------------------------------------- + + +def create_inference_engine( + config: InferenceEngineConfig, + *, + policy: PreTrainedPolicy, + preprocessor: PolicyProcessorPipeline, + postprocessor: PolicyProcessorPipeline, + robot_wrapper: ThreadSafeRobot, + hw_features: dict, + dataset_features: dict, + ordered_action_keys: list[str], + task: str, + fps: float, + device: str | None, + use_torch_compile: bool = False, + compile_warmup_inferences: int = 2, + shutdown_event: Event | None = None, +) -> InferenceEngine: + """Instantiate the appropriate inference engine from a config object.""" + logger.info("Creating inference engine: %s", config.type) + if isinstance(config, SyncInferenceConfig): + return SyncInferenceEngine( + policy=policy, + preprocessor=preprocessor, + postprocessor=postprocessor, + dataset_features=dataset_features, + ordered_action_keys=ordered_action_keys, + task=task, + device=device, + robot_type=robot_wrapper.robot_type, + ) + if isinstance(config, RTCInferenceConfig): + return RTCInferenceEngine( + policy=policy, + preprocessor=preprocessor, + postprocessor=postprocessor, + robot_wrapper=robot_wrapper, + rtc_config=config.rtc, + hw_features=hw_features, + task=task, + fps=fps, + device=device, + use_torch_compile=use_torch_compile, + compile_warmup_inferences=compile_warmup_inferences, + rtc_queue_threshold=config.queue_threshold, + shutdown_event=shutdown_event, + ) + raise ValueError(f"Unknown inference engine type: {type(config).__name__}") diff --git a/src/lerobot/rollout/inference/rtc.py b/src/lerobot/rollout/inference/rtc.py new file mode 100644 index 000000000..0eef62cef --- /dev/null +++ b/src/lerobot/rollout/inference/rtc.py @@ -0,0 +1,360 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Real-Time Chunking inference engine. + +A background thread produces action chunks asynchronously via +:meth:`policy.predict_action_chunk`. The main control loop polls +``get_action`` for the next ready action; observations flow the other +way via ``notify_observation``. +""" + +from __future__ import annotations + +import logging +import math +import time +import traceback +from threading import Event, Lock, Thread +from typing import Any + +import torch + +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.policies.rtc import ActionQueue, LatencyTracker, reanchor_relative_rtc_prefix +from lerobot.policies.rtc.configuration_rtc import RTCConfig +from lerobot.policies.utils import prepare_observation_for_inference +from lerobot.processor import ( + NormalizerProcessorStep, + PolicyProcessorPipeline, + RelativeActionsProcessorStep, +) +from lerobot.utils.feature_utils import build_dataset_frame + +from ..robot_wrapper import ThreadSafeRobot +from .base import InferenceEngine + +logger = logging.getLogger(__name__) + +# How long the RTC loop sleeps when paused, idle, or backpressured by a full queue. +_RTC_IDLE_SLEEP_S: float = 0.01 +# Backoff between transient inference errors (per consecutive failure). +_RTC_ERROR_RETRY_DELAY_S: float = 0.5 +# Consecutive transient errors tolerated before giving up and propagating shutdown. 
+_RTC_MAX_CONSECUTIVE_ERRORS: int = 10 +# Hard timeout for joining the RTC thread on stop(). +_RTC_JOIN_TIMEOUT_S: float = 3.0 + + +# --------------------------------------------------------------------------- +# RTC helpers +# --------------------------------------------------------------------------- + + +def _normalize_prev_actions_length(prev_actions: torch.Tensor, target_steps: int) -> torch.Tensor: + """Pad or truncate RTC prefix actions to a fixed length for stable compiled inference.""" + if prev_actions.ndim != 2: + raise ValueError(f"Expected 2D [T, A] tensor, got shape={tuple(prev_actions.shape)}") + steps, action_dim = prev_actions.shape + if steps == target_steps: + return prev_actions + if steps > target_steps: + return prev_actions[:target_steps] + padded = torch.zeros((target_steps, action_dim), dtype=prev_actions.dtype, device=prev_actions.device) + padded[:steps] = prev_actions + return padded + + +# --------------------------------------------------------------------------- +# RTCInferenceEngine +# --------------------------------------------------------------------------- + + +class RTCInferenceEngine(InferenceEngine): + """Async RTC inference: a background thread produces action chunks. + + ``get_action`` pops the next action from the shared queue (or + returns ``None`` if the queue is empty). The main loop should call + ``notify_observation`` every tick and ``pause``/``resume`` around + human-intervention phases. 
+ """ + + def __init__( + self, + policy: PreTrainedPolicy, + preprocessor: PolicyProcessorPipeline, + postprocessor: PolicyProcessorPipeline, + robot_wrapper: ThreadSafeRobot, + rtc_config: RTCConfig, + hw_features: dict, + task: str, + fps: float, + device: str | None, + use_torch_compile: bool = False, + compile_warmup_inferences: int = 2, + rtc_queue_threshold: int = 30, + shutdown_event: Event | None = None, + ) -> None: + self._policy = policy + self._preprocessor = preprocessor + self._postprocessor = postprocessor + self._robot = robot_wrapper + self._rtc_config = rtc_config + self._hw_features = hw_features + self._task = task + self._fps = fps + self._device = device or "cpu" + self._use_torch_compile = use_torch_compile + self._compile_warmup_inferences = compile_warmup_inferences + self._rtc_queue_threshold = rtc_queue_threshold + + self._action_queue: ActionQueue | None = None + self._obs_holder: dict[str, Any] = {} + self._obs_lock = Lock() + self._policy_active = Event() + self._compile_warmup_done = Event() + self._shutdown_event = Event() + self._rtc_error = Event() + self._global_shutdown_event = shutdown_event + self._rtc_thread: Thread | None = None + + if not self._use_torch_compile: + self._compile_warmup_done.set() + logger.info("RTCInferenceEngine initialized (torch.compile disabled, no warmup needed)") + else: + logger.info( + "RTCInferenceEngine initialized (torch.compile enabled, %d warmup inferences)", + compile_warmup_inferences, + ) + + # Processor introspection for relative-action re-anchoring. 
+ self._relative_step = next( + (s for s in preprocessor.steps if isinstance(s, RelativeActionsProcessorStep) and s.enabled), + None, + ) + self._normalizer_step = next( + (s for s in preprocessor.steps if isinstance(s, NormalizerProcessorStep)), + None, + ) + if self._relative_step is not None: + if self._relative_step.action_names is None: + cfg_names = getattr(policy.config, "action_feature_names", None) + if cfg_names: + self._relative_step.action_names = list(cfg_names) + else: + self._relative_step.action_names = [ + k for k in robot_wrapper.action_features if k.endswith(".pos") + ] + logger.info("Relative actions enabled: RTC prefix will be re-anchored") + + # ------------------------------------------------------------------ + # Lifecycle + # ------------------------------------------------------------------ + + @property + def ready(self) -> bool: + """True once torch.compile warmup is complete (or immediately if compile is disabled).""" + return self._compile_warmup_done.is_set() + + @property + def failed(self) -> bool: + """True if the RTC background thread exited due to an unrecoverable error.""" + return self._rtc_error.is_set() + + @property + def action_queue(self) -> ActionQueue | None: + """The shared action queue between the RTC thread and the main loop.""" + return self._action_queue + + def start(self) -> None: + """Launch the RTC background thread.""" + self._action_queue = ActionQueue(self._rtc_config) + self._obs_holder = { + "obs": None, + "robot_type": self._robot.robot_type, + } + self._shutdown_event.clear() + self._rtc_thread = Thread( + target=self._rtc_loop, + daemon=True, + name="RTCInference", + ) + self._rtc_thread.start() + logger.info("RTC inference thread started") + + def stop(self) -> None: + """Signal the RTC thread to stop and wait for it.""" + logger.info("Stopping RTC inference thread...") + self._shutdown_event.set() + self._policy_active.clear() + if self._rtc_thread is not None and self._rtc_thread.is_alive(): + 
self._rtc_thread.join(timeout=_RTC_JOIN_TIMEOUT_S) + if self._rtc_thread.is_alive(): + logger.warning("RTC thread did not join within %.1fs", _RTC_JOIN_TIMEOUT_S) + else: + logger.info("RTC inference thread stopped") + self._rtc_thread = None + + def pause(self) -> None: + """Pause the RTC background thread.""" + logger.info("Pausing RTC inference thread") + self._policy_active.clear() + + def resume(self) -> None: + """Resume the RTC background thread.""" + logger.info("Resuming RTC inference thread") + self._policy_active.set() + + def reset(self) -> None: + """Reset the policy, processors, and action queue.""" + logger.info("Resetting RTC inference state (policy + processors + queue)") + self._policy.reset() + self._preprocessor.reset() + self._postprocessor.reset() + if self._action_queue is not None: + self._action_queue.clear() + + # ------------------------------------------------------------------ + # Action production (called from main thread) + # ------------------------------------------------------------------ + + def get_action(self, obs_frame: dict | None) -> torch.Tensor | None: + """Pop the next action from the RTC queue (ignores ``obs_frame``).""" + if self._action_queue is None: + return None + return self._action_queue.get() + + def notify_observation(self, obs: dict) -> None: + """Publish the latest observation for the RTC thread to consume.""" + with self._obs_lock: + self._obs_holder["obs"] = obs + + # ------------------------------------------------------------------ + # RTC: background inference thread + # ------------------------------------------------------------------ + + def _rtc_loop(self) -> None: + """Background thread that generates action chunks via RTC.""" + try: + latency_tracker = LatencyTracker() + time_per_chunk = 1.0 / self._fps + policy_device = torch.device(self._device) + + warmup_required = max(1, self._compile_warmup_inferences) if self._use_torch_compile else 0 + inference_count = 0 + consecutive_errors = 0 + + while 
not self._shutdown_event.is_set(): + if not self._policy_active.is_set(): + time.sleep(_RTC_IDLE_SLEEP_S) + continue + + queue = self._action_queue + with self._obs_lock: + obs = self._obs_holder.get("obs") + if queue is None or obs is None: + time.sleep(_RTC_IDLE_SLEEP_S) + continue + + if queue.qsize() <= self._rtc_queue_threshold: + try: + current_time = time.perf_counter() + idx_before = queue.get_action_index() + prev_actions = queue.get_left_over() + + latency = latency_tracker.max() + delay = math.ceil(latency / time_per_chunk) if latency else 0 + + obs_batch = build_dataset_frame(self._hw_features, obs, prefix="observation") + obs_batch = prepare_observation_for_inference( + obs_batch, policy_device, self._task, self._robot.robot_type + ) + obs_batch["task"] = [self._task] + + preprocessed = self._preprocessor(obs_batch) + + if prev_actions is not None and self._relative_step is not None: + # Rebase against the raw cached state so the leftover tail stays in + # the training-time coordinate frame. 
+ raw_state = self._relative_step.get_cached_state() + if raw_state is not None: + prev_abs = queue.get_processed_left_over() + if prev_abs is not None and prev_abs.numel() > 0: + prev_actions = reanchor_relative_rtc_prefix( + prev_actions_absolute=prev_abs, + current_state=raw_state, + relative_step=self._relative_step, + normalizer_step=self._normalizer_step, + policy_device=policy_device, + ) + + if prev_actions is not None: + prev_actions = _normalize_prev_actions_length( + prev_actions, target_steps=self._rtc_config.execution_horizon + ) + + actions = self._policy.predict_action_chunk( + preprocessed, inference_delay=delay, prev_chunk_left_over=prev_actions + ) + + original = actions.squeeze(0).clone() + processed = self._postprocessor(actions).squeeze(0) + new_latency = time.perf_counter() - current_time + new_delay = math.ceil(new_latency / time_per_chunk) + + inference_count += 1 + consecutive_errors = 0 + is_warmup = self._use_torch_compile and inference_count <= warmup_required + if is_warmup: + latency_tracker.reset() + else: + latency_tracker.add(new_latency) + + queue.merge(original, processed, new_delay, idx_before) + + if ( + is_warmup + and inference_count >= warmup_required + and not self._compile_warmup_done.is_set() + ): + self._compile_warmup_done.set() + logger.info("Compile warmup complete (%d inferences)", inference_count) + + logger.debug("RTC inference latency=%.2fs, queue=%d", new_latency, queue.qsize()) + + except Exception as e: + consecutive_errors += 1 + logger.error( + "RTC inference error (%d/%d): %s", + consecutive_errors, + _RTC_MAX_CONSECUTIVE_ERRORS, + e, + ) + logger.debug(traceback.format_exc()) + if consecutive_errors >= _RTC_MAX_CONSECUTIVE_ERRORS: + # Persistent failure: stop retrying and propagate shutdown. 
+ raise + time.sleep(_RTC_ERROR_RETRY_DELAY_S) + else: + time.sleep(_RTC_IDLE_SLEEP_S) + + except Exception as e: + logger.error("Fatal error in RTC thread: %s", e) + logger.error(traceback.format_exc()) + self._rtc_error.set() + # Unblock any warmup waiters so the main loop doesn't spin forever + self._compile_warmup_done.set() + # Signal the top-level shutdown so strategies exit their control loops + if self._global_shutdown_event is not None: + self._global_shutdown_event.set() diff --git a/src/lerobot/rollout/inference/sync.py b/src/lerobot/rollout/inference/sync.py new file mode 100644 index 000000000..2bb05b6ab --- /dev/null +++ b/src/lerobot/rollout/inference/sync.py @@ -0,0 +1,122 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Synchronous inference engine: inline policy call per control tick.""" + +from __future__ import annotations + +import logging +from contextlib import nullcontext +from copy import copy + +import torch + +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.policies.utils import make_robot_action, prepare_observation_for_inference +from lerobot.processor import PolicyProcessorPipeline + +from .base import InferenceEngine + +logger = logging.getLogger(__name__) + + +# TODO(Steven): support relative-action policies. 
The per-tick flow refreshes +# ``RelativeActionsProcessorStep._last_state`` every call, so cached chunk +# actions popped on later ticks get reanchored to the *current* robot state and +# absolute targets drift through the chunk. Relative-action policies are +# rejected at context-build time today; RTC postprocesses the whole chunk and +# is unaffected. +# +# Candidate fix: drive the policy via ``predict_action_chunk`` and serve a +# local FIFO of postprocessed actions. Eliminates drift by construction and +# saves per-tick pre/post work, but bypasses ``select_action`` — needs +# fallbacks for SAC (raises), ACT temporal ensembling (ensembler lives in +# ``select_action``), and Diffusion-family (obs-history queues populated as a +# side effect of ``select_action``). + + +class SyncInferenceEngine(InferenceEngine): + """Inline synchronous inference: compute one action per call. + + ``get_action`` runs the full policy pipeline (pre/post-processor + + ``select_action``) on the given observation frame and returns a + CPU action tensor reordered to match the dataset action keys. 
+ """ + + def __init__( + self, + policy: PreTrainedPolicy, + preprocessor: PolicyProcessorPipeline, + postprocessor: PolicyProcessorPipeline, + dataset_features: dict, + ordered_action_keys: list[str], + task: str, + device: str | None, + robot_type: str, + ) -> None: + self._policy = policy + self._preprocessor = preprocessor + self._postprocessor = postprocessor + self._dataset_features = dataset_features + self._ordered_action_keys = ordered_action_keys + self._task = task + self._device = torch.device(device or "cpu") + self._robot_type = robot_type + logger.info( + "SyncInferenceEngine initialized (device=%s, action_keys=%d)", + self._device, + len(ordered_action_keys), + ) + + def start(self) -> None: + """No background resources to start.""" + logger.info("SyncInferenceEngine started (inline mode — no background thread)") + + def stop(self) -> None: + """No background resources to stop.""" + logger.info("SyncInferenceEngine stopped") + + def reset(self) -> None: + """Reset the policy and pre/post-processors.""" + logger.info("Resetting sync inference state (policy + processors)") + self._policy.reset() + self._preprocessor.reset() + self._postprocessor.reset() + + def get_action(self, obs_frame: dict | None) -> torch.Tensor | None: + """Run the full inference pipeline on ``obs_frame`` and return an action tensor.""" + if obs_frame is None: + return None + # Shallow copy is intentional: the caller (`send_next_action`) builds + # ``obs_frame`` fresh per tick via ``build_dataset_frame``, so the + # tensor/array values are not shared with any other reader. 
+ observation = copy(obs_frame) + autocast_ctx = ( + torch.autocast(device_type=self._device.type) + if self._device.type == "cuda" and self._policy.config.use_amp + else nullcontext() + ) + with torch.inference_mode(), autocast_ctx: + observation = prepare_observation_for_inference( + observation, self._device, self._task, self._robot_type + ) + observation = self._preprocessor(observation) + action = self._policy.select_action(observation) + action = self._postprocessor(action) + action_tensor = action.squeeze(0).cpu() + + # Reorder to match dataset action ordering so the caller can treat + # the returned tensor uniformly across backends. + action_dict = make_robot_action(action_tensor, self._dataset_features) + return torch.tensor([action_dict[k] for k in self._ordered_action_keys]) diff --git a/src/lerobot/rollout/ring_buffer.py b/src/lerobot/rollout/ring_buffer.py new file mode 100644 index 000000000..2c0a06301 --- /dev/null +++ b/src/lerobot/rollout/ring_buffer.py @@ -0,0 +1,112 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Memory-bounded ring buffer for the Highlight Reel rollout strategy.""" + +from __future__ import annotations + +from collections import deque + +import numpy as np +import torch + + +class RolloutRingBuffer: + """Fixed-capacity circular buffer for observation/action frames. 
+ + Stores the last *N* seconds of telemetry in memory, bounded by both + time (``max_frames``) and memory (``max_memory_bytes``). When either + limit is reached the oldest frames are evicted. + + .. note:: + This class is **single-threaded**. ``append``/``drain``/``clear`` + must all be called from the same thread (the rollout main loop). + Concurrent access from a background thread will corrupt + ``_current_bytes`` accounting. + + Parameters + ---------- + max_seconds: + Maximum duration of buffered telemetry. + max_memory_mb: + Hard memory cap in MiB. Frames are evicted when the estimated + total size exceeds this. + fps: + Frames per second — used to convert ``max_seconds`` to a frame + count. + """ + + def __init__(self, max_seconds: float = 30.0, max_memory_mb: int = 2048, fps: float = 30.0) -> None: + self._max_frames = int(max_seconds * fps) + self._max_bytes = int(max_memory_mb * 1024 * 1024) + self._buffer: deque[dict] = deque(maxlen=self._max_frames) + self._current_bytes: int = 0 + + # ------------------------------------------------------------------ + # Public API + # ------------------------------------------------------------------ + + def append(self, frame: dict) -> None: + """Add *frame* to the buffer, evicting the oldest if at capacity.""" + frame_bytes = _estimate_frame_bytes(frame) + + # Evict oldest frames until we are under the memory cap + while self._current_bytes + frame_bytes > self._max_bytes and self._buffer: + evicted = self._buffer.popleft() + self._current_bytes -= _estimate_frame_bytes(evicted) + + self._buffer.append(frame) + self._current_bytes += frame_bytes + + def drain(self) -> list[dict]: + """Return all buffered frames and clear the buffer.""" + frames = list(self._buffer) + self._buffer.clear() + self._current_bytes = 0 + return frames + + def clear(self) -> None: + """Discard all buffered frames.""" + self._buffer.clear() + self._current_bytes = 0 + + def __len__(self) -> int: + return len(self._buffer) + + @property + 
def estimated_bytes(self) -> int: + """Estimated total byte size of all buffered frames.""" + return self._current_bytes + + +# ------------------------------------------------------------------ +# Helpers +# ------------------------------------------------------------------ + + +def _estimate_frame_bytes(frame: dict) -> int: + """Rough byte estimate for a single frame dictionary.""" + total = 0 + for v in frame.values(): + if isinstance(v, torch.Tensor): + # ``torch.Tensor`` has no ``nbytes``; compute it explicitly so the + # memory cap is honoured even when frames hold unconverted tensors. + total += v.nelement() * v.element_size() + elif isinstance(v, np.ndarray) or hasattr(v, "nbytes"): + total += v.nbytes + elif isinstance(v, (int, float)): + total += 8 + elif isinstance(v, (str, bytes)): + total += len(v) + return max(total, 1) # avoid zero-size frames diff --git a/src/lerobot/rollout/robot_wrapper.py b/src/lerobot/rollout/robot_wrapper.py new file mode 100644 index 000000000..44f744812 --- /dev/null +++ b/src/lerobot/rollout/robot_wrapper.py @@ -0,0 +1,79 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Thread-safe robot wrapper for concurrent observation/action access.""" + +from __future__ import annotations + +from threading import Lock +from typing import Any + +from lerobot.robots import Robot + + +class ThreadSafeRobot: + """Lock-protected wrapper around a :class:`Robot` for use with background threads. + + When RTC inference runs in a background thread while the main loop + executes actions, both threads may access the robot concurrently. + This wrapper serialises ``get_observation`` and ``send_action`` calls. + + Read-only properties are proxied without the lock since they don't + mutate hardware state. + """ + + def __init__(self, robot: Robot) -> None: + self._robot = robot + self._lock = Lock() + + # -- Lock-protected I/O -------------------------------------------------- + + def get_observation(self) -> dict[str, Any]: + with self._lock: + return self._robot.get_observation() + + def send_action(self, action: dict[str, Any] | Any) -> Any: + with self._lock: + return self._robot.send_action(action) + + # -- Read-only proxies (no lock needed) ----------------------------------- + + @property + def observation_features(self) -> dict: + return self._robot.observation_features + + @property + def action_features(self) -> dict: + return self._robot.action_features + + @property + def name(self) -> str: + return self._robot.name + + @property + def robot_type(self) -> str: + return self._robot.robot_type + + @property + def cameras(self): + return getattr(self._robot, "cameras", {}) + + @property + def is_connected(self) -> bool: + return self._robot.is_connected + + @property + def inner(self) -> Robot: + """Access the underlying robot (e.g. for connect/disconnect).""" + return self._robot diff --git a/src/lerobot/rollout/strategies/__init__.py b/src/lerobot/rollout/strategies/__init__.py new file mode 100644 index 000000000..554327073 --- /dev/null +++ b/src/lerobot/rollout/strategies/__init__.py @@ -0,0 +1,36 @@ +# Copyright 2025 The HuggingFace Inc. 
team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Rollout strategies — public API re-exports.""" + +from .base import BaseStrategy +from .core import RolloutStrategy, estimate_max_episode_seconds, safe_push_to_hub, send_next_action +from .dagger import DAggerEvents, DAggerPhase, DAggerStrategy +from .factory import create_strategy +from .highlight import HighlightStrategy +from .sentry import SentryStrategy + +__all__ = [ + "BaseStrategy", + "DAggerEvents", + "DAggerPhase", + "DAggerStrategy", + "HighlightStrategy", + "RolloutStrategy", + "SentryStrategy", + "create_strategy", + "estimate_max_episode_seconds", + "safe_push_to_hub", + "send_next_action", +] diff --git a/src/lerobot/rollout/strategies/base.py b/src/lerobot/rollout/strategies/base.py new file mode 100644 index 000000000..e47b65209 --- /dev/null +++ b/src/lerobot/rollout/strategies/base.py @@ -0,0 +1,85 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base rollout strategy: autonomous policy execution with no data recording.""" + +from __future__ import annotations + +import logging +import time + +from lerobot.utils.robot_utils import precise_sleep + +from ..context import RolloutContext +from .core import RolloutStrategy, send_next_action + +logger = logging.getLogger(__name__) + + +class BaseStrategy(RolloutStrategy): + """Autonomous policy rollout with no data recording. + + All actions flow through the ``robot_action_processor`` pipeline + before reaching the robot. + """ + + def setup(self, ctx: RolloutContext) -> None: + """Initialise the inference engine.""" + self._init_engine(ctx) + logger.info("Base strategy ready") + + def run(self, ctx: RolloutContext) -> None: + """Run the autonomous control loop until shutdown or duration expires.""" + engine = self._engine + cfg = ctx.runtime.cfg + robot = ctx.hardware.robot_wrapper + interpolator = self._interpolator + + control_interval = interpolator.get_control_interval(cfg.fps) + + start_time = time.perf_counter() + engine.resume() + logger.info("Base strategy control loop started") + + while not ctx.runtime.shutdown_event.is_set(): + loop_start = time.perf_counter() + + if cfg.duration > 0 and (time.perf_counter() - start_time) >= cfg.duration: + logger.info("Duration limit reached (%.0fs)", cfg.duration) + break + + obs = robot.get_observation() + obs_processed = self._process_observation_and_notify(ctx.processors, obs) + + if self._handle_warmup(cfg.use_torch_compile, loop_start, control_interval): + continue + + action_dict = send_next_action(obs_processed, obs, ctx, interpolator) + self._log_telemetry(obs_processed, action_dict, ctx.runtime) + + dt = time.perf_counter() - loop_start + if (sleep_t := control_interval - dt) > 0: + precise_sleep(sleep_t) + else: + logger.warning( + f"Record loop is running slower ({1 / dt:.1f} Hz) than the target 
FPS ({cfg.fps} Hz). Dataset frames might be dropped and robot control might be unstable. Common causes are: 1) Camera FPS not keeping up 2) Policy inference taking too long 3) CPU starvation" + ) + + def teardown(self, ctx: RolloutContext) -> None: + """Disconnect hardware and stop inference.""" + self._teardown_hardware( + ctx.hardware, + return_to_initial_position=ctx.runtime.cfg.return_to_initial_position, + ) + logger.info("Base strategy teardown complete") diff --git a/src/lerobot/rollout/strategies/core.py b/src/lerobot/rollout/strategies/core.py new file mode 100644 index 000000000..9c897522f --- /dev/null +++ b/src/lerobot/rollout/strategies/core.py @@ -0,0 +1,304 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Rollout strategy ABC and shared action-dispatch helper.""" + +from __future__ import annotations + +import abc +import logging +import time +from typing import TYPE_CHECKING + +from lerobot.datasets.utils import DEFAULT_VIDEO_FILE_SIZE_IN_MB +from lerobot.utils.action_interpolator import ActionInterpolator +from lerobot.utils.constants import OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame +from lerobot.utils.robot_utils import precise_sleep +from lerobot.utils.visualization_utils import log_rerun_data + +from ..inference import InferenceEngine + +if TYPE_CHECKING: + from ..configs import RolloutStrategyConfig + from ..context import HardwareContext, ProcessorContext, RolloutContext, RuntimeContext + +logger = logging.getLogger(__name__) + + +class RolloutStrategy(abc.ABC): + """Abstract base for rollout execution strategies. + + Each concrete strategy implements a self-contained control loop with + its own recording/interaction semantics. Strategies are mutually + exclusive — only one runs per session. + """ + + def __init__(self, config: RolloutStrategyConfig) -> None: + self.config = config + self._engine: InferenceEngine | None = None + self._interpolator: ActionInterpolator | None = None + self._warmup_flushed: bool = False + self._cached_obs_processed: dict | None = None + + def _init_engine(self, ctx: RolloutContext) -> None: + """Attach the inference engine and action interpolator, then start the backend. + + Creates an :class:`ActionInterpolator` from the config's + ``interpolation_multiplier`` and starts the inference engine. + Call this from ``setup()`` so strategies share identical + initialisation without duplicating code. 
+ """ + self._interpolator = ActionInterpolator(multiplier=ctx.runtime.cfg.interpolation_multiplier) + self._engine = ctx.policy.inference + logger.info("Starting inference engine...") + self._engine.reset() + self._engine.start() + self._warmup_flushed = False + self._cached_obs_processed = None + logger.info("Inference engine started") + + def _process_observation_and_notify(self, processors: ProcessorContext, obs_raw: dict) -> dict: + """Run the observation processor and notify the engine — throttled to policy ticks. + + Callers are responsible for calling ``robot.get_observation()`` every loop + iteration so ``obs_raw`` stays fresh for the action post-processor. This + helper gates only the comparatively expensive bits — the processor pipeline + and ``engine.notify_observation`` — to fire when the interpolator signals + it needs a new action (once per ``interpolation_multiplier`` ticks). On + interpolated ticks the cached ``obs_processed`` is reused. + + With ``interpolation_multiplier == 1`` this is equivalent to the unthrottled + path: ``needs_new_action()`` is True every tick. + + The cache is implicitly invalidated whenever ``interpolator.reset()`` is + called (warmup completion, DAgger phase transitions back to AUTONOMOUS), + because reset makes ``needs_new_action()`` return True on the next call. + """ + if self._cached_obs_processed is None or self._interpolator.needs_new_action(): + obs_processed = processors.robot_observation_processor(obs_raw) + self._engine.notify_observation(obs_processed) + self._cached_obs_processed = obs_processed + return self._cached_obs_processed + + def _handle_warmup(self, use_torch_compile: bool, loop_start: float, control_interval: float) -> bool: + """Handle torch.compile warmup phase. + + Returns ``True`` if the caller should ``continue`` (still warming + up). On the first post-warmup iteration the engine and + interpolator are reset so stale warmup state is discarded. 
+ """ + engine = self._engine + interpolator = self._interpolator + if not use_torch_compile: + return False + if not engine.ready: + dt = time.perf_counter() - loop_start + if (sleep_t := control_interval - dt) > 0: + precise_sleep(sleep_t) + return True + if not self._warmup_flushed: + logger.info("Warmup complete — flushing stale state and resuming engine") + engine.reset() + interpolator.reset() + self._warmup_flushed = True + engine.resume() + return False + + def _teardown_hardware(self, hw: HardwareContext, return_to_initial_position: bool = True) -> None: + """Stop the inference engine, optionally return robot to initial position, and disconnect hardware.""" + if self._engine is not None: + logger.info("Stopping inference engine...") + self._engine.stop() + robot = hw.robot_wrapper.inner + if robot.is_connected: + if return_to_initial_position and hw.initial_position: + logger.info("Returning robot to initial position before shutdown...") + self._return_to_initial_position(hw) + elif not return_to_initial_position: + logger.info( + "Skipping return-to-initial-position (disabled by config); leaving robot in final pose." 
+ ) + logger.info("Disconnecting robot...") + robot.disconnect() + teleop = hw.teleop + if teleop is not None and teleop.is_connected: + logger.info("Disconnecting teleoperator...") + teleop.disconnect() + + @staticmethod + def _return_to_initial_position(hw: HardwareContext, duration_s: float = 3.0, fps: int = 50) -> None: + """Smoothly interpolate the robot back to its initial position.""" + robot = hw.robot_wrapper + target = hw.initial_position + try: + current_obs = robot.get_observation() + current_pos = {k: v for k, v in current_obs.items() if k in target} + steps = max(int(duration_s * fps), 1) + for step in range(1, steps + 1): + t = step / steps + interp = {} + for k in current_pos: + interp[k] = current_pos[k] * (1 - t) + target[k] * t + robot.send_action(interp) + precise_sleep(1 / fps) + except Exception as e: + logger.warning("Could not return to initial position: %s", e) + + @staticmethod + def _log_telemetry( + obs_processed: dict | None, + action_dict: dict | None, + runtime_ctx: RuntimeContext, + ) -> None: + """Log observation/action telemetry to Rerun if display_data is enabled.""" + cfg = runtime_ctx.cfg + if not cfg.display_data: + return + log_rerun_data( + observation=obs_processed, + action=action_dict, + compress_images=cfg.display_compressed_images, + ) + + @abc.abstractmethod + def setup(self, ctx: RolloutContext) -> None: + """Strategy-specific initialisation (keyboard listeners, buffers, etc.).""" + + @abc.abstractmethod + def run(self, ctx: RolloutContext) -> None: + """Main rollout loop. 
Returns when shutdown is requested or duration expires.""" + + @abc.abstractmethod + def teardown(self, ctx: RolloutContext) -> None: + """Cleanup: save dataset, stop threads, disconnect hardware.""" + + +# --------------------------------------------------------------------------- +# Shared helpers +# --------------------------------------------------------------------------- + + +def safe_push_to_hub(dataset, tags=None, private=False) -> bool: + """Push dataset to hub, skipping if no episodes have been saved. + + Returns ``True`` if the push was attempted, ``False`` if skipped. + """ + if dataset.num_episodes == 0: + logger.warning("No episodes saved — skipping push to hub") + return False + dataset.push_to_hub(tags=tags, private=private) + return True + + +def estimate_max_episode_seconds( + dataset_features: dict, + fps: float, + target_size_mb: float = DEFAULT_VIDEO_FILE_SIZE_IN_MB, +) -> float: + """Conservatively estimate how many seconds of video will exceed *target_size_mb*. + + Each camera produces its own video file, so the episode duration is + driven by the **slowest** camera to fill ``target_size_mb`` — i.e. + the one with the fewest pixels per frame (lowest bitrate). + + Uses a deliberately **low** bits-per-pixel estimate so the computed + duration is *longer* than reality. By the time the timer fires the + actual video file is guaranteed to have crossed the target size, + which aligns episode boundaries with the dataset's video-file + chunking — each ``push_to_hub`` uploads complete files rather than + re-uploading a still-growing one. + + The estimate ignores codec-specific settings (CRF, preset) on purpose: + we only need a rough lower bound on bitrate, not a precise prediction. + + Falls back to 300 s (5 min) when no video features are present. + """ + # 0.1 bits-per-pixel is a *low* estimate for CRF-30 streaming video of + # robot footage (real-world is typically 0.1 – 0.3 bpp). 
Under- + # estimating the bitrate over-estimates the time → the episode will be + # *larger* than target_size_mb when we save, which is what we want. + conservative_bpp = 0.1 + + # Collect per-camera pixel counts — each camera has its own video file. + camera_pixels = [] + for feat in dataset_features.values(): + if feat.get("dtype") == "video": + shape = feat.get("shape", ()) + + # (H, W, C) — bits-per-pixel is a per-spatial-pixel metric, + # so we exclude the channel dimension from the count. + if len(shape) == 3: + pixels = shape[0] * shape[1] + camera_pixels.append(pixels) + else: + raise ValueError(f"Unexpected video feature shape: {shape}") + + if not camera_pixels: + return 300.0 + + # Use the smallest camera: it produces the lowest bitrate and therefore + # takes the longest to reach the target — the conservative choice. + min_pixels = min(camera_pixels) + bits_per_frame = min_pixels * conservative_bpp + bytes_per_second = (bits_per_frame * fps) / 8 + + # Guard against division by zero just in case + if bytes_per_second <= 0: + return 300.0 + + return (target_size_mb * 1024 * 1024) / bytes_per_second + + +# --------------------------------------------------------------------------- +# Shared action-dispatch helper +# --------------------------------------------------------------------------- + + +def send_next_action( + obs_processed: dict, + obs_raw: dict, + ctx: RolloutContext, + interpolator: ActionInterpolator, +) -> dict | None: + """Dispatch the next action to the robot. + + Pulls the next action tensor from the inference engine, feeds the + interpolator, and sends the interpolated action through the + ``robot_action_processor`` to the robot. Works identically for + sync and async backends — the rollout strategy never needs to branch. + + Returns the action dict that was sent, or ``None`` if no action was + ready (e.g. empty async queue, interpolator not yet primed). 
+ """ + engine = ctx.policy.inference + features = ctx.data.dataset_features + ordered_keys = ctx.data.ordered_action_keys + + if interpolator.needs_new_action(): + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_tensor = engine.get_action(obs_frame) + if action_tensor is not None: + interpolator.add(action_tensor.cpu()) + + interp = interpolator.get() + if interp is None: + return None + + if len(interp) != len(ordered_keys): + raise ValueError(f"Interpolated tensor length ({len(interp)}) != action keys ({len(ordered_keys)})") + action_dict = {k: interp[i].item() for i, k in enumerate(ordered_keys)} + processed = ctx.processors.robot_action_processor((action_dict, obs_raw)) + ctx.hardware.robot_wrapper.send_action(processed) + return action_dict diff --git a/src/lerobot/rollout/strategies/dagger.py b/src/lerobot/rollout/strategies/dagger.py new file mode 100644 index 000000000..1bd6d9bb0 --- /dev/null +++ b/src/lerobot/rollout/strategies/dagger.py @@ -0,0 +1,832 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""DAgger rollout strategy: Human-in-the-Loop data collection. + +Implements the RaC paradigm (Recovery and Correction) for interactive +imitation learning. Alternates between autonomous policy execution and +human intervention via teleoperator. + +Input is controlled via either a keyboard or foot pedal, selected by +the ``input_device`` config field. 
Each device exposes three actions: + + 1. **pause_resume** — Toggle policy execution (AUTONOMOUS <-> PAUSED). + 2. **correction** — Toggle correction recording (PAUSED <-> CORRECTING). + 3. **upload** — Push dataset to hub on demand (corrections-only mode). + ESC (keyboard only) — Stop session. + +Recording modes: + ``record_autonomous=True``: Sentry-like continuous recording with + time-based episode rotation. Both autonomous and correction + frames are recorded; corrections tagged ``intervention=True``. + ``record_autonomous=False``: Only correction windows are recorded. + Each correction (start to stop) becomes one episode. + +Teleoperator handover: + On AUTONOMOUS → PAUSED, actuated teleops (those with non-empty + ``feedback_features``, e.g. SO-101, OpenArmMini) are smoothly driven to + the follower's last position via ``send_feedback`` so the operator takes + over without a jerk. Non-actuated teleops cannot be driven, + so on PAUSED → CORRECTING the follower is instead slid to the teleop's + current pose before the correction begins. 
+""" + +from __future__ import annotations + +import contextlib +import enum +import logging +import os +import sys +import time +from concurrent.futures import Future, ThreadPoolExecutor +from threading import Event, Lock +from typing import Any + +import numpy as np + +from lerobot.common.control_utils import is_headless +from lerobot.datasets import VideoEncodingManager +from lerobot.datasets.utils import DEFAULT_VIDEO_FILE_SIZE_IN_MB +from lerobot.teleoperators import Teleoperator +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame +from lerobot.utils.import_utils import _pynput_available +from lerobot.utils.pedal import start_pedal_listener +from lerobot.utils.robot_utils import precise_sleep +from lerobot.utils.utils import log_say + +from ..configs import DAggerKeyboardConfig, DAggerPedalConfig, DAggerStrategyConfig +from ..context import RolloutContext +from ..robot_wrapper import ThreadSafeRobot +from .core import RolloutStrategy, estimate_max_episode_seconds, safe_push_to_hub, send_next_action + +PYNPUT_AVAILABLE = _pynput_available +keyboard = None +if PYNPUT_AVAILABLE: + try: + if ("DISPLAY" not in os.environ) and ("linux" in sys.platform): + logging.info("No DISPLAY set. 
Skipping pynput import.") + PYNPUT_AVAILABLE = False + else: + from pynput import keyboard + except Exception as e: + PYNPUT_AVAILABLE = False + logging.info(f"Could not import pynput: {e}") + +logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# DAgger state machine +# --------------------------------------------------------------------------- + + +class DAggerPhase(enum.Enum): + """Observable phases of a DAgger episode.""" + + AUTONOMOUS = "autonomous" # Policy driving + PAUSED = "paused" # Engine paused, teleop aligned, awaiting input + CORRECTING = "correcting" # Human driving via teleop, recording interventions + + +# Valid (current_phase, event) -> next_phase +_DAGGER_TRANSITIONS: dict[tuple[DAggerPhase, str], DAggerPhase] = { + (DAggerPhase.AUTONOMOUS, "pause_resume"): DAggerPhase.PAUSED, + (DAggerPhase.PAUSED, "pause_resume"): DAggerPhase.AUTONOMOUS, + (DAggerPhase.PAUSED, "correction"): DAggerPhase.CORRECTING, + (DAggerPhase.CORRECTING, "correction"): DAggerPhase.PAUSED, +} + + +class DAggerEvents: + """Thread-safe container for DAgger input device events. + + The keyboard/pedal threads write transition requests; the main loop + consumes them. + """ + + def __init__(self) -> None: + self._lock = Lock() + self._phase = DAggerPhase.AUTONOMOUS + self._pending_transition: str | None = None + + # Session-level flags + self.stop_recording = Event() + self.upload_requested = Event() + + # -- Thread-safe phase access ------------------------------------------ + + @property + def phase(self) -> DAggerPhase: + """Current phase of the DAgger state machine.""" + with self._lock: + return self._phase + + @phase.setter + def phase(self, value: DAggerPhase) -> None: + with self._lock: + self._phase = value + + def request_transition(self, event: str) -> None: + """Request a phase transition (called from keyboard/pedal threads). 
+ + Only enqueues the request if it corresponds to a valid transition + from the current phase, preventing impossible state changes. + """ + with self._lock: + if (self._phase, event) in _DAGGER_TRANSITIONS: + self._pending_transition = event + + def consume_transition(self) -> tuple[DAggerPhase, DAggerPhase] | None: + """Consume a pending transition (called from main loop).""" + with self._lock: + if self._pending_transition is None: + return None + key = (self._phase, self._pending_transition) + self._pending_transition = None + new_phase = _DAGGER_TRANSITIONS.get(key) + if new_phase is None: + return None + old_phase = self._phase + self._phase = new_phase + return old_phase, new_phase + + def reset(self) -> None: + """Reset all transient state for a fresh session.""" + with self._lock: + self._phase = DAggerPhase.AUTONOMOUS + self._pending_transition = None + self.upload_requested.clear() + + +# --------------------------------------------------------------------------- +# Teleoperator helpers +# --------------------------------------------------------------------------- + + +def _teleop_supports_feedback(teleop: Teleoperator) -> bool: + """Return True when the teleop can receive position feedback (is actuated). + TODO(Maxime): See if it is possible to unify this interface across teleops instead of duck-typing. + """ + return ( + bool(teleop.feedback_features) + and hasattr(teleop, "disable_torque") + and hasattr(teleop, "enable_torque") + ) + + +def _teleop_smooth_move_to( + teleop: Teleoperator, target_pos: dict, duration_s: float = 2.0, fps: int = 30 +) -> None: + """Smoothly move an actuated teleop to ``target_pos`` via linear interpolation. + + Requires the teleoperator to support feedback + (i.e. have non-empty ``feedback_features`` and implement ``disable_torque`` / ``enable_torque``). + + TODO(Maxime): This blocks up to ``duration_s`` seconds, during this time + the follower robot doesn't receive new actions, this could be an issue on LeKiwi. 
+ """ + teleop.enable_torque() + current = teleop.get_action() + steps = max(int(duration_s * fps), 1) + + for step in range(steps + 1): + t = step / steps + interp = { + k: current[k] * (1 - t) + target_pos[k] * t if k in target_pos else current[k] for k in current + } + teleop.send_feedback(interp) + time.sleep(1 / fps) + + +def _follower_smooth_move_to( + robot: ThreadSafeRobot, current: dict, target: dict, duration_s: float = 1.0, fps: int = 30 +) -> None: + """Smoothly move the follower robot from ``current`` to ``target`` action. + + Used when the teleop is non-actuated: instead of driving the leader arm + to the follower, we bring the follower to the teleop's current pose. + Both ``current`` and ``target`` must be in robot-action key space. + """ + steps = max(int(duration_s * fps), 1) + + for step in range(steps + 1): + t = step / steps + interp = {k: current[k] * (1 - t) + target[k] * t if k in target else current[k] for k in current} + robot.send_action(interp) + time.sleep(1 / fps) + + +# --------------------------------------------------------------------------- +# Input device handlers +# --------------------------------------------------------------------------- + + +def _init_dagger_keyboard(events: DAggerEvents, cfg: DAggerKeyboardConfig): + """Initialise keyboard listener with DAgger 3-key controls. + + Returns the pynput Listener (or ``None`` in headless mode or when + pynput is unavailable). 
+ """ + if not PYNPUT_AVAILABLE or is_headless(): + logger.warning("Headless environment or pynput unavailable — keyboard controls disabled") + return None + + # Map config key names to pynput Key objects for special keys + special_keys = { + "space": keyboard.Key.space, + "tab": keyboard.Key.tab, + "enter": keyboard.Key.enter, + } + + def _resolve_key(key) -> str | None: + """Resolve a pynput key event to a config-comparable string.""" + if key == keyboard.Key.esc: + return "esc" + for name, pynput_key in special_keys.items(): + if key == pynput_key: + return name + if hasattr(key, "char") and key.char: + return key.char + return None + + # Build mapping: resolved key string -> DAgger event name + key_to_event = { + cfg.pause_resume: "pause_resume", + cfg.correction: "correction", + } + + def on_press(key): + try: + resolved = _resolve_key(key) + if resolved is None: + return + if resolved == "esc": + logger.info("Stop recording...") + events.stop_recording.set() + return + if resolved in key_to_event: + events.request_transition(key_to_event[resolved]) + if resolved == cfg.upload: + events.upload_requested.set() + except Exception as e: + logger.debug("Key error: %s", e) + + listener = keyboard.Listener(on_press=on_press) + listener.start() + logger.info( + "DAgger keyboard listener started (pause_resume='%s', correction='%s', upload='%s', ESC=stop)", + cfg.pause_resume, + cfg.correction, + cfg.upload, + ) + return listener + + +def _init_dagger_pedal(events: DAggerEvents, cfg: DAggerPedalConfig): + """Initialise foot pedal listener with DAgger 3-pedal controls. + + Returns the pedal listener thread (or ``None`` if evdev is unavailable). 
+ """ + code_to_event = { + cfg.pause_resume: "pause_resume", + cfg.correction: "correction", + } + + def on_press(code: str) -> None: + if code in code_to_event: + events.request_transition(code_to_event[code]) + if code == cfg.upload: + events.upload_requested.set() + + logger.info("Initializing DAgger foot pedal listener (device=%s)", cfg.device_path) + return start_pedal_listener(on_press, device_path=cfg.device_path) + + +# --------------------------------------------------------------------------- +# DAgger Strategy +# --------------------------------------------------------------------------- + + +class DAggerStrategy(RolloutStrategy): + """Human-in-the-Loop data collection with intervention tagging. + + State machine:: + + AUTONOMOUS --(key1)--> PAUSED --(key2)--> CORRECTING --(key2)--> PAUSED + --(key1)--> AUTONOMOUS + + Recording modes: + ``record_autonomous=True``: Sentry-like continuous recording with + time-based episode rotation. Intervention frames tagged True. + ``record_autonomous=False``: Only correction windows recorded. + Each correction = one episode. Upload on demand via key3. 
+ """ + + config: DAggerStrategyConfig + + def __init__(self, config: DAggerStrategyConfig): + super().__init__(config) + self._listener = None + self._pedal_thread = None + self._events = DAggerEvents() + self._push_executor: ThreadPoolExecutor | None = None + self._pending_push: Future | None = None + self._needs_push = Event() + self._episode_lock = Lock() + + def setup(self, ctx: RolloutContext) -> None: + """Initialise the inference engine and input device listener.""" + self._init_engine(ctx) + self._push_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="dagger-push") + target_mb = self.config.target_video_file_size_mb or DEFAULT_VIDEO_FILE_SIZE_IN_MB + self._episode_duration_s = estimate_max_episode_seconds( + ctx.data.dataset_features, ctx.runtime.cfg.fps, target_size_mb=target_mb + ) + + if self.config.input_device == "keyboard": + self._listener = _init_dagger_keyboard(self._events, self.config.keyboard) + else: + self._pedal_thread = _init_dagger_pedal(self._events, self.config.pedal) + + record_mode = "all frames (sentry-like)" if self.config.record_autonomous else "corrections only" + logger.info( + "DAgger strategy ready (input=%s, episodes=%d, record=%s, episode_duration=%.0fs)", + self.config.input_device, + self.config.num_episodes, + record_mode, + self._episode_duration_s, + ) + + def run(self, ctx: RolloutContext) -> None: + """Run DAgger episodes with human-in-the-loop intervention.""" + if self.config.record_autonomous: + self._run_continuous(ctx) + else: + self._run_corrections_only(ctx) + + def teardown(self, ctx: RolloutContext) -> None: + """Stop listeners, finalise the dataset, and disconnect hardware.""" + play_sounds = ctx.runtime.cfg.play_sounds + logger.info("Stopping DAgger recording") + log_say("Stopping DAgger recording", play_sounds) + + if self._listener is not None and not is_headless(): + logger.info("Stopping keyboard listener") + self._listener.stop() + + # Flush any queued/running push cleanly + if 
self._push_executor is not None: + logger.info("Shutting down push executor (waiting for pending pushes)...") + self._push_executor.shutdown(wait=True) + self._push_executor = None + + if ctx.data.dataset is not None: + logger.info("Finalizing dataset...") + ctx.data.dataset.finalize() + if self._needs_push.is_set() and ctx.runtime.cfg.dataset and ctx.runtime.cfg.dataset.push_to_hub: + logger.info("Pushing final dataset to hub...") + if safe_push_to_hub( + ctx.data.dataset, + tags=ctx.runtime.cfg.dataset.tags, + private=ctx.runtime.cfg.dataset.private, + ): + logger.info("Dataset uploaded to hub") + log_say("Dataset uploaded to hub", play_sounds) + + self._teardown_hardware( + ctx.hardware, + return_to_initial_position=ctx.runtime.cfg.return_to_initial_position, + ) + logger.info("DAgger strategy teardown complete") + + # ------------------------------------------------------------------ + # Continuous recording mode (record_autonomous=True) + # ------------------------------------------------------------------ + + def _run_continuous(self, ctx: RolloutContext) -> None: + """Sentry-like continuous recording with intervention tagging. + + Episodes are auto-rotated every ``episode_time_s`` seconds and + uploaded in the background every ``upload_every_n_episodes`` episodes. + Both autonomous and correction frames are recorded; corrections are + tagged with ``intervention=True``. 
+ """ + engine = self._engine + cfg = ctx.runtime.cfg + robot = ctx.hardware.robot_wrapper + teleop = ctx.hardware.teleop + dataset = ctx.data.dataset + events = self._events + interpolator = self._interpolator + features = ctx.data.dataset_features + + control_interval = interpolator.get_control_interval(cfg.fps) + record_stride = max(1, cfg.interpolation_multiplier) + task_str = cfg.dataset.single_task if cfg.dataset else cfg.task + play_sounds = cfg.play_sounds + + engine.reset() + interpolator.reset() + events.reset() + engine.resume() + + last_action: dict[str, Any] | None = None + record_tick = 0 + start_time = time.perf_counter() + episode_start = time.perf_counter() + episodes_since_push = 0 + episode_duration_s = self._episode_duration_s + logger.info("DAgger continuous recording started (episode_duration=%.0fs)", episode_duration_s) + + with VideoEncodingManager(dataset): + try: + while not events.stop_recording.is_set() and not ctx.runtime.shutdown_event.is_set(): + loop_start = time.perf_counter() + + if cfg.duration > 0 and (time.perf_counter() - start_time) >= cfg.duration: + logger.info("Duration limit reached (%.0fs)", cfg.duration) + break + + # Process transitions + transition = events.consume_transition() + if transition is not None: + old_phase, new_phase = transition + self._apply_transition( + old_phase, + new_phase, + engine, + interpolator, + ctx, + last_action, + ) + if new_phase == DAggerPhase.AUTONOMOUS: + last_action = None + + phase = events.phase + obs = robot.get_observation() + + # --- CORRECTING: human teleop control --- + # TODO(Steven): teleop runs at the same FPS as the policy. To + # decouple the two, sample teleop at its native rate and + # interpolate to the control loop's tick rate. 
+ if phase == DAggerPhase.CORRECTING: + obs_processed = ctx.processors.robot_observation_processor(obs) + teleop_action = teleop.get_action() + processed_teleop = ctx.processors.teleop_action_processor((teleop_action, obs)) + robot_action_to_send = ctx.processors.robot_action_processor((processed_teleop, obs)) + robot.send_action(robot_action_to_send) + last_action = robot_action_to_send + self._log_telemetry(obs_processed, processed_teleop, ctx.runtime) + if record_tick % record_stride == 0: + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_frame = build_dataset_frame(features, processed_teleop, prefix=ACTION) + frame = { + **obs_frame, + **action_frame, + "task": task_str, + "intervention": np.array([True], dtype=bool), + } + dataset.add_frame(frame) + record_tick += 1 + + # --- PAUSED: hold position --- + elif phase == DAggerPhase.PAUSED: + if last_action: + robot.send_action(last_action) + + # --- AUTONOMOUS: policy control --- + else: + obs_processed = self._process_observation_and_notify(ctx.processors, obs) + + if self._handle_warmup(cfg.use_torch_compile, loop_start, control_interval): + continue + + action_dict = send_next_action(obs_processed, obs, ctx, interpolator) + if action_dict is not None: + self._log_telemetry(obs_processed, action_dict, ctx.runtime) + last_action = ctx.processors.robot_action_processor((action_dict, obs)) + if record_tick % record_stride == 0: + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_frame = build_dataset_frame(features, action_dict, prefix=ACTION) + frame = { + **obs_frame, + **action_frame, + "task": task_str, + "intervention": np.array([False], dtype=bool), + } + dataset.add_frame(frame) + record_tick += 1 + + # Episode rotation derived from the video file-size target. + # Saving is deferred while a correction is ongoing so the + # episode boundary lands on a clean autonomous frame. 
+ elapsed = time.perf_counter() - episode_start + if elapsed >= episode_duration_s and phase != DAggerPhase.CORRECTING: + with self._episode_lock: + dataset.save_episode() + episodes_since_push += 1 + self._needs_push.set() + logger.info( + "Episode saved (total: %d, elapsed: %.1fs)", + dataset.num_episodes, + elapsed, + ) + log_say(f"Episode {dataset.num_episodes} saved", play_sounds) + + if episodes_since_push >= self.config.upload_every_n_episodes: + self._background_push(dataset, cfg) + episodes_since_push = 0 + + episode_start = time.perf_counter() + + dt = time.perf_counter() - loop_start + if (sleep_t := control_interval - dt) > 0: + precise_sleep(sleep_t) + else: + logger.warning( + f"Record loop is running slower ({1 / dt:.1f} Hz) than the target FPS ({cfg.fps} Hz). Dataset frames might be dropped and robot control might be unstable. Common causes are: 1) Camera FPS not keeping up 2) Policy inference taking too long 3) CPU starvation" + ) + + finally: + logger.info("DAgger continuous control loop ended — pausing engine") + engine.pause() + with contextlib.suppress(Exception): + with self._episode_lock: + dataset.save_episode() + self._needs_push.set() + logger.info("Final in-progress episode saved") + + # ------------------------------------------------------------------ + # Corrections-only mode (record_autonomous=False) + # ------------------------------------------------------------------ + + def _run_corrections_only(self, ctx: RolloutContext) -> None: + """Record only human correction windows. Each correction = one episode. + + The policy runs autonomously without recording. When the user + pauses and starts a correction, frames are recorded with + ``intervention=True``. Stopping the correction saves the episode. + The dataset can be uploaded on demand via the upload key/pedal. 
+ """ + engine = self._engine + cfg = ctx.runtime.cfg + robot = ctx.hardware.robot_wrapper + teleop = ctx.hardware.teleop + dataset = ctx.data.dataset + events = self._events + interpolator = self._interpolator + features = ctx.data.dataset_features + + control_interval = interpolator.get_control_interval(cfg.fps) + record_stride = max(1, cfg.interpolation_multiplier) + task_str = cfg.dataset.single_task if cfg.dataset else cfg.task + play_sounds = cfg.play_sounds + + engine.reset() + interpolator.reset() + events.reset() + engine.resume() + + last_action: dict[str, Any] | None = None + start_time = time.perf_counter() + record_tick = 0 + recorded = 0 + logger.info( + "DAgger corrections-only recording started (target: %d episodes)", self.config.num_episodes + ) + + with VideoEncodingManager(dataset): + try: + while ( + recorded < self.config.num_episodes + and not events.stop_recording.is_set() + and not ctx.runtime.shutdown_event.is_set() + ): + loop_start = time.perf_counter() + + if cfg.duration > 0 and (time.perf_counter() - start_time) >= cfg.duration: + logger.info("Duration limit reached (%.0fs)", cfg.duration) + break + + # Process transitions + transition = events.consume_transition() + if transition is not None: + old_phase, new_phase = transition + self._apply_transition( + old_phase, + new_phase, + engine, + interpolator, + ctx, + last_action, + ) + if new_phase == DAggerPhase.AUTONOMOUS: + last_action = None + + # Correction ended -> save episode (blocking if not streaming) + if old_phase == DAggerPhase.CORRECTING and new_phase == DAggerPhase.PAUSED: + with self._episode_lock: + dataset.save_episode() + recorded += 1 + self._needs_push.set() + logger.info( + "Correction %d/%d saved", + recorded, + self.config.num_episodes, + ) + log_say(f"Correction {recorded} saved", play_sounds) + + # On-demand upload + if events.upload_requested.is_set(): + events.upload_requested.clear() + logger.info("Upload requested by user") + self._background_push(dataset, 
cfg) + + phase = events.phase + obs = robot.get_observation() + + # --- CORRECTING: human teleop control + recording --- + # TODO(Steven): teleop runs at the same FPS as the policy. To + # decouple the two, sample teleop at its native rate and + # interpolate to the control loop's tick rate. + if phase == DAggerPhase.CORRECTING: + obs_processed = ctx.processors.robot_observation_processor(obs) + teleop_action = teleop.get_action() + processed_teleop = ctx.processors.teleop_action_processor((teleop_action, obs)) + robot_action_to_send = ctx.processors.robot_action_processor((processed_teleop, obs)) + robot.send_action(robot_action_to_send) + last_action = robot_action_to_send + self._log_telemetry(obs_processed, processed_teleop, ctx.runtime) + + if record_tick % record_stride == 0: + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_frame = build_dataset_frame(features, processed_teleop, prefix=ACTION) + dataset.add_frame( + { + **obs_frame, + **action_frame, + "task": task_str, + "intervention": np.array([True], dtype=bool), + } + ) + record_tick += 1 + + # --- PAUSED: hold position --- + elif phase == DAggerPhase.PAUSED: + if last_action: + robot.send_action(last_action) + + # --- AUTONOMOUS: policy control (no recording) --- + else: + obs_processed = self._process_observation_and_notify(ctx.processors, obs) + + if self._handle_warmup(cfg.use_torch_compile, loop_start, control_interval): + continue + + action_dict = send_next_action(obs_processed, obs, ctx, interpolator) + if action_dict is not None: + self._log_telemetry(obs_processed, action_dict, ctx.runtime) + last_action = ctx.processors.robot_action_processor((action_dict, obs)) + + dt = time.perf_counter() - loop_start + if (sleep_t := control_interval - dt) > 0: + precise_sleep(sleep_t) + else: + logger.warning( + f"Record loop is running slower ({1 / dt:.1f} Hz) than the target FPS ({cfg.fps} Hz). Dataset frames might be dropped and robot control might be unstable. 
Common causes are: 1) Camera FPS not keeping up 2) Policy inference taking too long 3) CPU starvation" + ) + + finally: + logger.info("DAgger corrections-only loop ended — pausing engine") + engine.pause() + with contextlib.suppress(Exception): + with self._episode_lock: + dataset.save_episode() + self._needs_push.set() + logger.info("Final in-progress episode saved") + + # ------------------------------------------------------------------ + # State-machine transition side-effects + # ------------------------------------------------------------------ + + @staticmethod + def _apply_transition( + old_phase: DAggerPhase, + new_phase: DAggerPhase, + engine, + interpolator, + ctx: RolloutContext, + prev_action: dict | None, + ) -> None: + """Execute side-effects for a validated phase transition, including smooth handovers. + + AUTONOMOUS -> PAUSED (actuated teleop): + Pause the engine, then drive the leader arm to the follower's last + commanded position so the operator takes over without a jerk. + + PAUSED -> CORRECTING (non-actuated teleop): + Slide the follower to the teleop's current pose so the robot meets + the operator's hand rather than jumping to it on the first frame. + + CORRECTING -> PAUSED (actuated teleop): + Re-enable torque to hold position after correction. + This will be potentially useful if cancelling the correction recording + + PAUSED -> AUTONOMOUS: + Reset and resume the inference engine. + """ + teleop = ctx.hardware.teleop + robot = ctx.hardware.robot_wrapper + + logger.info("Phase transition: %s -> %s", old_phase.value, new_phase.value) + if old_phase == DAggerPhase.AUTONOMOUS and new_phase == DAggerPhase.PAUSED: + logger.info("Pausing engine - robot holds position") + engine.pause() + + if _teleop_supports_feedback(teleop) and prev_action is not None: + # TODO(Maxime): prev_action is in robot action key space (output of robot_action_processor). + # send_feedback expects teleop feedback key space. For homogeneous setups (e.g. 
SO-101 + # leader + SO-101 follower) the keys are identical so this works. If the processor pipeline + # does non-trivial key renaming (e.g. a rename_map on action keys), the interpolation in + # _teleop_smooth_move_to silently no-ops and the arm doesn't move. + logger.info("Smooth handover: moving leader arm to follower position") + _teleop_smooth_move_to(teleop, prev_action) + + elif old_phase == DAggerPhase.PAUSED and new_phase == DAggerPhase.CORRECTING: + logger.info("Entering correction mode - human teleop control") + if not _teleop_supports_feedback(teleop) and prev_action is not None: + logger.info("Smooth handover: sliding follower to teleop position") + obs = robot.get_observation() + teleop_action = teleop.get_action() + processed = ctx.processors.teleop_action_processor((teleop_action, obs)) + target = ctx.processors.robot_action_processor((processed, obs)) + _follower_smooth_move_to(robot, prev_action, target) + + # unlock the teleop for human control + if _teleop_supports_feedback(teleop): + teleop.disable_torque() + + elif old_phase == DAggerPhase.CORRECTING and new_phase == DAggerPhase.PAUSED: + if _teleop_supports_feedback(teleop): + teleop.enable_torque() + + elif new_phase == DAggerPhase.AUTONOMOUS: + logger.info("Resuming autonomous mode - resetting engine and interpolator") + interpolator.reset() + engine.reset() + engine.resume() + + # release teleop before resuming the policy + if _teleop_supports_feedback(teleop): + teleop.disable_torque() + + # ------------------------------------------------------------------ + # Background push (shared by both modes) + # ------------------------------------------------------------------ + + def _background_push(self, dataset, cfg) -> None: + """Queue a Hub push on the single-worker executor. + + The executor's max_workers=1 guarantees at most one push runs at + a time; submitted tasks are queued rather than dropped. 
Pushes + are blocked while the operator is mid-correction to avoid + uploading a partially-recorded episode. + """ + if self._push_executor is None: + return + + if self._events.phase == DAggerPhase.CORRECTING: + logger.info("Skipping push — correction in progress") + return + + if self._pending_push is not None and not self._pending_push.done(): + logger.info("Previous push still in progress; queueing next") + + def _push(): + try: + with self._episode_lock: + if safe_push_to_hub( + dataset, + tags=cfg.dataset.tags if cfg.dataset else None, + private=cfg.dataset.private if cfg.dataset else False, + ): + self._needs_push.clear() + logger.info("Background push to hub complete") + except Exception as e: + logger.error("Background push failed: %s", e) + + self._pending_push = self._push_executor.submit(_push) + logger.info("Background push task submitted") diff --git a/src/lerobot/rollout/strategies/factory.py b/src/lerobot/rollout/strategies/factory.py new file mode 100644 index 000000000..8a9727769 --- /dev/null +++ b/src/lerobot/rollout/strategies/factory.py @@ -0,0 +1,45 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Strategy factory: config type-name → strategy class dispatch.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from .base import BaseStrategy +from .core import RolloutStrategy +from .dagger import DAggerStrategy +from .highlight import HighlightStrategy +from .sentry import SentryStrategy + +if TYPE_CHECKING: + from ..configs import RolloutStrategyConfig + + +def create_strategy(config: RolloutStrategyConfig) -> RolloutStrategy: + """Instantiate the appropriate strategy from a config object. + + Dispatches on ``config.type`` (the name registered via + ``draccus.ChoiceRegistry``). + """ + if config.type == "base": + return BaseStrategy(config) + if config.type == "sentry": + return SentryStrategy(config) + if config.type == "highlight": + return HighlightStrategy(config) + if config.type == "dagger": + return DAggerStrategy(config) + raise ValueError(f"Unknown strategy type '{config.type}'. Available: base, sentry, highlight, dagger") diff --git a/src/lerobot/rollout/strategies/highlight.py b/src/lerobot/rollout/strategies/highlight.py new file mode 100644 index 000000000..baff70da7 --- /dev/null +++ b/src/lerobot/rollout/strategies/highlight.py @@ -0,0 +1,283 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Highlight Reel strategy: on-demand recording via ring buffer.""" + +from __future__ import annotations + +import contextlib +import logging +import os +import sys +import time +from concurrent.futures import Future, ThreadPoolExecutor +from threading import Event as ThreadingEvent, Lock + +from lerobot.common.control_utils import is_headless +from lerobot.datasets import VideoEncodingManager +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame +from lerobot.utils.import_utils import _pynput_available, require_package +from lerobot.utils.robot_utils import precise_sleep +from lerobot.utils.utils import log_say + +from ..configs import HighlightStrategyConfig +from ..context import RolloutContext +from ..ring_buffer import RolloutRingBuffer +from .core import RolloutStrategy, safe_push_to_hub, send_next_action + +PYNPUT_AVAILABLE = _pynput_available +keyboard = None +if PYNPUT_AVAILABLE: + try: + if ("DISPLAY" not in os.environ) and ("linux" in sys.platform): + logging.info("No DISPLAY set. Skipping pynput import.") + PYNPUT_AVAILABLE = False + else: + from pynput import keyboard + except Exception as e: + PYNPUT_AVAILABLE = False + logging.info(f"Could not import pynput: {e}") + +logger = logging.getLogger(__name__) + + +class HighlightStrategy(RolloutStrategy): + """Autonomous rollout with on-demand recording via ring buffer. + + The robot runs autonomously while a memory-bounded ring buffer + captures continuous telemetry. When the user presses the save key: + + 1. The ring buffer is flushed to the dataset (last *Z* seconds). + 2. Live recording continues until the save key is pressed again. + 3. The episode is saved and the ring buffer resumes capturing. + + Requires ``streaming_encoding=True`` (enforced in config validation) + so that ``dataset.add_frame`` is a non-blocking queue put — flushing + the entire ring buffer in one tick must not stall the control loop. 
+ """ + + config: HighlightStrategyConfig + + def __init__(self, config: HighlightStrategyConfig): + super().__init__(config) + require_package("pynput", extra="pynput-dep") + self._ring: RolloutRingBuffer | None = None + self._listener = None + self._save_requested = ThreadingEvent() + self._recording_live = ThreadingEvent() + self._push_requested = ThreadingEvent() + self._push_executor: ThreadPoolExecutor | None = None + self._pending_push: Future | None = None + self._episode_lock = Lock() + + def setup(self, ctx: RolloutContext) -> None: + """Initialise the inference engine, ring buffer, and keyboard listener.""" + self._init_engine(ctx) + + self._ring = RolloutRingBuffer( + max_seconds=self.config.ring_buffer_seconds, + max_memory_mb=self.config.ring_buffer_max_memory_mb, + fps=ctx.runtime.cfg.fps, + ) + + self._push_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="highlight-push") + logger.info( + "Ring buffer initialized (max_seconds=%.0f, max_memory=%.0fMB)", + self.config.ring_buffer_seconds, + self.config.ring_buffer_max_memory_mb, + ) + self._setup_keyboard(ctx.runtime.shutdown_event) + logger.info( + "Highlight strategy ready (buffer=%.0fs, save='%s', push='%s')", + self.config.ring_buffer_seconds, + self.config.save_key, + self.config.push_key, + ) + + def run(self, ctx: RolloutContext) -> None: + """Run the autonomous loop, buffering frames and recording on demand.""" + engine = self._engine + cfg = ctx.runtime.cfg + robot = ctx.hardware.robot_wrapper + dataset = ctx.data.dataset + ring = self._ring + interpolator = self._interpolator + features = ctx.data.dataset_features + + control_interval = interpolator.get_control_interval(cfg.fps) + + engine.resume() + play_sounds = cfg.play_sounds + + start_time = time.perf_counter() + task_str = cfg.dataset.single_task if cfg.dataset else cfg.task + logger.info("Highlight strategy recording started (press '%s' to save)", self.config.save_key) + + with VideoEncodingManager(dataset): + try: + 
while not ctx.runtime.shutdown_event.is_set(): + loop_start = time.perf_counter() + + if cfg.duration > 0 and (time.perf_counter() - start_time) >= cfg.duration: + logger.info("Duration limit reached (%.0fs)", cfg.duration) + break + + obs = robot.get_observation() + obs_processed = self._process_observation_and_notify(ctx.processors, obs) + + if self._handle_warmup(cfg.use_torch_compile, loop_start, control_interval): + continue + + action_dict = send_next_action(obs_processed, obs, ctx, interpolator) + + if action_dict is not None: + self._log_telemetry(obs_processed, action_dict, ctx.runtime) + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_frame = build_dataset_frame(features, action_dict, prefix=ACTION) + frame = {**obs_frame, **action_frame, "task": task_str} + + # NOTE: ``is_set()`` then ``clear()`` is not atomic + # against the keyboard thread setting the flag again + # in between — but that is benign: we lose at most one + # toggle, processed on the next iteration. 
+ if self._save_requested.is_set(): + self._save_requested.clear() + if not self._recording_live.is_set(): + logger.info( + "Flushing ring buffer (%d frames) + starting live recording", + len(ring), + ) + for buffered_frame in ring.drain(): + dataset.add_frame(buffered_frame) + self._recording_live.set() + else: + dataset.add_frame(frame) + with self._episode_lock: + dataset.save_episode() + logger.info("Episode saved (total: %d)", dataset.num_episodes) + log_say( + f"Episode {dataset.num_episodes} saved", + play_sounds, + ) + self._recording_live.clear() + continue # frame already consumed — skip ring.append + + if self._push_requested.is_set(): + self._push_requested.clear() + logger.info("Push requested by user") + self._background_push(dataset, cfg) + + if self._recording_live.is_set(): + dataset.add_frame(frame) + else: + ring.append(frame) + + dt = time.perf_counter() - loop_start + if (sleep_t := control_interval - dt) > 0: + precise_sleep(sleep_t) + else: + logger.warning( + f"Record loop is running slower ({1 / dt:.1f} Hz) than the target FPS ({cfg.fps} Hz). Dataset frames might be dropped and robot control might be unstable. 
Common causes are: 1) Camera FPS not keeping up 2) Policy inference taking too long 3) CPU starvation" + ) + + finally: + logger.info("Highlight control loop ended") + if self._recording_live.is_set(): + logger.info("Saving in-progress live episode") + with contextlib.suppress(Exception), self._episode_lock: + dataset.save_episode() + + def teardown(self, ctx: RolloutContext) -> None: + """Stop listeners, finalise the dataset, and disconnect hardware.""" + play_sounds = ctx.runtime.cfg.play_sounds + logger.info("Stopping highlight recording") + log_say("Stopping highlight recording", play_sounds) + + if self._listener is not None: + logger.info("Stopping keyboard listener") + self._listener.stop() + + if self._push_executor is not None: + logger.info("Shutting down push executor (waiting for pending pushes)...") + self._push_executor.shutdown(wait=True) + self._push_executor = None + + if ctx.data.dataset is not None: + logger.info("Finalizing dataset...") + ctx.data.dataset.finalize() + if ctx.runtime.cfg.dataset and ctx.runtime.cfg.dataset.push_to_hub: + logger.info("Pushing final dataset to hub...") + if safe_push_to_hub( + ctx.data.dataset, + tags=ctx.runtime.cfg.dataset.tags, + private=ctx.runtime.cfg.dataset.private, + ): + logger.info("Dataset uploaded to hub") + log_say("Dataset uploaded to hub", play_sounds) + + self._teardown_hardware( + ctx.hardware, + return_to_initial_position=ctx.runtime.cfg.return_to_initial_position, + ) + logger.info("Highlight strategy teardown complete") + + def _setup_keyboard(self, shutdown_event: ThreadingEvent) -> None: + """Set up keyboard listener for save and push keys.""" + if is_headless(): + logger.warning("Headless environment — highlight keys unavailable") + return + + try: + save_key = self.config.save_key + push_key = self.config.push_key + + def on_press(key): + with contextlib.suppress(Exception): + if hasattr(key, "char") and key.char == save_key: + self._save_requested.set() + elif hasattr(key, "char") and 
key.char == push_key: + self._push_requested.set() + elif key == keyboard.Key.esc: + self._save_requested.clear() + shutdown_event.set() + + self._listener = keyboard.Listener(on_press=on_press) + self._listener.start() + logger.info("Keyboard listener started (save='%s', push='%s', ESC=stop)", save_key, push_key) + except ImportError: + logger.warning("pynput not available — keyboard listener disabled") + + def _background_push(self, dataset, cfg) -> None: + """Queue a Hub push on the single-worker executor.""" + if self._push_executor is None: + return + + if self._pending_push is not None and not self._pending_push.done(): + logger.info("Previous push still in progress; queueing next") + + def _push(): + try: + with self._episode_lock: + if safe_push_to_hub( + dataset, + tags=cfg.dataset.tags if cfg.dataset else None, + private=cfg.dataset.private if cfg.dataset else False, + ): + logger.info("Background push to hub complete") + except Exception as e: + logger.error("Background push failed: %s", e) + + self._pending_push = self._push_executor.submit(_push) + logger.info("Background push task submitted") diff --git a/src/lerobot/rollout/strategies/sentry.py b/src/lerobot/rollout/strategies/sentry.py new file mode 100644 index 000000000..61e38aa68 --- /dev/null +++ b/src/lerobot/rollout/strategies/sentry.py @@ -0,0 +1,231 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Sentry rollout strategy: continuous autonomous recording with auto-upload.""" + +from __future__ import annotations + +import contextlib +import logging +import time +from concurrent.futures import Future, ThreadPoolExecutor +from threading import Event, Lock + +from lerobot.datasets import VideoEncodingManager +from lerobot.datasets.utils import DEFAULT_VIDEO_FILE_SIZE_IN_MB +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.feature_utils import build_dataset_frame +from lerobot.utils.robot_utils import precise_sleep +from lerobot.utils.utils import log_say + +from ..configs import SentryStrategyConfig +from ..context import RolloutContext +from .core import RolloutStrategy, estimate_max_episode_seconds, safe_push_to_hub, send_next_action + +logger = logging.getLogger(__name__) + + +class SentryStrategy(RolloutStrategy): + """Continuous autonomous rollout with always-on recording. + + Episode duration is derived from camera resolution, FPS, and + ``DEFAULT_VIDEO_FILE_SIZE_IN_MB`` so that each saved episode + produces a video file that has crossed the chunk-size boundary. + This keeps ``push_to_hub`` efficient — it uploads complete video + files rather than re-uploading a still-growing one. + + The dataset is pushed to the Hub via a bounded single-worker executor + so no push is ever silently dropped and exactly one push runs at a + time. + + Policy state (hidden state, RTC queue) intentionally persists across + episode boundaries — Sentry slices one continuous rollout, the robot + does not reset between slices. + + Requires ``streaming_encoding=True`` (enforced in config validation) + to prevent disk I/O from blocking the control loop. 
+ """ + + config: SentryStrategyConfig + + def __init__(self, config: SentryStrategyConfig): + super().__init__(config) + self._push_executor: ThreadPoolExecutor | None = None + self._pending_push: Future | None = None + self._needs_push = Event() + self._episode_lock = Lock() + + def setup(self, ctx: RolloutContext) -> None: + """Initialise the inference engine and background push executor.""" + self._init_engine(ctx) + self._push_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="sentry-push") + target_mb = self.config.target_video_file_size_mb or DEFAULT_VIDEO_FILE_SIZE_IN_MB + self._episode_duration_s = estimate_max_episode_seconds( + ctx.data.dataset_features, ctx.runtime.cfg.fps, target_size_mb=target_mb + ) + logger.info( + "Sentry strategy ready (episode_duration=%.0fs, upload_every=%d eps)", + self._episode_duration_s, + self.config.upload_every_n_episodes, + ) + + def run(self, ctx: RolloutContext) -> None: + """Run the continuous recording loop with automatic episode rotation.""" + engine = self._engine + cfg = ctx.runtime.cfg + robot = ctx.hardware.robot_wrapper + dataset = ctx.data.dataset + interpolator = self._interpolator + features = ctx.data.dataset_features + + control_interval = interpolator.get_control_interval(cfg.fps) + + engine.resume() + play_sounds = cfg.play_sounds + episode_duration_s = self._episode_duration_s + + start_time = time.perf_counter() + episode_start = time.perf_counter() + episodes_since_push = 0 + task_str = cfg.dataset.single_task if cfg.dataset else cfg.task + logger.info("Sentry recording started (episode_duration=%.0fs)", episode_duration_s) + + with VideoEncodingManager(dataset): + try: + while not ctx.runtime.shutdown_event.is_set(): + loop_start = time.perf_counter() + + if cfg.duration > 0 and (time.perf_counter() - start_time) >= cfg.duration: + logger.info("Duration limit reached (%.0fs)", cfg.duration) + break + + obs = robot.get_observation() + obs_processed = 
self._process_observation_and_notify(ctx.processors, obs) + + if self._handle_warmup(cfg.use_torch_compile, loop_start, control_interval): + continue + + action_dict = send_next_action(obs_processed, obs, ctx, interpolator) + + if action_dict is not None: + self._log_telemetry(obs_processed, action_dict, ctx.runtime) + obs_frame = build_dataset_frame(features, obs_processed, prefix=OBS_STR) + action_frame = build_dataset_frame(features, action_dict, prefix=ACTION) + frame = {**obs_frame, **action_frame, "task": task_str} + # ``add_frame`` writes to the in-progress episode buffer; the + # background pusher only ever touches *finalised* episode + # artifacts on disk. The two operate on disjoint state, so + # ``add_frame`` does not need ``_episode_lock``. + dataset.add_frame(frame) + + # Episode rotation derived from video file-size target. + # The duration is a conservative estimate so the actual + # video has crossed DEFAULT_VIDEO_FILE_SIZE_IN_MB by now, + # keeping push_to_hub efficient (uploads complete files). + elapsed = time.perf_counter() - episode_start + if elapsed >= episode_duration_s: + # ``save_episode`` finalises the in-progress episode and + # flushes it to disk; ``_episode_lock`` serialises this with + # ``push_to_hub`` (run in the background executor) so the + # pusher never reads a half-written episode. 
+ with self._episode_lock: + dataset.save_episode() + episodes_since_push += 1 + self._needs_push.set() + logger.info( + "Episode saved (total: %d, elapsed: %.1fs)", + dataset.num_episodes, + elapsed, + ) + log_say(f"Episode {dataset.num_episodes} saved", play_sounds) + + if episodes_since_push >= self.config.upload_every_n_episodes: + self._background_push(dataset, cfg) + episodes_since_push = 0 + + episode_start = time.perf_counter() + + dt = time.perf_counter() - loop_start + if (sleep_t := control_interval - dt) > 0: + precise_sleep(sleep_t) + else: + logger.warning( + f"Record loop is running slower ({1 / dt:.1f} Hz) than the target FPS ({cfg.fps} Hz). Dataset frames might be dropped and robot control might be unstable. Common causes are: 1) Camera FPS not keeping up 2) Policy inference taking too long 3) CPU starvation" + ) + + finally: + logger.info("Sentry control loop ended — saving final episode") + with contextlib.suppress(Exception): + with self._episode_lock: + dataset.save_episode() + self._needs_push.set() + + def teardown(self, ctx: RolloutContext) -> None: + """Flush pending pushes, finalise the dataset, and disconnect hardware.""" + play_sounds = ctx.runtime.cfg.play_sounds + logger.info("Stopping sentry recording") + log_say("Stopping sentry recording", play_sounds) + + # Flush any queued/running push cleanly. 
+ if self._push_executor is not None: + logger.info("Shutting down push executor (waiting for pending pushes)...") + self._push_executor.shutdown(wait=True) + self._push_executor = None + + if ctx.data.dataset is not None: + logger.info("Finalizing dataset...") + ctx.data.dataset.finalize() + if self._needs_push.is_set() and ctx.runtime.cfg.dataset and ctx.runtime.cfg.dataset.push_to_hub: + logger.info("Pushing final dataset to hub...") + if safe_push_to_hub( + ctx.data.dataset, + tags=ctx.runtime.cfg.dataset.tags, + private=ctx.runtime.cfg.dataset.private, + ): + logger.info("Dataset uploaded to hub") + log_say("Dataset uploaded to hub", play_sounds) + + self._teardown_hardware( + ctx.hardware, + return_to_initial_position=ctx.runtime.cfg.return_to_initial_position, + ) + logger.info("Sentry strategy teardown complete") + + def _background_push(self, dataset, cfg) -> None: + """Queue a Hub push on the single-worker executor. + + The executor's max_workers=1 guarantees at most one push runs at + a time; submitted tasks are queued rather than dropped. 
+ """ + if self._push_executor is None: + return + + if self._pending_push is not None and not self._pending_push.done(): + logger.info("Previous push still in progress; queueing next") + + def _push(): + try: + with self._episode_lock: + if safe_push_to_hub( + dataset, + tags=cfg.dataset.tags if cfg.dataset else None, + private=cfg.dataset.private if cfg.dataset else False, + ): + self._needs_push.clear() + logger.info("Background push to hub complete") + except Exception as e: + logger.error("Background push failed: %s", e) + + self._pending_push = self._push_executor.submit(_push) + logger.info("Background push task submitted") diff --git a/src/lerobot/scripts/convert_dataset_v21_to_v30.py b/src/lerobot/scripts/convert_dataset_v21_to_v30.py index 1f220dc20..f516aa8fc 100644 --- a/src/lerobot/scripts/convert_dataset_v21_to_v30.py +++ b/src/lerobot/scripts/convert_dataset_v21_to_v30.py @@ -70,6 +70,7 @@ from lerobot.datasets.io_utils import ( get_parquet_file_size_in_mb, get_parquet_num_frames, load_info, + load_json, write_episodes, write_info, write_stats, @@ -81,9 +82,11 @@ from lerobot.datasets.utils import ( DEFAULT_DATA_PATH, DEFAULT_VIDEO_FILE_SIZE_IN_MB, DEFAULT_VIDEO_PATH, + INFO_PATH, LEGACY_EPISODES_PATH, LEGACY_EPISODES_STATS_PATH, LEGACY_TASKS_PATH, + DatasetInfo, update_chunk_file_indices, ) from lerobot.datasets.video_utils import concatenate_video_files, get_video_duration_in_s @@ -165,7 +168,7 @@ def legacy_load_tasks(local_dir: Path) -> tuple[dict, dict]: def validate_local_dataset_version(local_path: Path) -> None: """Validate that the local dataset has the expected v2.1 version.""" info = load_info(local_path) - dataset_version = info.get("codebase_version", "unknown") + dataset_version = info.codebase_version or "unknown" if dataset_version != V21: raise ValueError( f"Local dataset has codebase version '{dataset_version}', expected '{V21}'. 
" @@ -256,14 +259,14 @@ def convert_data(root: Path, new_root: Path, data_file_size_in_mb: int): def get_video_keys(root): info = load_info(root) - features = info["features"] + features = info.features video_keys = [key for key, ft in features.items() if ft["dtype"] == "video"] return video_keys def get_image_keys(root): info = load_info(root) - features = info["features"] + features = info.features image_keys = [key for key, ft in features.items() if ft["dtype"] == "image"] return image_keys @@ -434,7 +437,8 @@ def convert_episodes_metadata(root, new_root, episodes_metadata, episodes_video_ def convert_info(root, new_root, data_file_size_in_mb, video_file_size_in_mb): - info = load_info(root) + # Load as raw dict to remove legacy v2.1 fields before constructing DatasetInfo. + info = load_json(root / INFO_PATH) info["codebase_version"] = V30 del info["total_chunks"] del info["total_videos"] @@ -449,7 +453,9 @@ def convert_info(root, new_root, data_file_size_in_mb, video_file_size_in_mb): # already has fps in video_info continue info["features"][key]["fps"] = info["fps"] - write_info(info, new_root) + # Convert raw dict to typed DatasetInfo before writing + dataset_info = DatasetInfo.from_dict(info) + write_info(dataset_info, new_root) def convert_dataset( diff --git a/src/lerobot/scripts/lerobot_edit_dataset.py b/src/lerobot/scripts/lerobot_edit_dataset.py index 0cfb34325..a708d37a3 100644 --- a/src/lerobot/scripts/lerobot_edit_dataset.py +++ b/src/lerobot/scripts/lerobot_edit_dataset.py @@ -150,11 +150,24 @@ Show dataset information without feature details: --operation.type info \ --operation.show_features false -Recompute dataset statistics: +Recompute dataset statistics (saves to lerobot/pusht_recomputed_stats by default): lerobot-edit-dataset \ --repo_id lerobot/pusht \ --operation.type recompute_stats +Recompute stats and save to a specific new repo_id: + lerobot-edit-dataset \ + --repo_id lerobot/pusht \ + --new_repo_id lerobot/pusht_new_stats \ + 
--operation.type recompute_stats + +Recompute stats in-place (overwrites original dataset stats): + lerobot-edit-dataset \ + --repo_id lerobot/pusht \ + --new_repo_id lerobot/pusht \ + --operation.type recompute_stats \ + --operation.overwrite true + Recompute stats for relative actions and push to hub: lerobot-edit-dataset \ --repo_id lerobot/pusht \ @@ -256,6 +269,7 @@ class RecomputeStatsConfig(OperationConfig): relative_exclude_joints: list[str] | None = None chunk_size: int = 50 num_workers: int = 0 + overwrite: bool = False @OperationConfig.register_subclass("info") @@ -280,16 +294,30 @@ class EditDatasetConfig: push_to_hub: bool = False +def _resolve_io_paths( + repo_id: str, + new_repo_id: str | None, + root: Path | str | None, + new_root: Path | str | None, + default_new_repo_id: str | None = None, +) -> tuple[str, Path, Path]: + """Resolve input/output paths and repo_id for dataset operations. + + Returns (output_repo_id, input_path, output_path) with resolved (symlink-safe) paths. 
+ """ + input_path = (Path(root) if root else HF_LEROBOT_HOME / repo_id).resolve() + output_repo_id = new_repo_id or default_new_repo_id or repo_id + output_path = (Path(new_root) if new_root else HF_LEROBOT_HOME / output_repo_id).resolve() + return output_repo_id, input_path, output_path + + def get_output_path( repo_id: str, new_repo_id: str | None, root: Path | str | None, new_root: Path | str | None, ) -> tuple[str, Path]: - input_path = Path(root) if root else HF_LEROBOT_HOME / repo_id - - output_repo_id = new_repo_id if new_repo_id else repo_id - output_path = Path(new_root) if new_root else HF_LEROBOT_HOME / output_repo_id + output_repo_id, input_path, output_path = _resolve_io_paths(repo_id, new_repo_id, root, new_root) # In case of in-place modification, create a backup of the original dataset (if it exists) if output_path == input_path: @@ -557,7 +585,39 @@ def handle_recompute_stats(cfg: EditDatasetConfig) -> None: if not isinstance(cfg.operation, RecomputeStatsConfig): raise ValueError("Operation config must be RecomputeStatsConfig") - dataset = LeRobotDataset(cfg.repo_id, root=cfg.root) + # Determine whether this is an in-place operation + output_repo_id, input_root, output_root = _resolve_io_paths( + cfg.repo_id, + cfg.new_repo_id, + cfg.root, + cfg.new_root, + default_new_repo_id=f"{cfg.repo_id}_recomputed_stats", + ) + in_place = output_root == input_root + + if in_place and not cfg.operation.overwrite: + raise ValueError( + f"recompute_stats would overwrite the dataset in-place at {input_root}. " + "Pass --operation.overwrite true to allow in-place modification, " + "or use --new_repo_id / --new_root to write to a different location. " + f"Default output repo_id when neither is set: '{cfg.repo_id}_recomputed_stats'." + ) + + if in_place: + logging.warning( + f"Overwriting dataset stats in-place at {input_root}. The original stats will be lost." 
+ ) + dataset = LeRobotDataset(cfg.repo_id, root=input_root) + else: + logging.info(f"Copying dataset from {input_root} to {output_root}") + if output_root.exists(): + backup_path = output_root.with_name(output_root.name + "_old") + logging.warning(f"Output directory {output_root} already exists. Moving to {backup_path}") + if backup_path.exists(): + shutil.rmtree(backup_path) + shutil.move(output_root, backup_path) + shutil.copytree(input_root, output_root) + dataset = LeRobotDataset(output_repo_id, root=output_root) logging.info(f"Recomputing stats for {cfg.repo_id}") if cfg.operation.relative_action: @@ -578,7 +638,7 @@ def handle_recompute_stats(cfg: EditDatasetConfig) -> None: logging.info(f"Stats written to {dataset.root}") if cfg.push_to_hub: - logging.info(f"Pushing to hub as {dataset.meta.repo_id}...") + logging.info(f"Pushing to hub as {dataset.repo_id}...") dataset.push_to_hub() diff --git a/src/lerobot/scripts/lerobot_record.py b/src/lerobot/scripts/lerobot_record.py index 50b41c69d..129696bd3 100644 --- a/src/lerobot/scripts/lerobot_record.py +++ b/src/lerobot/scripts/lerobot_record.py @@ -13,70 +13,62 @@ # limitations under the License. """ -Records a dataset. Actions for the robot can be either generated by teleoperation or by a policy. +Records a dataset via teleoperation. This is a pure data-collection +tool — no policy inference. For deploying trained policies, use +``lerobot-rollout`` instead. 
Requires: pip install 'lerobot[core_scripts]' (includes dataset + hardware + viz extras) Example: ```shell -lerobot-record \ - --robot.type=so100_follower \ - --robot.port=/dev/tty.usbmodem58760431541 \ - --robot.cameras="{laptop: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ - --robot.id=black \ - --dataset.repo_id=/ \ - --dataset.num_episodes=2 \ - --dataset.single_task="Grab the cube" \ - --dataset.streaming_encoding=true \ - --dataset.encoder_threads=2 \ +lerobot-record \\ + --robot.type=so100_follower \\ + --robot.port=/dev/tty.usbmodem58760431541 \\ + --robot.cameras="{laptop: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \\ + --robot.id=black \\ + --teleop.type=so100_leader \\ + --teleop.port=/dev/tty.usbmodem58760431551 \\ + --teleop.id=blue \\ + --dataset.repo_id=/ \\ + --dataset.num_episodes=2 \\ + --dataset.single_task="Grab the cube" \\ + --dataset.streaming_encoding=true \\ + --dataset.encoder_threads=2 \\ --display_data=true - # <- Optional: specify video codec (auto, h264, hevc, libsvtav1). Default is libsvtav1. 
\ - # --dataset.vcodec=h264 \ - # <- Teleop optional if you want to teleoperate to record or in between episodes with a policy \ - # --teleop.type=so100_leader \ - # --teleop.port=/dev/tty.usbmodem58760431551 \ - # --teleop.id=blue \ - # <- Policy optional if you want to record with a policy \ - # --policy.path=${HF_USER}/my_policy \ ``` Example recording with bimanual so100: ```shell -lerobot-record \ - --robot.type=bi_so_follower \ - --robot.left_arm_config.port=/dev/tty.usbmodem5A460822851 \ - --robot.right_arm_config.port=/dev/tty.usbmodem5A460814411 \ - --robot.id=bimanual_follower \ +lerobot-record \\ + --robot.type=bi_so_follower \\ + --robot.left_arm_config.port=/dev/tty.usbmodem5A460822851 \\ + --robot.right_arm_config.port=/dev/tty.usbmodem5A460814411 \\ + --robot.id=bimanual_follower \\ --robot.left_arm_config.cameras='{ wrist: {"type": "opencv", "index_or_path": 1, "width": 640, "height": 480, "fps": 30}, top: {"type": "opencv", "index_or_path": 3, "width": 640, "height": 480, "fps": 30}, }' --robot.right_arm_config.cameras='{ wrist: {"type": "opencv", "index_or_path": 2, "width": 640, "height": 480, "fps": 30}, front: {"type": "opencv", "index_or_path": 4, "width": 640, "height": 480, "fps": 30}, - }' \ - --teleop.type=bi_so_leader \ - --teleop.left_arm_config.port=/dev/tty.usbmodem5A460852721 \ - --teleop.right_arm_config.port=/dev/tty.usbmodem5A460819811 \ - --teleop.id=bimanual_leader \ - --display_data=true \ - --dataset.repo_id=${HF_USER}/bimanual-so-handover-cube \ - --dataset.num_episodes=25 \ - --dataset.single_task="Grab and handover the red cube to the other arm" \ - --dataset.streaming_encoding=true \ - # --dataset.vcodec=auto \ + }' \\ + --teleop.type=bi_so_leader \\ + --teleop.left_arm_config.port=/dev/tty.usbmodem5A460852721 \\ + --teleop.right_arm_config.port=/dev/tty.usbmodem5A460819811 \\ + --teleop.id=bimanual_leader \\ + --display_data=true \\ + --dataset.repo_id=${HF_USER}/bimanual-so-handover-cube \\ + --dataset.num_episodes=25 \\ 
+ --dataset.single_task="Grab and handover the red cube to the other arm" \\ + --dataset.streaming_encoding=true \\ --dataset.encoder_threads=2 ``` """ import logging import time -from dataclasses import asdict, dataclass, field -from pathlib import Path +from dataclasses import asdict, dataclass from pprint import pformat -from typing import Any - -import torch from lerobot.cameras import CameraConfig # noqa: F401 from lerobot.cameras.opencv import OpenCVCameraConfig # noqa: F401 @@ -86,11 +78,10 @@ from lerobot.cameras.zmq import ZMQCameraConfig # noqa: F401 from lerobot.common.control_utils import ( init_keyboard_listener, is_headless, - predict_action, - sanity_check_dataset_name, sanity_check_dataset_robot_compatibility, ) -from lerobot.configs import PreTrainedConfig, parser +from lerobot.configs import parser +from lerobot.configs.dataset import DatasetRecordConfig from lerobot.datasets import ( LeRobotDataset, VideoEncodingManager, @@ -98,21 +89,11 @@ from lerobot.datasets import ( create_initial_features, safe_stop_image_writer, ) -from lerobot.policies import ( - ActionInterpolator, - PreTrainedPolicy, - make_policy, - make_pre_post_processors, - make_robot_action, -) from lerobot.processor import ( - PolicyAction, - PolicyProcessorPipeline, RobotAction, RobotObservation, RobotProcessorPipeline, make_default_processors, - rename_stats, ) from lerobot.robots import ( # noqa: F401 Robot, @@ -146,7 +127,6 @@ from lerobot.teleoperators import ( # noqa: F401 ) from lerobot.teleoperators.keyboard import KeyboardTeleop from lerobot.utils.constants import ACTION, OBS_STR -from lerobot.utils.device_utils import get_safe_torch_device from lerobot.utils.feature_utils import build_dataset_frame, combine_feature_dicts from lerobot.utils.import_utils import register_third_party_plugins from lerobot.utils.robot_utils import precise_sleep @@ -157,71 +137,12 @@ from lerobot.utils.utils import ( from lerobot.utils.visualization_utils import init_rerun, log_rerun_data 
-@dataclass -class DatasetRecordConfig: - # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`). - repo_id: str - # A short but accurate description of the task performed during the recording (e.g. "Pick the Lego block and drop it in the box on the right.") - single_task: str - # Root directory where the dataset will be stored (e.g. 'dataset/path'). If None, defaults to $HF_LEROBOT_HOME/repo_id. - root: str | Path | None = None - # Limit the frames per second. - fps: int = 30 - # Number of seconds for data recording for each episode. - episode_time_s: int | float = 60 - # Number of seconds for resetting the environment after each episode. - reset_time_s: int | float = 60 - # Number of episodes to record. - num_episodes: int = 50 - # Encode frames in the dataset into video - video: bool = True - # Upload dataset to Hugging Face hub. - push_to_hub: bool = True - # Upload on private repository on the Hugging Face hub. - private: bool = False - # Add tags to your dataset on the hub. - tags: list[str] | None = None - # Number of subprocesses handling the saving of frames as PNG. Set to 0 to use threads only; - # set to ≥1 to use subprocesses, each using threads to write images. The best number of processes - # and threads depends on your system. We recommend 4 threads per camera with 0 processes. - # If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses. - num_image_writer_processes: int = 0 - # Number of threads writing the frames as png images on disk, per camera. - # Too many threads might cause unstable teleoperation fps due to main thread being blocked. - # Not enough threads might cause low camera fps. - num_image_writer_threads_per_camera: int = 4 - # Number of episodes to record before batch encoding videos - # Set to 1 for immediate encoding (default behavior), or higher for batched encoding - video_encoding_batch_size: int = 1 - # Video codec for encoding videos. 
Options: 'h264', 'hevc', 'libsvtav1', 'auto', - # or hardware-specific: 'h264_videotoolbox', 'h264_nvenc', 'h264_vaapi', 'h264_qsv'. - # Use 'auto' to auto-detect the best available hardware encoder. - vcodec: str = "libsvtav1" - # Enable streaming video encoding: encode frames in real-time during capture instead - # of writing PNG images first. Makes save_episode() near-instant. More info in the documentation: https://huggingface.co/docs/lerobot/streaming_video_encoding - streaming_encoding: bool = False - # Maximum number of frames to buffer per camera when using streaming encoding. - # ~1s buffer at 30fps. Provides backpressure if the encoder can't keep up. - encoder_queue_maxsize: int = 30 - # Number of threads per encoder instance. None = auto (codec default). - # Lower values reduce CPU usage, maps to 'lp' (via svtav1-params) for libsvtav1 and 'threads' for h264/hevc.. - encoder_threads: int | None = None - # Rename map for the observation to override the image and state keys - rename_map: dict[str, str] = field(default_factory=dict) - - def __post_init__(self): - if self.single_task is None: - raise ValueError("You need to provide a task as argument in `single_task`.") - - @dataclass class RecordConfig: robot: RobotConfig dataset: DatasetRecordConfig - # Whether to control the robot with a teleoperator + # Teleoperator to control the robot (required) teleop: TeleoperatorConfig | None = None - # Whether to control the robot with a policy - policy: PreTrainedConfig | None = None # Display all cameras on screen display_data: bool = False # Display data on a remote Rerun server @@ -234,27 +155,14 @@ class RecordConfig: play_sounds: bool = True # Resume recording on an existing dataset. 
resume: bool = False - # Action interpolation multiplier for smoother policy control (1=off, 2=2x, 3=3x) - # Only applies when using a policy (not teleop) - interpolation_multiplier: int = 1 def __post_init__(self): - # HACK: We parse again the cli args here to get the pretrained path if there was one. - policy_path = parser.get_path_arg("policy") - - if policy_path: - cli_overrides = parser.get_cli_overrides("policy") - - self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) - self.policy.pretrained_path = policy_path - - if self.teleop is None and self.policy is None: - raise ValueError("Choose a policy, a teleoperator or both to control the robot") - - @classmethod - def __get_path_fields__(cls) -> list[str]: - """This enables the parser to load config from the policy using `--policy.path=local/dir`""" - return ["policy"] + if self.teleop is None: + raise ValueError( + "A teleoperator is required for recording. " + "Use --teleop.type=... to specify one. " + "For policy-based deployment, use lerobot-rollout instead." + ) """ --------------- record_loop() data flow -------------------------- @@ -264,18 +172,14 @@ class RecordConfig: V [ robot_observation_processor ] ---> processed_obs V - .-----( ACTION LOGIC )------------------. 
- V V - [ From Teleoperator ] [ From Policy ] - | | - | [teleop.get_action] -> raw_action | [predict_action] - | | | | - | V | V - | [teleop_action_processor] | | - | | | | - '---> processed_teleop_action '---> processed_policy_action - | | - '-------------------------.-------------' + [ Teleoperator ] + | + | [teleop.get_action] -> raw_action + | | + | V + | [teleop_action_processor] + | | + '---> processed_teleop_action V [ robot_action_processor ] --> robot_action_to_send V @@ -303,13 +207,9 @@ def record_loop( ], # runs after robot dataset: LeRobotDataset | None = None, teleop: Teleoperator | list[Teleoperator] | None = None, - policy: PreTrainedPolicy | None = None, - preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]] | None = None, - postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction] | None = None, control_time_s: int | None = None, single_task: str | None = None, display_data: bool = False, - interpolator: ActionInterpolator | None = None, display_compressed_images: bool = False, ): if dataset is not None and dataset.fps != fps: @@ -340,21 +240,7 @@ def record_loop( "For multi-teleop, the list must contain exactly one KeyboardTeleop and one arm teleoperator. Currently only supported for LeKiwi robot." ) - # Reset policy and processor if they are provided - if policy is not None and preprocessor is not None and postprocessor is not None: - policy.reset() - preprocessor.reset() - postprocessor.reset() - - # Reset interpolator if provided - if interpolator is not None: - interpolator.reset() - - # Calculate control interval based on interpolation - use_interpolation = interpolator is not None and interpolator.enabled and policy is not None - control_interval = interpolator.get_control_interval(fps) if interpolator else 1 / fps - # Pre-compute action key order outside the hot loop — it won't change mid-episode. 
- action_keys = sorted(robot.action_features) if use_interpolation else [] + control_interval = 1 / fps no_action_count = 0 timestamp = 0 @@ -372,63 +258,11 @@ def record_loop( # Applies a pipeline to the raw robot observation, default is IdentityProcessor obs_processed = robot_observation_processor(obs) - if policy is not None or dataset is not None: + if dataset is not None: observation_frame = build_dataset_frame(dataset.features, obs_processed, prefix=OBS_STR) - # Track whether this iteration should be recorded to the dataset. - # Interpolated-only iterations send actions to the robot but don't record frames, - # keeping the dataset at the original fps while the robot moves at the higher rate. - is_record_frame = True - - # Get action from either policy or teleop - if policy is not None and preprocessor is not None and postprocessor is not None: - # With interpolation: only call policy when interpolator needs new action - if use_interpolation: - ran_inference = False - - if interpolator.needs_new_action(): - action_values = predict_action( - observation=observation_frame, - policy=policy, - device=get_safe_torch_device(policy.config.device), - preprocessor=preprocessor, - postprocessor=postprocessor, - use_amp=policy.config.use_amp, - task=single_task, - robot_type=robot.robot_type, - ) - act_processed_policy = make_robot_action(action_values, dataset.features) - robot_action_to_send = robot_action_processor((act_processed_policy, obs)) - - action_tensor = torch.tensor([robot_action_to_send[k] for k in action_keys]) - interpolator.add(action_tensor) - ran_inference = True - - interp_action = interpolator.get() - if interp_action is not None: - robot_action_to_send = {k: interp_action[i].item() for i, k in enumerate(action_keys)} - action_values = robot_action_to_send - else: - continue - - is_record_frame = ran_inference - else: - action_values = predict_action( - observation=observation_frame, - policy=policy, - 
device=get_safe_torch_device(policy.config.device), - preprocessor=preprocessor, - postprocessor=postprocessor, - use_amp=policy.config.use_amp, - task=single_task, - robot_type=robot.robot_type, - ) - act_processed_policy: RobotAction = make_robot_action(action_values, dataset.features) - # Applies a pipeline to the action, default is IdentityProcessor - robot_action_to_send = robot_action_processor((act_processed_policy, obs)) - action_values = robot_action_to_send - - elif policy is None and isinstance(teleop, Teleoperator): + # Get action from teleop + if isinstance(teleop, Teleoperator): act = teleop.get_action() if robot.name == "unitree_g1": teleop.send_feedback(obs) @@ -438,7 +272,7 @@ def record_loop( action_values = act_processed_teleop robot_action_to_send = robot_action_processor((act_processed_teleop, obs)) - elif policy is None and isinstance(teleop, list): + elif isinstance(teleop, list): arm_action = teleop_arm.get_action() arm_action = {f"arm_{k}": v for k, v in arm_action.items()} keyboard_action = teleop_keyboard.get_action() @@ -451,7 +285,7 @@ def record_loop( no_action_count += 1 if no_action_count == 1 or no_action_count % 10 == 0: logging.warning( - "No policy or teleoperator provided, skipping action generation. " + "No teleoperator provided, skipping action generation. " "This is likely to happen when resetting the environment without a teleop device. " "The robot won't be at its rest position at the start of the next episode." ) @@ -463,8 +297,8 @@ def record_loop( # TODO(steven, pepijn, adil): we should use a pipeline step to clip the action, so the sent action is the action that we input to the robot. 
_sent_action = robot.send_action(robot_action_to_send) - # Write to dataset (only on real policy frames, not interpolated-only iterations) - if dataset is not None and is_record_frame: + # Write to dataset + if dataset is not None: action_frame = build_dataset_frame(dataset.features, action_values, prefix=ACTION) frame = {**observation_frame, **action_frame, "task": single_task} dataset.add_frame(frame) @@ -488,7 +322,12 @@ def record_loop( @parser.wrap() -def record(cfg: RecordConfig) -> LeRobotDataset: +def record( + cfg: RecordConfig, + teleop_action_processor: RobotProcessorPipeline | None = None, + robot_action_processor: RobotProcessorPipeline | None = None, + robot_observation_processor: RobotProcessorPipeline | None = None, +) -> LeRobotDataset: init_logging() logging.info(pformat(asdict(cfg))) if cfg.display_data: @@ -502,7 +341,16 @@ def record(cfg: RecordConfig) -> LeRobotDataset: robot = make_robot_from_config(cfg.robot) teleop = make_teleoperator_from_config(cfg.teleop) if cfg.teleop is not None else None - teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors() + # Fall back to identity pipelines when the caller doesn't supply processors. 
+ if ( + teleop_action_processor is None + or robot_action_processor is None + or robot_observation_processor is None + ): + _t, _r, _o = make_default_processors() + teleop_action_processor = teleop_action_processor or _t + robot_action_processor = robot_action_processor or _r + robot_observation_processor = robot_observation_processor or _o dataset_features = combine_feature_dicts( aggregate_pipeline_dataset_features( @@ -540,8 +388,14 @@ def record(cfg: RecordConfig) -> LeRobotDataset: ) sanity_check_dataset_robot_compatibility(dataset, robot, cfg.dataset.fps, dataset_features) else: - # Create empty dataset or load existing saved episodes - sanity_check_dataset_name(cfg.dataset.repo_id, cfg.policy) + # Reject eval_ prefix — for policy evaluation use lerobot-rollout + repo_name = cfg.dataset.repo_id.split("/", 1)[-1] + if repo_name.startswith("eval_"): + raise ValueError( + "Dataset names starting with 'eval_' are reserved for policy evaluation. " + "lerobot-record is for data collection only. Use lerobot-rollout for policy deployment." 
+ ) + cfg.dataset.stamp_repo_id() dataset = LeRobotDataset.create( cfg.dataset.repo_id, cfg.dataset.fps, @@ -558,30 +412,6 @@ def record(cfg: RecordConfig) -> LeRobotDataset: encoder_threads=cfg.dataset.encoder_threads, ) - # Load pretrained policy - policy = ( - None - if cfg.policy is None - else make_policy(cfg.policy, ds_meta=dataset.meta, rename_map=cfg.dataset.rename_map) - ) - preprocessor = None - postprocessor = None - interpolator = None - if cfg.policy is not None: - preprocessor, postprocessor = make_pre_post_processors( - policy_cfg=cfg.policy, - pretrained_path=cfg.policy.pretrained_path, - dataset_stats=rename_stats(dataset.meta.stats, cfg.dataset.rename_map), - preprocessor_overrides={ - "device_processor": {"device": cfg.policy.device}, - "rename_observations_processor": {"rename_map": cfg.dataset.rename_map}, - }, - ) - # Create interpolator for smoother policy control - if cfg.interpolation_multiplier > 1: - interpolator = ActionInterpolator(multiplier=cfg.interpolation_multiplier) - logging.info(f"Action interpolation enabled: {cfg.interpolation_multiplier}x control rate") - robot.connect() if teleop is not None: teleop.connect() @@ -605,14 +435,10 @@ def record(cfg: RecordConfig) -> LeRobotDataset: robot_action_processor=robot_action_processor, robot_observation_processor=robot_observation_processor, teleop=teleop, - policy=policy, - preprocessor=preprocessor, - postprocessor=postprocessor, dataset=dataset, control_time_s=cfg.dataset.episode_time_s, single_task=cfg.dataset.single_task, display_data=cfg.display_data, - interpolator=interpolator, display_compressed_images=display_compressed_images, ) @@ -660,7 +486,10 @@ def record(cfg: RecordConfig) -> LeRobotDataset: listener.stop() if cfg.dataset.push_to_hub: - dataset.push_to_hub(tags=cfg.dataset.tags, private=cfg.dataset.private) + if dataset and dataset.num_episodes > 0: + dataset.push_to_hub(tags=cfg.dataset.tags, private=cfg.dataset.private) + else: + logging.warning("No episodes saved — 
skipping push to hub") log_say("Exiting", cfg.play_sounds) return dataset diff --git a/src/lerobot/scripts/lerobot_rollout.py b/src/lerobot/scripts/lerobot_rollout.py new file mode 100644 index 000000000..6a81563ee --- /dev/null +++ b/src/lerobot/scripts/lerobot_rollout.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Policy deployment engine with pluggable rollout strategies. + +``lerobot-rollout`` is the single CLI for running trained policies on +real robots. 
+ +Strategies +---------- + --strategy.type=base Autonomous rollout, no recording + --strategy.type=sentry Continuous recording with auto-upload + --strategy.type=highlight Ring buffer + keystroke save + --strategy.type=dagger Human-in-the-loop (DAgger / RaC) + +Inference backends +------------------ + --inference.type=sync One policy call per control tick (default) + --inference.type=rtc Real-Time Chunking for slow VLA models + +Usage examples +-------------- +:: + + # Base mode — quick evaluation with sync inference + lerobot-rollout \\ + --strategy.type=base \\ + --policy.path=lerobot/act_koch_real \\ + --robot.type=koch_follower \\ + --robot.port=/dev/ttyACM0 \\ + --task="pick up cube" --duration=30 + + # Base mode — RTC inference for slow VLAs (Pi0, Pi0.5, SmolVLA) + lerobot-rollout \\ + --strategy.type=base \\ + --policy.path=lerobot/pi0_base \\ + --inference.type=rtc \\ + --inference.rtc.execution_horizon=10 \\ + --inference.rtc.max_guidance_weight=10.0 \\ + --robot.type=so100_follower \\ + --robot.port=/dev/ttyACM0 \\ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \\ + --task="pick up cube" --duration=60 + + # Sentry mode — continuous recording with periodic upload + lerobot-rollout \\ + --strategy.type=sentry \\ + --strategy.upload_every_n_episodes=5 \\ + --policy.path=lerobot/pi0_base \\ + --inference.type=rtc \\ + --robot.type=so100_follower \\ + --robot.port=/dev/ttyACM0 \\ + --dataset.repo_id=user/rollout_sentry_data \\ + --dataset.single_task="patrol" --duration=3600 + + # Highlight mode — ring buffer, press 's' to save, 'h' to push + lerobot-rollout \\ + --strategy.type=highlight \\ + --strategy.ring_buffer_seconds=30 \\ + --policy.path=lerobot/act_koch_real \\ + --robot.type=koch_follower \\ + --robot.port=/dev/ttyACM0 \\ + --dataset.repo_id=user/rollout_highlight_data \\ + --dataset.single_task="pick up cube" + + # DAgger mode — human-in-the-loop corrections only + lerobot-rollout \\ + 
--strategy.type=dagger \\ + --strategy.num_episodes=20 \\ + --policy.path=outputs/pretrain/checkpoints/last/pretrained_model \\ + --robot.type=bi_openarm_follower \\ + --teleop.type=openarm_mini \\ + --dataset.repo_id=user/rollout_hil_data \\ + --dataset.single_task="Fold the T-shirt" + + # DAgger mode — continuous recording with RTC inference + lerobot-rollout \\ + --strategy.type=dagger \\ + --strategy.record_autonomous=true \\ + --strategy.num_episodes=50 \\ + --inference.type=rtc \\ + --inference.rtc.execution_horizon=10 \\ + --policy.path=user/my_pi0_policy \\ + --robot.type=so100_follower \\ + --robot.port=/dev/ttyACM0 \\ + --teleop.type=so101_leader \\ + --teleop.port=/dev/ttyACM1 \\ + --dataset.repo_id=user/rollout_dagger_rtc_data \\ + --dataset.single_task="Grasp the block" + + # With Rerun visualization and torch.compile + lerobot-rollout \\ + --strategy.type=base \\ + --policy.path=lerobot/act_koch_real \\ + --robot.type=koch_follower \\ + --robot.port=/dev/ttyACM0 \\ + --task="pick up cube" --duration=60 \\ + --display_data=true \\ + --use_torch_compile=true + + # Resume a previous sentry recording session + lerobot-rollout \\ + --strategy.type=sentry \\ + --policy.path=user/my_policy \\ + --robot.type=so100_follower \\ + --robot.port=/dev/ttyACM0 \\ + --dataset.repo_id=user/rollout_sentry_data \\ + --dataset.single_task="patrol" \\ + --resume=true +""" + +import logging + +from lerobot.cameras.opencv import OpenCVCameraConfig # noqa: F401 +from lerobot.cameras.realsense import RealSenseCameraConfig # noqa: F401 +from lerobot.cameras.zmq import ZMQCameraConfig # noqa: F401 +from lerobot.configs import parser +from lerobot.robots import ( # noqa: F401 + Robot, + RobotConfig, + bi_openarm_follower, + bi_so_follower, + earthrover_mini_plus, + hope_jr, + koch_follower, + omx_follower, + openarm_follower, + reachy2, + so_follower, + unitree_g1 as unitree_g1_robot, +) +from lerobot.rollout import RolloutConfig, build_rollout_context, create_strategy +from 
lerobot.teleoperators import ( # noqa: F401 + Teleoperator, + TeleoperatorConfig, + bi_openarm_leader, + bi_so_leader, + homunculus, + koch_leader, + omx_leader, + openarm_leader, + openarm_mini, + reachy2_teleoperator, + so_leader, + unitree_g1, +) +from lerobot.utils.import_utils import register_third_party_plugins +from lerobot.utils.process import ProcessSignalHandler +from lerobot.utils.utils import init_logging +from lerobot.utils.visualization_utils import init_rerun + +logger = logging.getLogger(__name__) + + +@parser.wrap() +def rollout(cfg: RolloutConfig): + """Main entry point for policy deployment.""" + init_logging() + + if cfg.display_data: + logger.info("Initializing Rerun visualization (ip=%s, port=%s)", cfg.display_ip, cfg.display_port) + init_rerun(session_name="rollout", ip=cfg.display_ip, port=cfg.display_port) + + signal_handler = ProcessSignalHandler(use_threads=True, display_pid=False) + shutdown_event = signal_handler.shutdown_event + + logger.info("Building rollout context...") + ctx = build_rollout_context(cfg, shutdown_event) + + strategy = create_strategy(cfg.strategy) + logger.info("Rollout strategy: %s", cfg.strategy.type) + logger.info( + "Robot: %s | FPS: %.0f | Duration: %s", + cfg.robot.type if cfg.robot else "?", + cfg.fps, + f"{cfg.duration}s" if cfg.duration > 0 else "infinite", + ) + + try: + strategy.setup(ctx) + logger.info("Rollout setup complete, starting rollout...") + strategy.run(ctx) + except KeyboardInterrupt: + logger.info("Interrupted by user") + finally: + strategy.teardown(ctx) + + logger.info("Rollout finished") + + +def main(): + """CLI entry point for ``lerobot-rollout``.""" + register_third_party_plugins() + rollout() + + +if __name__ == "__main__": + main() diff --git a/src/lerobot/scripts/lerobot_train.py b/src/lerobot/scripts/lerobot_train.py index ec923bcc1..ac4893a42 100644 --- a/src/lerobot/scripts/lerobot_train.py +++ b/src/lerobot/scripts/lerobot_train.py @@ -47,6 +47,7 @@ from lerobot.datasets import 
EpisodeAwareSampler, make_dataset from lerobot.envs import close_envs, make_env, make_env_pre_post_processors from lerobot.optim.factory import make_optimizer_and_scheduler from lerobot.policies import PreTrainedPolicy, make_policy, make_pre_post_processors +from lerobot.rewards import make_reward_pre_post_processors from lerobot.utils.collate import lerobot_collate_fn from lerobot.utils.import_utils import register_third_party_plugins from lerobot.utils.logging_utils import AverageMeter, MetricsTracker @@ -71,8 +72,8 @@ def update_policy( accelerator: "Accelerator", lr_scheduler=None, lock=None, - rabc_weights_provider=None, -) -> tuple[MetricsTracker, dict]: + sample_weighter=None, +) -> tuple[MetricsTracker, dict | None]: """ Performs a single training step to update the policy's weights. @@ -88,7 +89,7 @@ def update_policy( accelerator: The Accelerator instance for distributed training and mixed precision. lr_scheduler: An optional learning rate scheduler. lock: An optional lock for thread-safe optimizer updates. - rabc_weights_provider: Optional RABCWeights instance for sample weighting. + sample_weighter: Optional SampleWeighter instance for per-sample loss weighting. 
Returns: A tuple containing: @@ -98,27 +99,31 @@ def update_policy( start_time = time.perf_counter() policy.train() - # Get RA-BC weights if enabled - rabc_batch_weights = None - rabc_batch_stats = None - if rabc_weights_provider is not None: - rabc_batch_weights, rabc_batch_stats = rabc_weights_provider.compute_batch_weights(batch) + # Compute sample weights if a weighter is provided + sample_weights = None + weight_stats = None + if sample_weighter is not None: + sample_weights, weight_stats = sample_weighter.compute_batch_weights(batch) # Let accelerator handle mixed precision with accelerator.autocast(): - # Use per-sample loss when RA-BC is enabled for proper weighting - if rabc_batch_weights is not None: - # Get per-sample losses + if sample_weights is not None: + # Use per-sample loss for weighted training + # Note: Policies supporting sample weighting must implement forward(batch, reduction="none") per_sample_loss, output_dict = policy.forward(batch, reduction="none") - # Apply RA-BC weights: L_RA-BC = Σ(w_i * l_i) / (Σw_i + ε) - # rabc_batch_weights is already normalized to sum to batch_size + # Weighted loss: each sample's contribution is scaled by its weight. + # We divide by weight sum (not batch size) so that if some weights are zero, + # the remaining samples contribute proportionally more, preserving gradient scale. + # Weights are pre-normalized to sum to batch_size for stable training dynamics. 
epsilon = 1e-6 - loss = (per_sample_loss * rabc_batch_weights).sum() / (rabc_batch_weights.sum() + epsilon) - # Log raw mean weight (before normalization) - this is the meaningful metric - output_dict["rabc_mean_weight"] = rabc_batch_stats["raw_mean_weight"] - output_dict["rabc_num_zero_weight"] = rabc_batch_stats["num_zero_weight"] - output_dict["rabc_num_full_weight"] = rabc_batch_stats["num_full_weight"] + loss = (per_sample_loss * sample_weights).sum() / (sample_weights.sum() + epsilon) + + # Log weighting statistics + if output_dict is None: + output_dict = {} + for key, value in weight_stats.items(): + output_dict[f"sample_weight_{key}"] = value else: loss, output_dict = policy.forward(batch) @@ -189,8 +194,8 @@ def train(cfg: TrainPipelineConfig, accelerator: "Accelerator | None" = None): ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) # Accelerate auto-detects the device based on the available hardware and ignores the policy.device setting. - # Force the device to be CPU when policy.device is set to CPU. - force_cpu = cfg.policy.device == "cpu" + # Force the device to be CPU when the active config's device is set to CPU (works for both policy and reward model training). 
+ force_cpu = cfg.trainable_config.device == "cpu" accelerator = Accelerator( step_scheduler_with_optimizer=False, kwargs_handlers=[ddp_kwargs], @@ -246,26 +251,49 @@ def train(cfg: TrainPipelineConfig, accelerator: "Accelerator | None" = None): logging.info("Creating env") eval_env = make_env(cfg.env, n_envs=cfg.eval.batch_size, use_async_envs=cfg.eval.use_async_envs) - if is_main_process: - logging.info("Creating policy") - policy = make_policy( - cfg=cfg.policy, - ds_meta=dataset.meta, - rename_map=cfg.rename_map, - ) + if cfg.is_reward_model_training: + if is_main_process: + logging.info("Creating reward model") + from lerobot.rewards import make_reward_model + + policy = make_reward_model( + cfg=cfg.reward_model, + dataset_stats=dataset.meta.stats, + dataset_meta=dataset.meta, + ) + if not policy.is_trainable: + raise ValueError( + f"Reward model '{policy.name}' is zero-shot and cannot be trained via lerobot-train. " + "Use it directly for inference via compute_reward() (e.g. offline precompute)." + ) + else: + if is_main_process: + logging.info("Creating policy") + policy = make_policy( + cfg=cfg.policy, + ds_meta=dataset.meta, + rename_map=cfg.rename_map, + ) if cfg.peft is not None: - logging.info("Using PEFT! Wrapping model.") - # Convert CLI peft config to dict for overrides - peft_cli_overrides = dataclasses.asdict(cfg.peft) - policy = policy.wrap_with_peft(peft_cli_overrides=peft_cli_overrides) + if cfg.is_reward_model_training: + raise ValueError("PEFT is only supported for policy training. ") + from peft import PeftModel - # Wait for all processes to finish policy creation before continuing + if isinstance(policy, PeftModel): + logging.info("PEFT adapter already loaded from checkpoint, skipping wrap_with_peft.") + else: + logging.info("Using PEFT! 
Wrapping model.") + peft_cli_overrides = dataclasses.asdict(cfg.peft) + policy = policy.wrap_with_peft(peft_cli_overrides=peft_cli_overrides) + + # Wait for all processes to finish model creation before continuing accelerator.wait_for_everyone() - processor_pretrained_path = cfg.policy.pretrained_path + active_cfg = cfg.trainable_config + processor_pretrained_path = active_cfg.pretrained_path if ( - getattr(cfg.policy, "use_relative_actions", False) + getattr(active_cfg, "use_relative_actions", False) and processor_pretrained_path is not None and not cfg.resume ): @@ -275,18 +303,15 @@ def train(cfg: TrainPipelineConfig, accelerator: "Accelerator | None" = None): ) processor_pretrained_path = None - # Create processors - only provide dataset_stats if not resuming from saved processors processor_kwargs = {} postprocessor_kwargs = {} if (processor_pretrained_path and not cfg.resume) or not processor_pretrained_path: - # Only provide dataset_stats when not resuming from saved processor state processor_kwargs["dataset_stats"] = dataset.meta.stats - # For SARM, always provide dataset_meta for progress normalization - if cfg.policy.type == "sarm": + if cfg.is_reward_model_training: processor_kwargs["dataset_meta"] = dataset.meta - if processor_pretrained_path is not None: + if not cfg.is_reward_model_training and processor_pretrained_path is not None: processor_kwargs["preprocessor_overrides"] = { "device_processor": {"device": device.type}, "normalizer_processor": { @@ -306,38 +331,36 @@ def train(cfg: TrainPipelineConfig, accelerator: "Accelerator | None" = None): }, } - preprocessor, postprocessor = make_pre_post_processors( - policy_cfg=cfg.policy, - pretrained_path=processor_pretrained_path, - **processor_kwargs, - **postprocessor_kwargs, - ) + if cfg.is_reward_model_training: + preprocessor, postprocessor = make_reward_pre_post_processors( + cfg.reward_model, + **processor_kwargs, + ) + else: + preprocessor, postprocessor = make_pre_post_processors( + 
policy_cfg=cfg.policy, + pretrained_path=processor_pretrained_path, + **processor_kwargs, + **postprocessor_kwargs, + ) if is_main_process: logging.info("Creating optimizer and scheduler") optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy) - # Load precomputed SARM progress for RA-BC if enabled - # Generate progress using: src/lerobot/policies/sarm/compute_rabc_weights.py - rabc_weights = None - if cfg.use_rabc: - from lerobot.utils.rabc import RABCWeights + # Create sample weighter if configured (e.g., for RA-BC training) + sample_weighter = None + if cfg.sample_weighting is not None: + from lerobot.utils.sample_weighting import make_sample_weighter - # Get chunk_size from policy config - chunk_size = getattr(policy.config, "chunk_size", None) - if chunk_size is None: - raise ValueError("Chunk size is not found in policy config") - - head_mode = getattr(cfg, "rabc_head_mode", "sparse") - logging.info(f"Loading SARM progress for RA-BC from {cfg.rabc_progress_path}") - logging.info(f"Using chunk_size={chunk_size} from policy config, head_mode={head_mode}") - rabc_weights = RABCWeights( - progress_path=cfg.rabc_progress_path, - chunk_size=chunk_size, - head_mode=head_mode, - kappa=getattr(cfg, "rabc_kappa", 0.01), - epsilon=getattr(cfg, "rabc_epsilon", 1e-6), - device=device, + if is_main_process: + logging.info(f"Creating sample weighter: {cfg.sample_weighting.type}") + sample_weighter = make_sample_weighter( + cfg.sample_weighting, + policy, + device, + dataset_root=cfg.dataset.root, + dataset_repo_id=cfg.dataset.repo_id, ) step = 0 # number of policy updates (forward + backward + optim) @@ -366,13 +389,13 @@ def train(cfg: TrainPipelineConfig, accelerator: "Accelerator | None" = None): logging.info(f"{num_total_params=} ({format_big_number(num_total_params)})") # create dataloader for offline training - if hasattr(cfg.policy, "drop_n_last_frames"): + if hasattr(active_cfg, "drop_n_last_frames"): shuffle = False sampler = EpisodeAwareSampler( 
dataset.meta.episodes["dataset_from_index"], dataset.meta.episodes["dataset_to_index"], episode_indices_to_use=dataset.episodes, - drop_n_last_frames=cfg.policy.drop_n_last_frames, + drop_n_last_frames=active_cfg.drop_n_last_frames, shuffle=True, ) else: @@ -450,7 +473,7 @@ def train(cfg: TrainPipelineConfig, accelerator: "Accelerator | None" = None): cfg.optimizer.grad_clip_norm, accelerator=accelerator, lr_scheduler=lr_scheduler, - rabc_weights_provider=rabc_weights, + sample_weighter=sample_weighter, ) # Note: eval and checkpoint happens *after* the `step`th training update has completed, so we @@ -469,16 +492,10 @@ def train(cfg: TrainPipelineConfig, accelerator: "Accelerator | None" = None): wandb_log_dict = train_tracker.to_dict() if output_dict: wandb_log_dict.update(output_dict) - # Log RA-BC statistics if enabled - if rabc_weights is not None: - rabc_stats = rabc_weights.get_stats() - wandb_log_dict.update( - { - "rabc_delta_mean": rabc_stats["delta_mean"], - "rabc_delta_std": rabc_stats["delta_std"], - "rabc_num_frames": rabc_stats["num_frames"], - } - ) + # Log sample weighting statistics if enabled + if sample_weighter is not None: + weighter_stats = sample_weighter.get_stats() + wandb_log_dict.update({f"sample_weighting/{k}": v for k, v in weighter_stats.items()}) wandb_logger.log_dict(wandb_log_dict, step) train_tracker.reset_averages() @@ -560,14 +577,15 @@ def train(cfg: TrainPipelineConfig, accelerator: "Accelerator | None" = None): if is_main_process: logging.info("End of training") - if cfg.policy.push_to_hub: - unwrapped_policy = accelerator.unwrap_model(policy) - if cfg.policy.use_peft: - unwrapped_policy.push_model_to_hub(cfg, peft_model=unwrapped_policy) + if getattr(active_cfg, "push_to_hub", False): + unwrapped_model = accelerator.unwrap_model(policy) + # PEFT only applies when training a policy — reward models use the plain path. 
+ if not cfg.is_reward_model_training and cfg.policy.use_peft: + unwrapped_model.push_model_to_hub(cfg, peft_model=unwrapped_model) else: - unwrapped_policy.push_model_to_hub(cfg) - preprocessor.push_to_hub(cfg.policy.repo_id) - postprocessor.push_to_hub(cfg.policy.repo_id) + unwrapped_model.push_model_to_hub(cfg) + preprocessor.push_to_hub(active_cfg.repo_id) + postprocessor.push_to_hub(active_cfg.repo_id) # Properly clean up the distributed process group accelerator.wait_for_everyone() diff --git a/src/lerobot/teleoperators/openarm_mini/openarm_mini.py b/src/lerobot/teleoperators/openarm_mini/openarm_mini.py index edc28e7a6..2df1dfcfe 100644 --- a/src/lerobot/teleoperators/openarm_mini/openarm_mini.py +++ b/src/lerobot/teleoperators/openarm_mini/openarm_mini.py @@ -112,7 +112,7 @@ class OpenArmMini(Teleoperator): @property def feedback_features(self) -> dict[str, type]: - return {} + return self.action_features @property def is_connected(self) -> bool: @@ -348,8 +348,9 @@ class OpenArmMini(Teleoperator): if left_goals: self.bus_left.sync_write("Goal_Position", left_goals) + @check_if_not_connected def send_feedback(self, feedback: dict[str, float]) -> None: - raise NotImplementedError("Feedback is not yet implemented for OpenArm Mini.") + self.write_goal_positions(feedback) @check_if_not_connected def disconnect(self) -> None: diff --git a/src/lerobot/teleoperators/so_leader/so_leader.py b/src/lerobot/teleoperators/so_leader/so_leader.py index 04ce0f21f..7e731d5ed 100644 --- a/src/lerobot/teleoperators/so_leader/so_leader.py +++ b/src/lerobot/teleoperators/so_leader/so_leader.py @@ -59,7 +59,7 @@ class SOLeader(Teleoperator): @property def feedback_features(self) -> dict[str, type]: - return {} + return self.action_features @property def is_connected(self) -> bool: @@ -130,6 +130,12 @@ class SOLeader(Teleoperator): for motor in self.bus.motors: self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value) + def enable_torque(self) -> None: + 
self.bus.enable_torque() + + def disable_torque(self) -> None: + self.bus.disable_torque() + def setup_motors(self) -> None: for motor in reversed(self.bus.motors): input(f"Connect the controller board to the '{motor}' motor only and press enter.") @@ -145,9 +151,11 @@ class SOLeader(Teleoperator): logger.debug(f"{self} read action: {dt_ms:.1f}ms") return action + @check_if_not_connected def send_feedback(self, feedback: dict[str, float]) -> None: - # TODO: Implement force feedback - raise NotImplementedError + goals = {k.removesuffix(".pos"): v for k, v in feedback.items() if k.endswith(".pos")} + if goals: + self.bus.sync_write("Goal_Position", goals) @check_if_not_connected def disconnect(self) -> None: diff --git a/src/lerobot/templates/lerobot_rewardmodel_modelcard_template.md b/src/lerobot/templates/lerobot_rewardmodel_modelcard_template.md new file mode 100644 index 000000000..933bf7586 --- /dev/null +++ b/src/lerobot/templates/lerobot_rewardmodel_modelcard_template.md @@ -0,0 +1,55 @@ +--- +# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 +# Doc / guide: https://huggingface.co/docs/hub/model-cards +# prettier-ignore +{{card_data}} +--- + +# Reward Model Card for {{ model_name | default("Reward Model ID", true) }} + + + +{% if model_name == "reward_classifier" %} +A reward classifier is a lightweight neural network that scores observations or trajectories for task success, providing a learned reward signal or offline evaluation when explicit rewards are unavailable. +{% elif model_name == "sarm" %} +A Success-Aware Reward Model (SARM) predicts a dense reward signal from observations, typically used downstream for reinforcement learning or human-in-the-loop fine-tuning when task success is not directly observable. 
+{% else %} +_Reward model type not recognized — please update this template._ +{% endif %} + +This reward model has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot). +See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index). + +--- + +## How to Get Started with the Reward Model + +### Train from scratch + +```bash +lerobot-train \ + --dataset.repo_id=${HF_USER}/ \ + --reward_model.type={{ model_name | default("reward_classifier", true) }} \ + --output_dir=outputs/train/ \ + --job_name=lerobot_reward_training \ + --reward_model.device=cuda \ + --reward_model.repo_id=${HF_USER}/ \ + --wandb.enable=true +``` + +_Writes checkpoints to `outputs/train//checkpoints/`._ + +### Load the reward model in Python + +```python +from lerobot.rewards import make_reward_model + +reward_model = make_reward_model(pretrained_path="/") +reward = reward_model.compute_reward(batch) +``` + +--- + +## Model Details + +- **License:** {{ license | default("\[More Information Needed]", true) }} diff --git a/src/lerobot/utils/action_interpolator.py b/src/lerobot/utils/action_interpolator.py new file mode 100644 index 000000000..222dc33b5 --- /dev/null +++ b/src/lerobot/utils/action_interpolator.py @@ -0,0 +1,116 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Action interpolation for smoother robot control. 
+ +Provides configurable Nx control rate by interpolating between consecutive actions. +Useful with RTC and action-chunking policies to reduce jerkiness. +""" + +from torch import Tensor + + +class ActionInterpolator: + """Interpolates between consecutive actions for smoother control. + + When enabled with multiplier N, produces N actions per policy action + by linearly interpolating between the previous and current action. + + Example with multiplier=3: + prev_action -> [1/3 interpolated, 2/3 interpolated, current_action] + + This effectively multiplies the control rate for smoother motion. + + Usage: + interpolator = ActionInterpolator(multiplier=2) # 2x control rate + + # In control loop: + if interpolator.needs_new_action(): + new_action = queue.get() + if new_action: + interpolator.add(new_action.cpu()) + + action = interpolator.get() + if action: + robot.send_action(action) + """ + + def __init__(self, multiplier: int = 1): + """Initialize the interpolator. + + Args: + multiplier: Control rate multiplier (1 = no interpolation, 2 = 2x, 3 = 3x, etc.) + """ + if multiplier < 1: + raise ValueError(f"multiplier must be >= 1, got {multiplier}") + self.multiplier = multiplier + self._prev: Tensor | None = None + self._buffer: list[Tensor] = [] + self._idx = 0 + + @property + def enabled(self) -> bool: + """Whether interpolation is active (multiplier > 1).""" + return self.multiplier > 1 + + def reset(self): + """Reset interpolation state (call between episodes).""" + self._prev = None + self._buffer = [] + self._idx = 0 + + def needs_new_action(self) -> bool: + """Check if a new action is needed from the queue.""" + return self._idx >= len(self._buffer) + + def add(self, action: Tensor) -> None: + """Add a new action and compute interpolated sequence. + + Args: + action: New action tensor from policy/queue (already on CPU). 
+ """ + if self.multiplier > 1 and self._prev is not None: + self._buffer = [] + for i in range(1, self.multiplier + 1): + t = i / self.multiplier + interp = self._prev + t * (action - self._prev) + self._buffer.append(interp) + else: + # First step: no previous action yet, so run at base FPS without interpolation. + self._buffer = [action.clone()] + self._prev = action.clone() + self._idx = 0 + + def get(self) -> Tensor | None: + """Get the next interpolated action. + + Returns: + Next action tensor, or None if buffer is exhausted. + """ + if self._idx >= len(self._buffer): + return None + action = self._buffer[self._idx] + self._idx += 1 + return action + + def get_control_interval(self, fps: float) -> float: + """Get the control interval based on interpolation multiplier. + + Args: + fps: Base frames per second. + + Returns: + Control interval in seconds (divided by multiplier). + """ + return 1.0 / (fps * self.multiplier) diff --git a/src/lerobot/utils/import_utils.py b/src/lerobot/utils/import_utils.py index 1ec0b6375..bfa87fb86 100644 --- a/src/lerobot/utils/import_utils.py +++ b/src/lerobot/utils/import_utils.py @@ -115,7 +115,9 @@ _feetech_sdk_available = is_package_available("feetech-servo-sdk", import_name=" _reachy2_sdk_available = is_package_available("reachy2_sdk") _can_available = is_package_available("python-can", "can") _unitree_sdk_available = is_package_available("unitree-sdk2py", "unitree_sdk2py") -_pyrealsense2_available = is_package_available("pyrealsense2") +_pyrealsense2_available = is_package_available("pyrealsense2") or is_package_available( + "pyrealsense2-macosx", import_name="pyrealsense2" +) _zmq_available = is_package_available("pyzmq", import_name="zmq") _hebi_available = is_package_available("hebi-py", import_name="hebi") _teleop_available = is_package_available("teleop") diff --git a/src/lerobot/utils/pedal.py b/src/lerobot/utils/pedal.py new file mode 100644 index 000000000..88f3db1f9 --- /dev/null +++ b/src/lerobot/utils/pedal.py 
@@ -0,0 +1,83 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generic foot pedal listener using evdev. + +Callers supply a callback receiving the pressed key code (e.g. ``"KEY_A"``) +and an optional device path. The listener runs in a daemon thread and +silently no-ops when :mod:`evdev` is not installed or the device is +unavailable. Strategy-specific key mapping logic lives in the caller. +""" + +from __future__ import annotations + +import logging +import threading +from collections.abc import Callable + +logger = logging.getLogger(__name__) + +DEFAULT_PEDAL_DEVICE = "/dev/input/by-id/usb-PCsensor_FootSwitch-event-kbd" + + +def start_pedal_listener( + on_press: Callable[[str], None], + device_path: str = DEFAULT_PEDAL_DEVICE, +) -> threading.Thread | None: + """Spawn a daemon thread that forwards pedal key-press codes to ``on_press``. + + Parameters + ---------- + on_press: + Callback invoked with the pressed key code string (e.g. ``"KEY_A"``) + on each pedal press event. The callback runs in the listener thread + and must be thread-safe. + device_path: + Linux input device path (e.g. ``/dev/input/by-id/...``). + + Returns + ------- + The started daemon :class:`threading.Thread`, or ``None`` when + :mod:`evdev` is not installed (optional dependency; silent no-op). 
+ """ + try: + from evdev import InputDevice, categorize, ecodes + except ImportError: + return None + + def pedal_reader() -> None: + try: + dev = InputDevice(device_path) + logger.info("Pedal connected: %s", dev.name) + for ev in dev.read_loop(): + if ev.type != ecodes.EV_KEY: + continue + key = categorize(ev) + code = key.keycode + if isinstance(code, (list, tuple)): + code = code[0] + if key.keystate != 1: # only key-down events + continue + try: + on_press(code) + except Exception as cb_err: # pragma: no cover - defensive + logger.warning("Pedal callback error: %s", cb_err) + except (FileNotFoundError, PermissionError): + pass + except Exception as e: + logger.warning("Pedal error: %s", e) + + thread = threading.Thread(target=pedal_reader, daemon=True, name="PedalListener") + thread.start() + return thread diff --git a/src/lerobot/rl/process.py b/src/lerobot/utils/process.py similarity index 100% rename from src/lerobot/rl/process.py rename to src/lerobot/utils/process.py diff --git a/src/lerobot/utils/sample_weighting.py b/src/lerobot/utils/sample_weighting.py new file mode 100644 index 000000000..83eec3126 --- /dev/null +++ b/src/lerobot/utils/sample_weighting.py @@ -0,0 +1,239 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Sample weighting abstraction for training. 
+ +This module provides an abstract base class for sample weighting strategies (e.g., RA-BC) +that can be used during training without polluting the training script with +policy-specific code. + +Example usage: + # In training config + sample_weighting: + type: rabc + progress_path: hf://datasets/my-dataset/sarm_progress.parquet + head_mode: sparse + kappa: 0.01 + + # In training script + sample_weighter = make_sample_weighter(cfg.sample_weighting, policy, device, dataset_root=cfg.dataset.root, dataset_repo_id=cfg.dataset.repo_id) + ... + weights, stats = sample_weighter.compute_batch_weights(batch) +""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from pathlib import Path +from typing import TYPE_CHECKING + +import torch + +if TYPE_CHECKING: + from lerobot.policies.pretrained import PreTrainedPolicy + + +class SampleWeighter(ABC): + """ + Implementations compute per-sample weights that can be used to weight + the loss during training. This enables techniques like: + - RA-BC (Reward-Aligned Behavior Cloning) + - Importance sampling + - Curriculum learning + - Quality-based filtering + """ + + @abstractmethod + def compute_batch_weights(self, batch: dict) -> tuple[torch.Tensor, dict]: + """ + Compute per-sample weights for a training batch. + + Args: + batch: Training batch dictionary containing at minimum an "index" key + with global frame indices. + """ + + @abstractmethod + def get_stats(self) -> dict: + """ + Get global statistics about the weighting strategy. + """ + + +@dataclass +class SampleWeightingConfig: + """ + Configuration for sample weighting during training. + + This is a generic config that supports multiple weighting strategies. + The `type` field determines which implementation to use, and `extra_params` + contains additional type-specific parameters. + + Attributes: + type: Weighting strategy type ("rabc", "uniform", etc.) 
+ progress_path: Path to precomputed progress values (for RABC) + head_mode: Which model head to use for progress ("sparse" or "dense") + kappa: Hard threshold for high-quality samples (RABC-specific) + epsilon: Small constant for numerical stability + extra_params: Additional type-specific parameters passed to the weighter + """ + + type: str = "rabc" + progress_path: str | None = None + head_mode: str = "sparse" + kappa: float = 0.01 + epsilon: float = 1e-6 + # Additional type-specific params can be added here or passed via extra_params + extra_params: dict = field(default_factory=dict) + + +def make_sample_weighter( + config: SampleWeightingConfig | None, + policy: PreTrainedPolicy, + device: torch.device, + dataset_root: str | None = None, + dataset_repo_id: str | None = None, +) -> SampleWeighter | None: + """ + Factory function to create a SampleWeighter from config. + + This keeps policy-specific initialization logic out of the training script. + + Args: + config: Sample weighting configuration, or None to disable weighting. + policy: The policy being trained (used to extract chunk_size, etc.) + device: Device to place weight tensors on. + dataset_root: Local path to dataset root (for auto-detecting progress_path). + dataset_repo_id: HuggingFace repo ID (for auto-detecting progress_path). + """ + if config is None: + return None + + if config.type == "rabc": + return _make_rabc_weighter(config, policy, device, dataset_root, dataset_repo_id) + + if config.type == "uniform": + # No-op weighter that returns uniform weights + return UniformWeighter(device=device) + + raise ValueError(f"Unknown sample weighting type: '{config.type}'. Supported types: 'rabc', 'uniform'") + + +def _make_rabc_weighter( + config: SampleWeightingConfig, + policy: PreTrainedPolicy, + device: torch.device, + dataset_root: str | None = None, + dataset_repo_id: str | None = None, +) -> SampleWeighter: + """Create RABC weighter with policy-specific initialization. 
+ + Args: + config: Sample weighting configuration. + policy: The policy being trained (used to extract chunk_size). + device: Device to place weight tensors on. + dataset_root: Local path to dataset root (for auto-detecting progress_path). + dataset_repo_id: HuggingFace repo ID (for auto-detecting progress_path). + """ + # Import here to avoid circular imports and keep RABC code in SARM module + from lerobot.rewards.sarm.rabc import RABCWeights + + # Extract chunk_size from policy config + chunk_size = getattr(policy.config, "chunk_size", None) + if chunk_size is None: + raise ValueError( + "RABC sample weighting requires a policy with 'chunk_size' in its config. " + "This is typically set for action-chunking policies like ACT, Diffusion, PI0, etc." + ) + + # Determine progress_path: use explicit config or auto-detect from dataset + progress_path = config.progress_path + if progress_path is None: + if dataset_root: + progress_path = str(Path(dataset_root) / "sarm_progress.parquet") + elif dataset_repo_id: + progress_path = f"hf://datasets/{dataset_repo_id}/sarm_progress.parquet" + else: + raise ValueError( + "RABC sample weighting requires 'progress_path' to be set, " + "or dataset_root/dataset_repo_id for auto-detection. " + "Generate progress values using: " + "python -m lerobot.rewards.sarm.compute_rabc_weights --help" + ) + + return RABCWeights( + progress_path=progress_path, + chunk_size=chunk_size, + head_mode=config.head_mode, + kappa=config.kappa, + epsilon=config.epsilon, + device=device, + **config.extra_params, + ) + + +class UniformWeighter(SampleWeighter): + """ + No-op sample weighter that returns uniform weights. + + Useful as a baseline or when you want to disable weighting without + changing the training code structure. + + Note: + Batch size is determined by looking for tensor values in the batch + dictionary. The method checks common keys like "action", "index", + and "observation.state" first, then falls back to scanning all values. 
+ """ + + def __init__(self, device: torch.device): + self.device = device + + def compute_batch_weights(self, batch: dict) -> tuple[torch.Tensor, dict]: + """Return uniform weights (all ones).""" + batch_size = self._determine_batch_size(batch) + + weights = torch.ones(batch_size, device=self.device) + stats = {"mean_weight": 1.0, "type": "uniform"} + return weights, stats + + def _determine_batch_size(self, batch: dict) -> int: + """ + Determine batch size from the batch dictionary. + + Checks common keys first, then scans all values for tensors. + + Args: + batch: Training batch dictionary. + """ + if not batch: + raise ValueError("Cannot determine batch size from empty batch") + + # Check common keys first + for key in ["action", "index", "observation.state"]: + if key in batch and isinstance(batch[key], torch.Tensor): + return batch[key].shape[0] + + # Scan all values for any tensor + for value in batch.values(): + if isinstance(value, torch.Tensor) and value.ndim >= 1: + return value.shape[0] + + # Last resort: return 1 (this handles non-tensor batches) + return 1 + + def get_stats(self) -> dict: + """Return empty stats for uniform weighting.""" + return {"type": "uniform"} diff --git a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/actions.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/actions.safetensors index dd7d4d0e7..5584b8643 100644 --- a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/actions.safetensors +++ b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/actions.safetensors @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c2b8f8532c7a0b776de5e536b8b54e30b1a0c2e3d5cc25a2d86fe43e40ae5e8c +oid sha256:8a31653c11eccdd4d80fd3f6a351cd54c49b8a48db1f7e9faf38fddd7900a09f size 515400 diff --git a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors 
b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors index c58bb44bc..3baa80ba7 100644 --- a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors +++ b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:224b5fa4828aa88171b68c036e8919c1eae563e2113f03b6461eadf5bf8525a6 +oid sha256:75bf051698b37dcd7517ec8025a896ab5a0551a6dde5f89d0a3d5d50966e83e6 size 31672 diff --git a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors index 9b6ef7f5d..f3d1ff59a 100644 --- a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors +++ b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:016d2fa8fe5f58017dfd46f4632fdc19dfd751e32a2c7cde2077c6f95546d6bd +oid sha256:88e10930a10041d50f2cf369e6813ac14618d13dad1c21bdde1ac7798611c6ba size 68 diff --git a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors index 5da67a1af..bdc26816f 100644 --- a/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors +++ b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eca0d87a699620e4fec7e68539b0be91e4cc933f6bf12032da52c182ab6f38cf +oid sha256:89833a5ccdb7d85c83f717ff8ec68b8e822005cb8803899acaae88c578e2e3ae size 31672 diff --git a/tests/configs/test_recipe.py b/tests/configs/test_recipe.py index bd71d540f..ba0ad9117 100644 --- a/tests/configs/test_recipe.py +++ 
b/tests/configs/test_recipe.py @@ -1,7 +1,5 @@ #!/usr/bin/env python -from pathlib import Path - import pytest from lerobot.configs.recipe import MessageTurn, TrainingRecipe @@ -15,18 +13,3 @@ def test_message_recipe_validates_unknown_binding(): MessageTurn(role="assistant", content="ok", stream="high_level", target=True), ] ) - - -def test_canonical_recipe_loads(): - recipe = TrainingRecipe.from_yaml(Path("src/lerobot/configs/recipes/pi05_hirobot.yaml")) - - assert recipe.blend is not None - assert set(recipe.blend) == { - "memory_update", - "user_interjection_response", - "high_level_subtask", - "low_level_execution", - "ask_vqa_top", - "ask_vqa_wrist", - } - assert sum(component.weight for component in recipe.blend.values()) == pytest.approx(0.96) diff --git a/tests/datasets/test_aggregate.py b/tests/datasets/test_aggregate.py index b74299311..6d646d4f7 100644 --- a/tests/datasets/test_aggregate.py +++ b/tests/datasets/test_aggregate.py @@ -113,7 +113,7 @@ def assert_metadata_consistency(aggr_ds, ds_0, ds_1): """Test that metadata is correctly aggregated.""" # Test basic info assert aggr_ds.fps == ds_0.fps == ds_1.fps, "FPS should be the same across all datasets" - assert aggr_ds.meta.info["robot_type"] == ds_0.meta.info["robot_type"] == ds_1.meta.info["robot_type"], ( + assert aggr_ds.meta.info.robot_type == ds_0.meta.info.robot_type == ds_1.meta.info.robot_type, ( "Robot type should be the same" ) @@ -153,8 +153,8 @@ def assert_video_frames_integrity(aggr_ds, ds_0, ds_1): video_keys = list( filter( - lambda key: aggr_ds.meta.info["features"][key]["dtype"] == "video", - aggr_ds.meta.info["features"].keys(), + lambda key: aggr_ds.meta.info.features[key]["dtype"] == "video", + aggr_ds.meta.info.features.keys(), ) ) diff --git a/tests/datasets/test_dataset_metadata.py b/tests/datasets/test_dataset_metadata.py index 6db41d05c..6c784c90b 100644 --- a/tests/datasets/test_dataset_metadata.py +++ b/tests/datasets/test_dataset_metadata.py @@ -161,7 +161,7 @@ def 
test_init_loads_existing_metadata(tmp_path, lerobot_dataset_metadata_factory assert meta.total_episodes == 3 assert meta.total_frames == 150 - assert meta.fps == info["fps"] + assert meta.fps == info.fps # ── Property accessors ─────────────────────────────────────────────── diff --git a/tests/datasets/test_language_render.py b/tests/datasets/test_language_render.py index a7bc026ca..ca27089fc 100644 --- a/tests/datasets/test_language_render.py +++ b/tests/datasets/test_language_render.py @@ -1,7 +1,5 @@ #!/usr/bin/env python -from pathlib import Path - import pytest from lerobot.configs.recipe import MessageTurn, TrainingRecipe @@ -370,9 +368,27 @@ def test_resolve_task_explicit_override_beats_rephrasings(): assert rendered["messages"][0]["content"] == "explicit override wins" -def test_canonical_recipe_can_render_low_level_branch(): - recipe = TrainingRecipe.from_yaml(Path("src/lerobot/configs/recipes/pi05_hirobot.yaml")) - low_level = TrainingRecipe(blend={"low": recipe.blend["low_level_execution"]}) +def test_low_level_branch_renders_active_subtask(): + low_level = TrainingRecipe( + blend={ + "low": TrainingRecipe( + weight=1.0, + messages=[ + MessageTurn( + role="user", + content="${task}\nPlan: ${plan}\nMemory: ${memory}", + stream="high_level", + ), + MessageTurn( + role="assistant", + content="${subtask}", + stream="low_level", + target=True, + ), + ], + ) + } + ) rendered = render_sample( recipe=low_level, diff --git a/tests/datasets/test_lerobot_dataset.py b/tests/datasets/test_lerobot_dataset.py index 49efa84d9..f3bda037f 100644 --- a/tests/datasets/test_lerobot_dataset.py +++ b/tests/datasets/test_lerobot_dataset.py @@ -80,18 +80,18 @@ def _write_dataset_tree( ) tasks = tasks_factory(total_tasks=1) episodes = episodes_factory( - features=info["features"], - fps=info["fps"], + features=info.features, + fps=info.fps, total_episodes=1, total_frames=3, tasks=tasks, ) - stats = stats_factory(features=info["features"]) + stats = 
stats_factory(features=info.features) hf_dataset = hf_dataset_factory( - features=info["features"], + features=info.features, tasks=tasks, episodes=episodes, - fps=info["fps"], + fps=info.fps, ) create_info(root, info) @@ -416,6 +416,18 @@ def test_create_initial_counts_zero(tmp_path): assert dataset.num_frames == 0 +def test_create_propagates_video_files_size_in_mb(tmp_path): + """video_files_size_in_mb passed to create() is reflected in the dataset metadata.""" + dataset = LeRobotDataset.create( + repo_id=DUMMY_REPO_ID, + fps=DEFAULT_FPS, + features=SIMPLE_FEATURES, + root=tmp_path / "ds", + video_files_size_in_mb=42.0, + ) + assert dataset.meta.video_files_size_in_mb == 42.0 + + def test_add_frame_works_in_write_mode(tmp_path): """add_frame() succeeds on a dataset created via create().""" dataset = LeRobotDataset.create( diff --git a/tests/fixtures/dataset_factories.py b/tests/fixtures/dataset_factories.py index e068484b0..48128a8d0 100644 --- a/tests/fixtures/dataset_factories.py +++ b/tests/fixtures/dataset_factories.py @@ -28,7 +28,7 @@ from datasets import Dataset from lerobot.datasets.dataset_metadata import CODEBASE_VERSION, LeRobotDatasetMetadata from lerobot.datasets.feature_utils import get_hf_features_from_features -from lerobot.datasets.io_utils import hf_transform_to_torch +from lerobot.datasets.io_utils import flatten_dict, hf_transform_to_torch from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets.utils import ( DEFAULT_CHUNK_SIZE, @@ -36,10 +36,10 @@ from lerobot.datasets.utils import ( DEFAULT_DATA_PATH, DEFAULT_VIDEO_FILE_SIZE_IN_MB, DEFAULT_VIDEO_PATH, + DatasetInfo, ) from lerobot.datasets.video_utils import encode_video_frames from lerobot.utils.constants import DEFAULT_FEATURES -from lerobot.utils.utils import flatten_dict from tests.fixtures.constants import ( DEFAULT_FPS, DUMMY_CAMERA_FEATURES, @@ -157,33 +157,31 @@ def info_factory(features_factory): total_episodes: int = 0, total_frames: int = 0, total_tasks: 
int = 0, - total_videos: int = 0, chunks_size: int = DEFAULT_CHUNK_SIZE, - data_files_size_in_mb: float = DEFAULT_DATA_FILE_SIZE_IN_MB, - video_files_size_in_mb: float = DEFAULT_VIDEO_FILE_SIZE_IN_MB, + data_files_size_in_mb: int = DEFAULT_DATA_FILE_SIZE_IN_MB, + video_files_size_in_mb: int = DEFAULT_VIDEO_FILE_SIZE_IN_MB, data_path: str = DEFAULT_DATA_PATH, video_path: str = DEFAULT_VIDEO_PATH, motor_features: dict = DUMMY_MOTOR_FEATURES, camera_features: dict = DUMMY_CAMERA_FEATURES, use_videos: bool = True, - ) -> dict: + ) -> DatasetInfo: features = features_factory(motor_features, camera_features, use_videos) - return { - "codebase_version": codebase_version, - "robot_type": robot_type, - "total_episodes": total_episodes, - "total_frames": total_frames, - "total_tasks": total_tasks, - "total_videos": total_videos, - "chunks_size": chunks_size, - "data_files_size_in_mb": data_files_size_in_mb, - "video_files_size_in_mb": video_files_size_in_mb, - "fps": fps, - "splits": {}, - "data_path": data_path, - "video_path": video_path if use_videos else None, - "features": features, - } + return DatasetInfo( + codebase_version=codebase_version, + robot_type=robot_type, + total_episodes=total_episodes, + total_frames=total_frames, + total_tasks=total_tasks, + chunks_size=chunks_size, + data_files_size_in_mb=data_files_size_in_mb, + video_files_size_in_mb=video_files_size_in_mb, + fps=fps, + splits={}, + data_path=data_path, + video_path=video_path if use_videos else None, + features=features, + ) return _create_info @@ -333,12 +331,12 @@ def create_videos(info_factory, img_array_factory): total_episodes=total_episodes, total_frames=total_frames, total_tasks=total_tasks ) - video_feats = {key: feats for key, feats in info["features"].items() if feats["dtype"] == "video"} + video_feats = {key: feats for key, feats in info.features.items() if feats["dtype"] == "video"} for key, ft in video_feats.items(): # create and save images with identifiable content tmp_dir = root / 
"tmp_images" tmp_dir.mkdir(parents=True, exist_ok=True) - for frame_index in range(info["total_frames"]): + for frame_index in range(info.total_frames): content = f"{key}-{frame_index}" img = img_array_factory(height=ft["shape"][0], width=ft["shape"][1], content=content) pil_img = PIL.Image.fromarray(img) @@ -348,7 +346,7 @@ def create_videos(info_factory, img_array_factory): video_path = root / DEFAULT_VIDEO_PATH.format(video_key=key, chunk_index=0, file_index=0) video_path.parent.mkdir(parents=True, exist_ok=True) # Use the global fps from info, not video-specific fps which might not exist - encode_video_frames(tmp_dir, video_path, fps=info["fps"]) + encode_video_frames(tmp_dir, video_path, fps=info.fps) shutil.rmtree(tmp_dir) return _create_video_directory @@ -433,16 +431,16 @@ def lerobot_dataset_metadata_factory( if info is None: info = info_factory() if stats is None: - stats = stats_factory(features=info["features"]) + stats = stats_factory(features=info.features) if tasks is None: - tasks = tasks_factory(total_tasks=info["total_tasks"]) + tasks = tasks_factory(total_tasks=info.total_tasks) if episodes is None: - video_keys = [key for key, ft in info["features"].items() if ft["dtype"] == "video"] + video_keys = [key for key, ft in info.features.items() if ft["dtype"] == "video"] episodes = episodes_factory( - features=info["features"], - fps=info["fps"], - total_episodes=info["total_episodes"], - total_frames=info["total_frames"], + features=info.features, + fps=info.fps, + total_episodes=info.total_episodes, + total_frames=info.total_frames, video_keys=video_keys, tasks=tasks, ) @@ -503,23 +501,23 @@ def lerobot_dataset_factory( chunks_size=chunks_size, ) if stats is None: - stats = stats_factory(features=info["features"]) + stats = stats_factory(features=info.features) if tasks is None: - tasks = tasks_factory(total_tasks=info["total_tasks"]) + tasks = tasks_factory(total_tasks=info.total_tasks) if episodes_metadata is None: - video_keys = [key for key, ft 
in info["features"].items() if ft["dtype"] == "video"] + video_keys = [key for key, ft in info.features.items() if ft["dtype"] == "video"] episodes_metadata = episodes_factory( - features=info["features"], - fps=info["fps"], - total_episodes=info["total_episodes"], - total_frames=info["total_frames"], + features=info.features, + fps=info.fps, + total_episodes=info.total_episodes, + total_frames=info.total_frames, video_keys=video_keys, tasks=tasks, multi_task=multi_task, ) if hf_dataset is None: hf_dataset = hf_dataset_factory( - features=info["features"], tasks=tasks, episodes=episodes_metadata, fps=info["fps"] + features=info.features, tasks=tasks, episodes=episodes_metadata, fps=info.fps ) # Write data on disk diff --git a/tests/fixtures/hub.py b/tests/fixtures/hub.py index 4333b91a3..2f521c766 100644 --- a/tests/fixtures/hub.py +++ b/tests/fixtures/hub.py @@ -62,19 +62,19 @@ def mock_snapshot_download_factory( if info is None: info = info_factory(data_files_size_in_mb=data_files_size_in_mb, chunks_size=chunks_size) if stats is None: - stats = stats_factory(features=info["features"]) + stats = stats_factory(features=info.features) if tasks is None: - tasks = tasks_factory(total_tasks=info["total_tasks"]) + tasks = tasks_factory(total_tasks=info.total_tasks) if episodes is None: episodes = episodes_factory( - features=info["features"], - fps=info["fps"], - total_episodes=info["total_episodes"], - total_frames=info["total_frames"], + features=info.features, + fps=info.fps, + total_episodes=info.total_episodes, + total_frames=info.total_frames, tasks=tasks, ) if hf_dataset is None: - hf_dataset = hf_dataset_factory(tasks=tasks, episodes=episodes, fps=info["fps"]) + hf_dataset = hf_dataset_factory(tasks=tasks, episodes=episodes, fps=info.fps) def _mock_snapshot_download( repo_id: str, # TODO(rcadene): repo_id should be used no? 
@@ -97,7 +97,7 @@ def mock_snapshot_download_factory( DEFAULT_DATA_PATH.format(chunk_index=0, file_index=0), ] - video_keys = [key for key, feats in info["features"].items() if feats["dtype"] == "video"] + video_keys = [key for key, feats in info.features.items() if feats["dtype"] == "video"] for key in video_keys: all_files.append(DEFAULT_VIDEO_PATH.format(video_key=key, chunk_index=0, file_index=0)) diff --git a/tests/policies/rtc/test_action_interpolator.py b/tests/policies/rtc/test_action_interpolator.py index 9a4276df1..3eb239d7e 100644 --- a/tests/policies/rtc/test_action_interpolator.py +++ b/tests/policies/rtc/test_action_interpolator.py @@ -17,9 +17,9 @@ import pytest import torch -from lerobot.policies.rtc.action_interpolator import ActionInterpolator from lerobot.policies.rtc.action_queue import ActionQueue from lerobot.policies.rtc.configuration_rtc import RTCConfig +from lerobot.utils.action_interpolator import ActionInterpolator # ====================== Fixtures ====================== diff --git a/tests/policies/rtc/test_configuration_rtc.py b/tests/policies/rtc/test_configuration_rtc.py index bb4550eaa..40d171c0c 100644 --- a/tests/policies/rtc/test_configuration_rtc.py +++ b/tests/policies/rtc/test_configuration_rtc.py @@ -26,7 +26,7 @@ def test_rtc_config_default_initialization(): """Test RTCConfig initializes with default values.""" config = RTCConfig() - assert config.enabled is False + assert config.enabled is True assert config.prefix_attention_schedule == RTCAttentionSchedule.LINEAR assert config.max_guidance_weight == 10.0 assert config.execution_horizon == 10 diff --git a/tests/policies/rtc/test_rtc_relative_actions.py b/tests/policies/rtc/test_rtc_relative_actions.py index fa888ec05..66667ea56 100644 --- a/tests/policies/rtc/test_rtc_relative_actions.py +++ b/tests/policies/rtc/test_rtc_relative_actions.py @@ -22,7 +22,7 @@ from lerobot.configs.types import ( PolicyFeature, RTCAttentionSchedule, ) -from lerobot.processor import 
TransitionKey, batch_to_transition +from lerobot.processor import TransitionKey, batch_to_transition, create_transition from lerobot.processor.normalize_processor import NormalizerProcessorStep, UnnormalizerProcessorStep from lerobot.processor.relative_action_processor import ( AbsoluteActionsProcessorStep, @@ -52,6 +52,9 @@ _rtc_debug_mod = _import_rtc_module("lerobot.policies.rtc.debug_tracker", "debug _rtc_mod = _import_rtc_module("lerobot.policies.rtc.modeling_rtc", "modeling_rtc.py") RTCProcessor = _rtc_mod.RTCProcessor +_rtc_relative_mod = _import_rtc_module("lerobot.policies.rtc.relative", "relative.py") +reanchor_relative_rtc_prefix = _rtc_relative_mod.reanchor_relative_rtc_prefix + ACTION_DIM = 6 CHUNK_SIZE = 50 EXECUTION_HORIZON = 10 @@ -187,7 +190,7 @@ class TestRTCDenoiseWithRelativeLeftovers: class TestFullPipelineRelativeRTC: - """End-to-end test of the RTC + relative actions pipeline matching eval_with_real_robot.py flow.""" + """End-to-end test of the RTC + relative actions pipeline matching lerobot-rollout flow.""" def test_preprocessor_caches_state_for_postprocessor(self): """Preprocessor's relative step should cache state so postprocessor can convert back.""" @@ -218,7 +221,9 @@ class TestFullPipelineRelativeRTC: def test_roundtrip_with_identity_normalization(self): """Actions → relative → normalize → [model] → unnormalize → absolute should recover originals. - Using mean=0, std=1 normalization (identity).""" + + Using mean=0, std=1 normalization (identity). + """ relative_step, normalizer, unnormalizer, absolute_step = _make_relative_pipeline() state = torch.randn(1, ACTION_DIM) @@ -240,7 +245,7 @@ class TestFullPipelineRelativeRTC: torch.testing.assert_close(recovered, actions, atol=1e-5, rtol=1e-5) def test_eval_loop_simulation(self): - """Simulate the eval_with_real_robot.py loop with relative actions. + """Simulate the lerobot-rollout loop with relative actions. 
Iteration 1: No leftovers → model generates relative actions → store for RTC Iteration 2: Use leftovers as RTC guidance → model generates new relative actions @@ -400,13 +405,113 @@ class TestStateRebasingApproximation: assert error_excluded < 1e-6, f"Excluded joint should have zero error, got {error_excluded}" +class TestRTCReanchoringWithStateNormalizer: + """RTC re-anchoring under non-identity OBS_STATE normalization.""" + + @staticmethod + def _build_normalizer_with_state_stats(): + """Build a relative-action preprocessor with non-trivial OBS_STATE stats.""" + features = { + ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(ACTION_DIM,)), + OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(ACTION_DIM,)), + } + norm_map = { + FeatureType.ACTION: NormalizationMode.MEAN_STD, + FeatureType.STATE: NormalizationMode.MEAN_STD, + } + stats = { + ACTION: { + "mean": torch.zeros(ACTION_DIM).numpy(), + "std": (0.5 * torch.ones(ACTION_DIM)).numpy(), + }, + OBS_STATE: { + "mean": (5.0 * torch.ones(ACTION_DIM)).numpy(), + "std": (2.0 * torch.ones(ACTION_DIM)).numpy(), + }, + } + relative_step = RelativeActionsProcessorStep(enabled=True) + normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats) + return relative_step, normalizer + + def test_reanchor_with_raw_state_matches_normalize_of_absolute_minus_state(self): + """Reanchoring with the raw cached state yields ``normalize(prev_actions_absolute - raw_state)``.""" + relative_step, normalizer = self._build_normalizer_with_state_stats() + + raw_state = torch.tensor([[7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]) + relative_step(batch_to_transition({OBS_STATE: raw_state.clone()})) + + prev_actions_absolute = torch.tensor([[2.0, 3.0, 4.0, 5.0, 6.0, 7.0]] * 5) + + result = reanchor_relative_rtc_prefix( + prev_actions_absolute=prev_actions_absolute, + current_state=relative_step.get_cached_state(), + relative_step=relative_step, + normalizer_step=normalizer, + policy_device="cpu", + ) + + 
expected_relative = to_relative_actions(prev_actions_absolute, raw_state, [True] * ACTION_DIM) + expected = normalizer(create_transition(action=expected_relative))[TransitionKey.ACTION] + torch.testing.assert_close(result, expected, atol=1e-5, rtol=1e-5) + + def test_reanchor_with_normalized_state_produces_wrong_result(self): + """Reanchoring with raw vs. normalized state produces meaningfully different outputs.""" + relative_step, normalizer = self._build_normalizer_with_state_stats() + + raw_state = torch.tensor([[7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]) + relative_step(batch_to_transition({OBS_STATE: raw_state.clone()})) + + normalized_obs = normalizer(batch_to_transition({OBS_STATE: raw_state.clone()})) + normalized_state = normalized_obs[TransitionKey.OBSERVATION][OBS_STATE] + assert not torch.allclose(normalized_state, raw_state) + + prev_actions_absolute = torch.tensor([[2.0, 3.0, 4.0, 5.0, 6.0, 7.0]] * 5) + + result_raw = reanchor_relative_rtc_prefix( + prev_actions_absolute=prev_actions_absolute, + current_state=raw_state, + relative_step=relative_step, + normalizer_step=normalizer, + policy_device="cpu", + ) + result_normalized = reanchor_relative_rtc_prefix( + prev_actions_absolute=prev_actions_absolute, + current_state=normalized_state, + relative_step=relative_step, + normalizer_step=normalizer, + policy_device="cpu", + ) + + max_abs_diff = (result_raw - result_normalized).abs().max() + assert max_abs_diff > 0.5, ( + f"Raw and normalized state produced near-identical outputs (max diff {max_abs_diff:.4f}); " + "OBS_STATE stats are too close to identity to be sensitive." 
+ ) + + def test_engine_pipeline_cached_state_is_raw_after_full_preprocess(self): + """``get_cached_state()`` returns raw OBS_STATE after the full preprocessor pipeline runs.""" + relative_step, normalizer = self._build_normalizer_with_state_stats() + + raw_state = torch.tensor([[7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]) + + transition = batch_to_transition({OBS_STATE: raw_state.clone()}) + transition = relative_step(transition) + preprocessed = normalizer(transition) + + cached = relative_step.get_cached_state() + torch.testing.assert_close(cached, raw_state, atol=1e-6, rtol=1e-6) + + post_normalize_state = preprocessed[TransitionKey.OBSERVATION][OBS_STATE] + assert not torch.allclose(cached, post_normalize_state, atol=1e-3) + + def _detect_relative_actions(preprocessor) -> bool: - """Mirror of the helper in eval_with_real_robot.py for testing without importing it.""" + """Mirror of the helper in lerobot-rollout for testing without importing it.""" return any(isinstance(step, RelativeActionsProcessorStep) and step.enabled for step in preprocessor.steps) class TestDetectRelativeActions: - """Test the _detect_relative_actions helper logic used by eval_with_real_robot.py.""" + """Test the _detect_relative_actions helper logic used by lerobot-rollout.""" def test_detects_enabled_relative_step(self): class FakePipeline: diff --git a/tests/rewards/__init__.py b/tests/rewards/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/processor/test_classifier_processor.py b/tests/rewards/test_classifier_processor.py similarity index 92% rename from tests/processor/test_classifier_processor.py rename to tests/rewards/test_classifier_processor.py index e1567bf29..c54d80b0e 100644 --- a/tests/processor/test_classifier_processor.py +++ b/tests/rewards/test_classifier_processor.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - # Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,8 +19,6 @@ import pytest import torch from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature -from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig -from lerobot.policies.sac.reward_model.processor_classifier import make_classifier_processor from lerobot.processor import ( DataProcessorPipeline, DeviceProcessorStep, @@ -31,6 +27,8 @@ from lerobot.processor import ( TransitionKey, ) from lerobot.processor.converters import create_transition, transition_to_batch +from lerobot.rewards.classifier.configuration_classifier import RewardClassifierConfig +from lerobot.rewards.classifier.processor_classifier import make_classifier_processor from lerobot.utils.constants import OBS_IMAGE, OBS_STATE @@ -42,7 +40,7 @@ def create_default_config(): OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)), } config.output_features = { - "reward": PolicyFeature(type=FeatureType.ACTION, shape=(1,)), # Classifier output + "reward": PolicyFeature(type=FeatureType.ACTION, shape=(1,)), } config.normalization_mapping = { FeatureType.STATE: NormalizationMode.MEAN_STD, @@ -90,17 +88,14 @@ def test_classifier_processor_normalization(): config = create_default_config() stats = create_default_stats() - preprocessor, postprocessor = make_classifier_processor( - config, - stats, - ) + preprocessor, postprocessor = make_classifier_processor(config, stats) # Create test data observation = { OBS_STATE: torch.randn(10), OBS_IMAGE: torch.randn(3, 224, 224), } - action = torch.randn(1) # Dummy action/reward + action = torch.randn(1) transition = create_transition(observation, action) batch = transition_to_batch(transition) @@ -120,10 +115,7 @@ def test_classifier_processor_cuda(): config.device = "cuda" stats = create_default_stats() - preprocessor, postprocessor = make_classifier_processor( - config, - stats, - ) + preprocessor, postprocessor = 
make_classifier_processor(config, stats) # Create CPU data observation = { @@ -132,7 +124,6 @@ def test_classifier_processor_cuda(): } action = torch.randn(1) transition = create_transition(observation, action) - batch = transition_to_batch(transition) # Process through preprocessor @@ -158,10 +149,7 @@ def test_classifier_processor_accelerate_scenario(): config.device = "cuda:0" stats = create_default_stats() - preprocessor, postprocessor = make_classifier_processor( - config, - stats, - ) + preprocessor, postprocessor = make_classifier_processor(config, stats) # Simulate Accelerate: data already on GPU device = torch.device("cuda:0") @@ -171,7 +159,6 @@ def test_classifier_processor_accelerate_scenario(): } action = torch.randn(1).to(device) transition = create_transition(observation, action) - batch = transition_to_batch(transition) # Process through preprocessor @@ -201,7 +188,6 @@ def test_classifier_processor_multi_gpu(): } action = torch.randn(1).to(device) transition = create_transition(observation, action) - batch = transition_to_batch(transition) # Process through preprocessor @@ -231,7 +217,6 @@ def test_classifier_processor_without_stats(): } action = torch.randn(1) transition = create_transition(observation, action) - batch = transition_to_batch(transition) processed = preprocessor(batch) @@ -294,7 +279,6 @@ def test_classifier_processor_mixed_precision(): } action = torch.randn(1, dtype=torch.float32) transition = create_transition(observation, action) - batch = transition_to_batch(transition) # Process through preprocessor @@ -312,10 +296,7 @@ def test_classifier_processor_batch_data(): config = create_default_config() stats = create_default_stats() - preprocessor, postprocessor = make_classifier_processor( - config, - stats, - ) + preprocessor, postprocessor = make_classifier_processor(config, stats) # Test with batched data batch_size = 16 @@ -325,7 +306,6 @@ def test_classifier_processor_batch_data(): } action = torch.randn(batch_size, 1) 
transition = create_transition(observation, action) - batch = transition_to_batch(transition) # Process through preprocessor @@ -343,15 +323,11 @@ def test_classifier_processor_postprocessor_identity(): config = create_default_config() stats = create_default_stats() - preprocessor, postprocessor = make_classifier_processor( - config, - stats, - ) + preprocessor, postprocessor = make_classifier_processor(config, stats) # Create test data for postprocessor - reward = torch.tensor([[0.8], [0.3], [0.9]]) # Batch of rewards/predictions + reward = torch.tensor([[0.8], [0.3], [0.9]]) transition = create_transition(action=reward) - _ = transition_to_batch(transition) # Process through postprocessor diff --git a/tests/policies/hilserl/test_modeling_classifier.py b/tests/rewards/test_modeling_classifier.py similarity index 86% rename from tests/policies/hilserl/test_modeling_classifier.py rename to tests/rewards/test_modeling_classifier.py index 6d262c01b..08f6121a1 100644 --- a/tests/policies/hilserl/test_modeling_classifier.py +++ b/tests/rewards/test_modeling_classifier.py @@ -1,5 +1,3 @@ -# !/usr/bin/env python - # Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,8 +16,8 @@ import pytest import torch from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature -from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig -from lerobot.policies.sac.reward_model.modeling_classifier import ClassifierOutput +from lerobot.rewards.classifier.configuration_classifier import RewardClassifierConfig +from lerobot.rewards.classifier.modeling_classifier import ClassifierOutput from lerobot.utils.constants import OBS_IMAGE, REWARD from tests.utils import skip_if_package_missing @@ -42,7 +40,7 @@ def test_classifier_output(): reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers" ) def test_binary_classifier_with_default_params(): - from lerobot.policies.sac.reward_model.modeling_classifier import Classifier + from lerobot.rewards.classifier.modeling_classifier import Classifier config = RewardClassifierConfig() config.input_features = { @@ -86,7 +84,7 @@ def test_binary_classifier_with_default_params(): reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers" ) def test_multiclass_classifier(): - from lerobot.policies.sac.reward_model.modeling_classifier import Classifier + from lerobot.rewards.classifier.modeling_classifier import Classifier num_classes = 5 config = RewardClassifierConfig() @@ -128,11 +126,15 @@ def test_multiclass_classifier(): reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers" ) def test_default_device(): - from lerobot.policies.sac.reward_model.modeling_classifier import Classifier + from lerobot.rewards.classifier.modeling_classifier import Classifier config = RewardClassifierConfig() - assert config.device == "cpu" + assert config.device is None or config.device == "cpu" + config.input_features = { + OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 
224)), + } + config.num_cameras = 1 classifier = Classifier(config) for p in classifier.parameters(): assert p.device == torch.device("cpu") @@ -143,11 +145,15 @@ def test_default_device(): reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers" ) def test_explicit_device_setup(): - from lerobot.policies.sac.reward_model.modeling_classifier import Classifier + from lerobot.rewards.classifier.modeling_classifier import Classifier config = RewardClassifierConfig(device="cpu") assert config.device == "cpu" + config.input_features = { + OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)), + } + config.num_cameras = 1 classifier = Classifier(config) for p in classifier.parameters(): assert p.device == torch.device("cpu") diff --git a/tests/rewards/test_reward_model_base.py b/tests/rewards/test_reward_model_base.py new file mode 100644 index 000000000..1c4dad642 --- /dev/null +++ b/tests/rewards/test_reward_model_base.py @@ -0,0 +1,447 @@ +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for the reward model base classes and registry.""" + +import json +from dataclasses import dataclass +from pathlib import Path +from types import SimpleNamespace + +import pytest +import torch + +from lerobot.configs.rewards import RewardModelConfig +from lerobot.optim.optimizers import AdamWConfig +from lerobot.rewards.pretrained import PreTrainedRewardModel + + +@RewardModelConfig.register_subclass(name="_dummy_hub_reward") +@dataclass +class _DummyHubRewardConfig(RewardModelConfig): + def get_optimizer_preset(self): + return AdamWConfig(lr=1e-4) + + +class _DummyHubReward(PreTrainedRewardModel): + config_class = _DummyHubRewardConfig + name = "_dummy_hub_reward" + + def __init__(self, config): + super().__init__(config) + self.bias = torch.nn.Parameter(torch.zeros(1)) + + def compute_reward(self, batch): + return self.bias.expand(1) + + +def test_reward_model_config_registry(): + """Verify that classifier and sarm are registered.""" + known = RewardModelConfig.get_known_choices() + assert "reward_classifier" in known + assert "sarm" in known + + +def test_reward_model_config_lookup(): + """Verify that we can look up configs by name.""" + cls = RewardModelConfig.get_choice_class("reward_classifier") + from lerobot.rewards.classifier.configuration_classifier import RewardClassifierConfig + + assert cls is RewardClassifierConfig + + +def test_factory_get_reward_model_class(): + """Test the get_reward_model_class factory.""" + from lerobot.rewards.factory import get_reward_model_class + + cls = get_reward_model_class("sarm") + from lerobot.rewards.sarm.modeling_sarm import SARMRewardModel + + assert cls is SARMRewardModel + + +def test_factory_unknown_raises(): + """Unknown name should raise ValueError.""" + from lerobot.rewards.factory import get_reward_model_class + + with pytest.raises(ValueError, match="not available"): + get_reward_model_class("nonexistent_reward_model") + + +def test_pretrained_reward_model_requires_config_class(): + """Subclass 
without config_class should fail.""" + with pytest.raises(TypeError, match="must define 'config_class'"): + + class BadModel(PreTrainedRewardModel): + name = "bad" + + def compute_reward(self, batch): + pass + + +def test_pretrained_reward_model_requires_name(): + """Subclass without name should fail.""" + with pytest.raises(TypeError, match="must define 'name'"): + + class BadModel(PreTrainedRewardModel): + config_class = RewardModelConfig + + def compute_reward(self, batch): + pass + + +def test_non_trainable_forward_raises(): + """Non-trainable model should raise on forward().""" + from dataclasses import dataclass + + from lerobot.optim.optimizers import AdamWConfig + + @dataclass + class DummyConfig(RewardModelConfig): + def get_optimizer_preset(self): + return AdamWConfig(lr=1e-4) + + class DummyReward(PreTrainedRewardModel): + config_class = DummyConfig + name = "dummy_test" + + def compute_reward(self, batch): + return torch.zeros(1) + + config = DummyConfig() + model = DummyReward(config) + + with pytest.raises(NotImplementedError, match="not trainable"): + model.forward({"x": torch.zeros(1)}) + + +# --------------------------------------------------------------------------- +# Trainable vs zero-shot (general-purpose) reward models. +# The proposal explicitly supports models like TOPReward that wrap a pretrained +# VLM and produce a reward signal without any training step. These tests pin +# the contract that lets such models coexist with trainable ones. 
+# --------------------------------------------------------------------------- + + +def test_is_trainable_false_when_forward_not_overridden(): + """A reward model that only implements ``compute_reward`` is zero-shot.""" + model, _ = _make_dummy_reward_model() + assert model.is_trainable is False + + +def test_is_trainable_true_when_forward_overridden(): + """Overriding ``forward`` flips ``is_trainable`` to True.""" + + class _TrainableReward(_DummyHubReward): + name = "_trainable_dummy_reward" + + def forward(self, batch): + loss = (self.bias**2).sum() + return loss, {} + + # Register a fresh config subclass so the subclass check passes. + @RewardModelConfig.register_subclass(name="_trainable_dummy_reward") + @dataclass + class _TrainableConfig(_DummyHubRewardConfig): + pass + + _TrainableReward.config_class = _TrainableConfig + model = _TrainableReward(_TrainableConfig()) + assert model.is_trainable is True + + +# --------------------------------------------------------------------------- +# RewardModelConfig.from_pretrained +# --------------------------------------------------------------------------- + + +def test_reward_model_config_from_pretrained_raises_when_config_missing(tmp_path): + """``from_pretrained`` must surface a clear ``FileNotFoundError`` when the + target directory exists but does not contain ``config.json``, instead of + crashing later inside ``draccus.parse``. 
+ """ + # tmp_path exists but has no config.json + with pytest.raises(FileNotFoundError, match="config.json not found"): + RewardModelConfig.from_pretrained(tmp_path) + + +def test_reward_model_config_from_pretrained_roundtrip(tmp_path): + """Round-trip: save a RewardClassifierConfig, reload it, fields must match.""" + from lerobot.rewards.classifier.configuration_classifier import RewardClassifierConfig + + original = RewardClassifierConfig( + num_classes=3, + hidden_dim=128, + latent_dim=64, + num_cameras=1, + learning_rate=5e-4, + ) + original._save_pretrained(tmp_path) + + loaded = RewardModelConfig.from_pretrained(tmp_path) + + assert isinstance(loaded, RewardClassifierConfig) + assert loaded.num_classes == 3 + assert loaded.hidden_dim == 128 + assert loaded.latent_dim == 64 + assert loaded.num_cameras == 1 + assert loaded.learning_rate == 5e-4 + + +# --------------------------------------------------------------------------- +# TrainPipelineConfig — reward model training path +# --------------------------------------------------------------------------- + + +def test_train_pipeline_config_path_fields_includes_reward_model(): + """``--reward_model.path=local/dir`` requires ``reward_model`` to be listed + as a draccus path-field on ``TrainPipelineConfig``.""" + from lerobot.configs.train import TrainPipelineConfig + + fields = TrainPipelineConfig.__get_path_fields__() + assert "policy" in fields + assert "reward_model" in fields + + +def test_train_pipeline_config_trainable_config_returns_reward_model_when_set(): + """When only ``reward_model`` is set, ``trainable_config`` (used by the + trainer for e.g. 
``.device``) must return it — not ``None`` from ``policy``.""" + from lerobot.configs.default import DatasetConfig + from lerobot.configs.train import TrainPipelineConfig + from lerobot.rewards.classifier.configuration_classifier import RewardClassifierConfig + + reward_cfg = RewardClassifierConfig(device="cpu") + cfg = TrainPipelineConfig( + dataset=DatasetConfig(repo_id="user/repo"), + reward_model=reward_cfg, + ) + + assert cfg.is_reward_model_training is True + assert cfg.trainable_config is reward_cfg + # This is what lerobot_train.py uses to decide force_cpu; ``cfg.policy.device`` + # would AttributeError here because policy is None. + assert cfg.trainable_config.device == "cpu" + + +def test_train_pipeline_config_trainable_config_returns_policy_when_set(): + """Mirror of the reward-model case: when only ``policy`` is set, + ``trainable_config`` must return it.""" + from lerobot.configs.default import DatasetConfig + from lerobot.configs.train import TrainPipelineConfig + from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig + + policy_cfg = DiffusionConfig(device="cpu") + cfg = TrainPipelineConfig( + dataset=DatasetConfig(repo_id="user/repo"), + policy=policy_cfg, + ) + + assert cfg.is_reward_model_training is False + assert cfg.trainable_config is policy_cfg + assert cfg.trainable_config.device == "cpu" + + +def test_train_pipeline_config_from_pretrained_migrates_legacy_rabc_fields(tmp_path): + """Legacy top-level RA-BC fields should be migrated into ``sample_weighting``.""" + from lerobot.configs.default import DatasetConfig + from lerobot.configs.train import TRAIN_CONFIG_NAME, TrainPipelineConfig + from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig + + cfg = TrainPipelineConfig( + dataset=DatasetConfig(repo_id="user/repo"), + policy=DiffusionConfig(device="cpu"), + ) + cfg._save_pretrained(tmp_path) + + config_path = tmp_path / TRAIN_CONFIG_NAME + with open(config_path) as f: + payload = json.load(f) + 
+ payload.pop("sample_weighting", None) + payload.update( + { + "use_rabc": True, + "rabc_progress_path": "hf://datasets/user/repo/sarm_progress.parquet", + "rabc_kappa": 0.05, + "rabc_epsilon": 1e-5, + "rabc_head_mode": "dense", + } + ) + with open(config_path, "w") as f: + json.dump(payload, f) + + loaded = TrainPipelineConfig.from_pretrained(tmp_path) + + assert loaded.sample_weighting is not None + assert loaded.sample_weighting.type == "rabc" + assert loaded.sample_weighting.progress_path == "hf://datasets/user/repo/sarm_progress.parquet" + assert loaded.sample_weighting.kappa == 0.05 + assert loaded.sample_weighting.epsilon == 1e-5 + assert loaded.sample_weighting.head_mode == "dense" + + +def test_train_pipeline_config_from_pretrained_strips_legacy_rabc_when_disabled(tmp_path): + """Legacy RA-BC fields should be ignored when ``use_rabc`` was false.""" + from lerobot.configs.default import DatasetConfig + from lerobot.configs.train import TRAIN_CONFIG_NAME, TrainPipelineConfig + from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig + + cfg = TrainPipelineConfig( + dataset=DatasetConfig(repo_id="user/repo"), + policy=DiffusionConfig(device="cpu"), + ) + cfg._save_pretrained(tmp_path) + + config_path = tmp_path / TRAIN_CONFIG_NAME + with open(config_path) as f: + payload = json.load(f) + + payload.pop("sample_weighting", None) + payload.update( + { + "use_rabc": False, + "rabc_progress_path": "hf://datasets/user/repo/sarm_progress.parquet", + "rabc_kappa": 0.05, + "rabc_epsilon": 1e-5, + "rabc_head_mode": "dense", + } + ) + with open(config_path, "w") as f: + json.dump(payload, f) + + loaded = TrainPipelineConfig.from_pretrained(tmp_path) + + assert loaded.sample_weighting is None + + +# --------------------------------------------------------------------------- +# PreTrainedRewardModel hub upload: push_model_to_hub + generate_model_card. 
+# We test the generation side (offline) fully, and the upload side with HfApi +# mocked so nothing actually hits the network. +# --------------------------------------------------------------------------- + + +def _make_dummy_reward_model(**config_kwargs): + return _DummyHubReward(_DummyHubRewardConfig(**config_kwargs)), _DummyHubRewardConfig + + +@pytest.fixture +def _offline_model_card(monkeypatch): + """``ModelCard.validate`` does a live ``POST`` to huggingface.co — bypass it + so tests can run offline.""" + from huggingface_hub import ModelCard + + monkeypatch.setattr(ModelCard, "validate", lambda self, *a, **kw: None) + + +def test_reward_model_generate_model_card_renders_expected_fields(_offline_model_card): + """``generate_model_card`` must produce a card with the right metadata and + body, using the dedicated reward-model template.""" + model, _ = _make_dummy_reward_model( + license="mit", + tags=["robot", "sim"], + ) + + card = model.generate_model_card( + dataset_repo_id="user/my_dataset", + model_type=model.config.type, + license=model.config.license, + tags=model.config.tags, + ) + + # Metadata (YAML header) — ModelCardData fields. + assert card.data.license == "mit" + assert card.data.library_name == "lerobot" + assert card.data.pipeline_tag == "robotics" + assert "reward-model" in card.data.tags + assert model.config.type in card.data.tags + assert card.data.model_name == model.config.type + assert card.data.datasets == "user/my_dataset" + + # Body — specific to the reward-model template, NOT the policy one. 
+ body = str(card) + assert "Reward Model Card" in body + assert "This reward model has been trained" in body + assert "--reward_model.type=" in body # reward-model-specific usage block + + +def test_reward_model_generate_model_card_uses_default_license(_offline_model_card): + """When config.license is None the card falls back to apache-2.0.""" + model, _ = _make_dummy_reward_model() + + card = model.generate_model_card( + dataset_repo_id="user/my_dataset", + model_type=model.config.type, + license=model.config.license, + tags=None, + ) + + assert card.data.license == "apache-2.0" + + +def test_reward_model_push_model_to_hub_uploads_expected_files(monkeypatch, _offline_model_card): + """``push_model_to_hub`` must: + 1. create the repo, + 2. assemble a temp folder with weights + config.json + train_config.json + README.md, + 3. call ``api.upload_folder`` on that folder. + All network calls are mocked. + """ + from huggingface_hub.constants import CONFIG_NAME + + from lerobot.configs.default import DatasetConfig + from lerobot.configs.train import TRAIN_CONFIG_NAME, TrainPipelineConfig + + model, _ = _make_dummy_reward_model( + repo_id="user/my_reward", + license="apache-2.0", + ) + # Point the reward model's train config at a dummy dataset repo. 
+ train_cfg = TrainPipelineConfig( + dataset=DatasetConfig(repo_id="user/my_dataset"), + reward_model=model.config, + ) + + uploaded: dict = {} + fake_commit_info = SimpleNamespace(repo_url=SimpleNamespace(url="https://huggingface.co/user/my_reward")) + + class _FakeHfApi: + def create_repo(self, repo_id, private=None, exist_ok=False): + uploaded["create_repo_id"] = repo_id + uploaded["create_private"] = private + return SimpleNamespace(repo_id=repo_id) + + def upload_folder(self, *, repo_id, repo_type, folder_path, commit_message, **_kwargs): + uploaded["upload_repo_id"] = repo_id + uploaded["upload_repo_type"] = repo_type + uploaded["commit_message"] = commit_message + # Snapshot files assembled in the temp folder — this is the real + # contract we care about. + uploaded["files"] = sorted(p.name for p in Path(folder_path).iterdir()) + return fake_commit_info + + from lerobot.rewards import pretrained as reward_pretrained + + monkeypatch.setattr(reward_pretrained, "HfApi", lambda *a, **kw: _FakeHfApi()) + + model.push_model_to_hub(train_cfg) + + assert uploaded["create_repo_id"] == "user/my_reward" + assert uploaded["upload_repo_id"] == "user/my_reward" + assert uploaded["upload_repo_type"] == "model" + assert uploaded["commit_message"] == "Upload reward model weights, train config and readme" + # Minimum required files that must be uploaded with a reward model. 
+ assert CONFIG_NAME in uploaded["files"] # config.json + assert TRAIN_CONFIG_NAME in uploaded["files"] # train_config.json + assert "README.md" in uploaded["files"] + assert any(name.endswith(".safetensors") for name in uploaded["files"]) diff --git a/tests/policies/test_sarm_processor.py b/tests/rewards/test_sarm_processor.py similarity index 97% rename from tests/policies/test_sarm_processor.py rename to tests/rewards/test_sarm_processor.py index 5b90784a6..65f70d396 100644 --- a/tests/policies/test_sarm_processor.py +++ b/tests/rewards/test_sarm_processor.py @@ -104,8 +104,8 @@ class TestSARMEncodingProcessorStepEndToEnd: def mock_clip_model(self): """Mock CLIP model to avoid loading real weights.""" with ( - patch("lerobot.policies.sarm.processor_sarm.CLIPModel") as mock_model_cls, - patch("lerobot.policies.sarm.processor_sarm.CLIPProcessor") as mock_processor_cls, + patch("lerobot.rewards.sarm.processor_sarm.CLIPModel") as mock_model_cls, + patch("lerobot.rewards.sarm.processor_sarm.CLIPProcessor") as mock_processor_cls, ): # Mock the CLIP model - return embeddings based on input batch size mock_model = MagicMock() @@ -142,7 +142,7 @@ class TestSARMEncodingProcessorStepEndToEnd: @pytest.fixture def processor_with_mocks(self, mock_clip_model): """Create a processor with mocked CLIP and dataset metadata for dual mode.""" - from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep + from lerobot.rewards.sarm.processor_sarm import SARMEncodingProcessorStep # Dual mode config with both sparse and dense annotations config = MockConfig( @@ -256,7 +256,7 @@ class TestSARMEncodingProcessorStepEndToEnd: def test_call_with_batched_input(self, mock_clip_model): """Test processor __call__ with a batched input (multiple frames) in dual mode.""" - from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep + from lerobot.rewards.sarm.processor_sarm import SARMEncodingProcessorStep config = MockConfig( n_obs_steps=8, @@ -332,7 +332,7 @@ class 
TestSARMEncodingProcessorStepEndToEnd: def test_targets_increase_with_progress(self, mock_clip_model): """Test that both sparse and dense targets increase as frame index progresses.""" - from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep + from lerobot.rewards.sarm.processor_sarm import SARMEncodingProcessorStep config = MockConfig( n_obs_steps=8, @@ -404,7 +404,7 @@ class TestSARMEncodingProcessorStepEndToEnd: def test_progress_labels_exact_values(self, mock_clip_model): """Test that progress labels (stage.tau) are computed correctly for known positions.""" - from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep + from lerobot.rewards.sarm.processor_sarm import SARMEncodingProcessorStep # Simple setup: 2 sparse stages, 4 dense stages, 100 frame episode config = MockConfig( @@ -495,7 +495,7 @@ class TestSARMEncodingProcessorStepEndToEnd: """Test that rewind augmentation correctly extends sequence and generates targets.""" import random - from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep + from lerobot.rewards.sarm.processor_sarm import SARMEncodingProcessorStep config = MockConfig( n_obs_steps=8, @@ -587,8 +587,8 @@ class TestSARMEncodingProcessorStepEndToEnd: def test_full_sequence_target_consistency(self, mock_clip_model): """Test that the full sequence of targets is consistent with frame positions.""" - from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep - from lerobot.policies.sarm.sarm_utils import find_stage_and_tau + from lerobot.rewards.sarm.processor_sarm import SARMEncodingProcessorStep + from lerobot.rewards.sarm.sarm_utils import find_stage_and_tau config = MockConfig( n_obs_steps=8, diff --git a/tests/policies/test_sarm_utils.py b/tests/rewards/test_sarm_utils.py similarity index 99% rename from tests/policies/test_sarm_utils.py rename to tests/rewards/test_sarm_utils.py index 510477ec8..9ee542909 100644 --- a/tests/policies/test_sarm_utils.py +++ 
b/tests/rewards/test_sarm_utils.py @@ -18,7 +18,7 @@ import numpy as np import pytest import torch -from lerobot.policies.sarm.sarm_utils import ( +from lerobot.rewards.sarm.sarm_utils import ( apply_rewind_augmentation, compute_absolute_indices, compute_tau, diff --git a/tests/test_cli_peft.py b/tests/test_cli_peft.py index 5d653ee6b..82f41affa 100644 --- a/tests/test_cli_peft.py +++ b/tests/test_cli_peft.py @@ -24,10 +24,6 @@ def lerobot_train(args): return run_command(cmd="lerobot-train", module="lerobot_train", args=args) -def lerobot_record(args): - return run_command(cmd="lerobot-record", module="lerobot_record", args=args) - - def resolve_model_id_for_peft_training(policy_type): """PEFT training needs pretrained models, this finds the pretrained model of a policy type for PEFT training.""" if policy_type == "smolvla": @@ -155,81 +151,3 @@ def test_peft_training_params_are_fewer(policy_type, tmp_path): f"--output_dir={output_dir}", ] ) - - -class DummyRobot: - name = "dummy" - cameras = [] - action_features = {"foo": 1.0, "bar": 2.0} - observation_features = {"obs1": 1.0, "obs2": 2.0} - is_connected = True - - def connect(self, *args): - pass - - def disconnect(self): - pass - - -def dummy_make_robot_from_config(*args, **kwargs): - return DummyRobot() - - -@pytest.mark.parametrize("policy_type", ["smolvla"]) -@skip_if_package_missing("peft") -def test_peft_record_loads_policy(policy_type, tmp_path): - """Train a policy with PEFT and attempt to load it with `lerobot-record`.""" - from peft import PeftModel - - output_dir = tmp_path / f"output_{policy_type}" - model_id = resolve_model_id_for_peft_training(policy_type) - - lerobot_train( - [ - f"--policy.path={model_id}", - "--policy.push_to_hub=false", - "--policy.input_features=null", - "--policy.output_features=null", - "--peft.method=LORA", - "--dataset.repo_id=lerobot/pusht", - "--dataset.episodes=[0, 1]", - "--steps=1", - f"--output_dir={output_dir}", - ] - ) - - policy_dir = output_dir / "checkpoints" / 
"last" / "pretrained_model" - dataset_dir = tmp_path / "eval_pusht" - single_task = "move the table" - loaded_policy = None - - def dummy_record_loop(*args, **kwargs): - nonlocal loaded_policy - - if "dataset" not in kwargs: - return - - dataset = kwargs["dataset"] - dataset.add_frame({"task": single_task}) - loaded_policy = kwargs["policy"] - - with ( - patch("lerobot.scripts.lerobot_record.make_robot_from_config", dummy_make_robot_from_config), - # disable record loop since we're only interested in successful loading of the policy. - patch("lerobot.scripts.lerobot_record.record_loop", dummy_record_loop), - # disable speech output - patch("lerobot.utils.utils.say"), - ): - lerobot_record( - [ - f"--policy.path={policy_dir}", - "--robot.type=so101_follower", - "--robot.port=/dev/null", - "--dataset.repo_id=lerobot/eval_pusht", - f'--dataset.single_task="{single_task}"', - f"--dataset.root={dataset_dir}", - "--dataset.push_to_hub=false", - ] - ) - - assert isinstance(loaded_policy, PeftModel) diff --git a/tests/test_control_robot.py b/tests/test_control_robot.py index 28e91a149..dd10c0c1c 100644 --- a/tests/test_control_robot.py +++ b/tests/test_control_robot.py @@ -21,8 +21,9 @@ import pytest pytest.importorskip("datasets", reason="datasets is required (install lerobot[dataset])") pytest.importorskip("deepdiff", reason="deepdiff is required (install lerobot[hardware])") +from lerobot.configs.dataset import DatasetRecordConfig from lerobot.scripts.lerobot_calibrate import CalibrateConfig, calibrate -from lerobot.scripts.lerobot_record import DatasetRecordConfig, RecordConfig, record +from lerobot.scripts.lerobot_record import RecordConfig, record from lerobot.scripts.lerobot_replay import DatasetReplayConfig, ReplayConfig, replay from lerobot.scripts.lerobot_teleoperate import TeleoperateConfig, teleoperate from tests.fixtures.constants import DUMMY_REPO_ID diff --git a/tests/test_rollout.py b/tests/test_rollout.py new file mode 100644 index 000000000..5a1ec4703 --- 
/dev/null +++ b/tests/test_rollout.py @@ -0,0 +1,345 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Minimal tests for the rollout module's public API.""" + +from __future__ import annotations + +import dataclasses +from unittest.mock import MagicMock + +import pytest +import torch + +pytest.importorskip("datasets", reason="datasets is required (install lerobot[dataset])") + +# --------------------------------------------------------------------------- +# Import smoke tests +# --------------------------------------------------------------------------- + + +def test_rollout_top_level_imports(): + import lerobot.rollout + + for name in lerobot.rollout.__all__: + assert hasattr(lerobot.rollout, name), f"Missing export: {name}" + + +def test_inference_submodule_imports(): + import lerobot.rollout.inference + + for name in lerobot.rollout.inference.__all__: + assert hasattr(lerobot.rollout.inference, name), f"Missing export: {name}" + + +def test_strategies_submodule_imports(): + import lerobot.rollout.strategies + + for name in lerobot.rollout.strategies.__all__: + assert hasattr(lerobot.rollout.strategies, name), f"Missing export: {name}" + + +# --------------------------------------------------------------------------- +# Config tests +# --------------------------------------------------------------------------- + + +def test_strategy_config_types(): + from lerobot.rollout import ( + 
BaseStrategyConfig, + DAggerStrategyConfig, + HighlightStrategyConfig, + SentryStrategyConfig, + ) + + assert BaseStrategyConfig().type == "base" + assert SentryStrategyConfig().type == "sentry" + assert HighlightStrategyConfig().type == "highlight" + assert DAggerStrategyConfig().type == "dagger" + + +def test_dagger_config_invalid_input_device(): + from lerobot.rollout import DAggerStrategyConfig + + with pytest.raises(ValueError, match="input_device must be 'keyboard' or 'pedal'"): + DAggerStrategyConfig(input_device="joystick") + + +def test_dagger_config_defaults(): + from lerobot.rollout import DAggerStrategyConfig + + cfg = DAggerStrategyConfig() + assert cfg.num_episodes is None + assert cfg.record_autonomous is False + assert cfg.input_device == "keyboard" + + +def test_inference_config_types(): + from lerobot.rollout import RTCInferenceConfig, SyncInferenceConfig + + assert SyncInferenceConfig().type == "sync" + + rtc = RTCInferenceConfig() + assert rtc.type == "rtc" + assert rtc.queue_threshold == 30 + assert rtc.rtc is not None + + +def test_sentry_config_defaults(): + from lerobot.rollout import SentryStrategyConfig + + cfg = SentryStrategyConfig() + assert cfg.upload_every_n_episodes == 5 + assert cfg.target_video_file_size_mb is None + + +# --------------------------------------------------------------------------- +# RolloutRingBuffer +# --------------------------------------------------------------------------- + + +def test_ring_buffer_append_and_eviction(): + from lerobot.rollout.ring_buffer import RolloutRingBuffer + + buf = RolloutRingBuffer(max_seconds=0.5, max_memory_mb=100.0, fps=10.0) + # max_frames = 5 + for i in range(8): + buf.append({"val": i}) + assert len(buf) == 5 + + +def test_ring_buffer_drain(): + from lerobot.rollout.ring_buffer import RolloutRingBuffer + + buf = RolloutRingBuffer(max_seconds=1.0, max_memory_mb=100.0, fps=10.0) + for i in range(3): + buf.append({"val": i}) + frames = buf.drain() + assert len(frames) == 3 + assert 
len(buf) == 0 + assert buf.estimated_bytes == 0 + + +def test_ring_buffer_clear(): + from lerobot.rollout.ring_buffer import RolloutRingBuffer + + buf = RolloutRingBuffer(max_seconds=1.0, max_memory_mb=100.0, fps=10.0) + buf.append({"val": 1}) + buf.clear() + assert len(buf) == 0 + assert buf.estimated_bytes == 0 + + +def test_ring_buffer_tensor_bytes(): + from lerobot.rollout.ring_buffer import RolloutRingBuffer + + buf = RolloutRingBuffer(max_seconds=1.0, max_memory_mb=100.0, fps=10.0) + t = torch.zeros(100, dtype=torch.float32) # 400 bytes + buf.append({"tensor": t}) + assert buf.estimated_bytes >= 400 + + +# --------------------------------------------------------------------------- +# ThreadSafeRobot +# --------------------------------------------------------------------------- + + +def test_thread_safe_robot_delegates(): + from lerobot.rollout.robot_wrapper import ThreadSafeRobot + from tests.mocks.mock_robot import MockRobot, MockRobotConfig + + robot = MockRobot(MockRobotConfig(n_motors=3)) + robot.connect() + wrapper = ThreadSafeRobot(robot) + + obs = wrapper.get_observation() + assert "motor_1.pos" in obs + assert "motor_2.pos" in obs + assert "motor_3.pos" in obs + + action = {"motor_1.pos": 0.0, "motor_2.pos": 1.0, "motor_3.pos": 2.0} + result = wrapper.send_action(action) + assert result == action + + robot.disconnect() + + +def test_thread_safe_robot_properties(): + from lerobot.rollout.robot_wrapper import ThreadSafeRobot + from tests.mocks.mock_robot import MockRobot, MockRobotConfig + + robot = MockRobot(MockRobotConfig(n_motors=3)) + robot.connect() + wrapper = ThreadSafeRobot(robot) + + assert wrapper.name == "mock_robot" + assert "motor_1.pos" in wrapper.observation_features + assert "motor_1.pos" in wrapper.action_features + assert wrapper.is_connected is True + assert wrapper.inner is robot + + robot.disconnect() + + +# --------------------------------------------------------------------------- +# Strategy factory +# 
--------------------------------------------------------------------------- + + +def test_create_strategy_dispatches(): + from lerobot.rollout import ( + BaseStrategy, + BaseStrategyConfig, + DAggerStrategy, + DAggerStrategyConfig, + SentryStrategy, + SentryStrategyConfig, + create_strategy, + ) + + assert isinstance(create_strategy(BaseStrategyConfig()), BaseStrategy) + assert isinstance(create_strategy(SentryStrategyConfig()), SentryStrategy) + assert isinstance(create_strategy(DAggerStrategyConfig()), DAggerStrategy) + + +def test_create_strategy_unknown_raises(): + from lerobot.rollout import create_strategy + + cfg = MagicMock() + cfg.type = "bogus" + with pytest.raises(ValueError, match="Unknown strategy type"): + create_strategy(cfg) + + +# --------------------------------------------------------------------------- +# Inference factory +# --------------------------------------------------------------------------- + + +def test_create_inference_engine_sync(): + from lerobot.rollout import SyncInferenceConfig, SyncInferenceEngine, create_inference_engine + + engine = create_inference_engine( + SyncInferenceConfig(), + policy=MagicMock(), + preprocessor=MagicMock(), + postprocessor=MagicMock(), + robot_wrapper=MagicMock(robot_type="mock"), + hw_features={}, + dataset_features={}, + ordered_action_keys=["k"], + task="test", + fps=30.0, + device="cpu", + ) + assert isinstance(engine, SyncInferenceEngine) + + +# --------------------------------------------------------------------------- +# Pure functions +# --------------------------------------------------------------------------- + + +def test_estimate_max_episode_seconds_no_video(): + from lerobot.rollout.strategies import estimate_max_episode_seconds + + assert estimate_max_episode_seconds({}, fps=30.0) == 300.0 + + +def test_estimate_max_episode_seconds_with_video(): + from lerobot.rollout.strategies import estimate_max_episode_seconds + + features = {"cam": {"dtype": "video", "shape": (480, 640, 3)}} + 
result = estimate_max_episode_seconds(features, fps=30.0) + assert result > 0 + # With a real camera, duration should differ from the fallback + assert result != 300.0 + + +def test_safe_push_to_hub(): + from lerobot.rollout.strategies import safe_push_to_hub + + ds = MagicMock() + ds.num_episodes = 0 + assert safe_push_to_hub(ds) is False + ds.push_to_hub.assert_not_called() + + ds.num_episodes = 5 + assert safe_push_to_hub(ds, tags=["test"]) is True + ds.push_to_hub.assert_called_once_with(tags=["test"], private=False) + + +# --------------------------------------------------------------------------- +# DAgger state machine +# --------------------------------------------------------------------------- + + +def test_dagger_full_transition_cycle(): + from lerobot.rollout.strategies import DAggerEvents, DAggerPhase + + events = DAggerEvents() + assert events.phase == DAggerPhase.AUTONOMOUS + + # AUTONOMOUS -> PAUSED + events.request_transition("pause_resume") + old, new = events.consume_transition() + assert (old, new) == (DAggerPhase.AUTONOMOUS, DAggerPhase.PAUSED) + + # PAUSED -> CORRECTING + events.request_transition("correction") + old, new = events.consume_transition() + assert (old, new) == (DAggerPhase.PAUSED, DAggerPhase.CORRECTING) + + # CORRECTING -> PAUSED + events.request_transition("correction") + old, new = events.consume_transition() + assert (old, new) == (DAggerPhase.CORRECTING, DAggerPhase.PAUSED) + + # PAUSED -> AUTONOMOUS + events.request_transition("pause_resume") + old, new = events.consume_transition() + assert (old, new) == (DAggerPhase.PAUSED, DAggerPhase.AUTONOMOUS) + + +def test_dagger_invalid_transition_ignored(): + from lerobot.rollout.strategies import DAggerEvents, DAggerPhase + + events = DAggerEvents() + events.request_transition("correction") # Not valid from AUTONOMOUS + assert events.consume_transition() is None + assert events.phase == DAggerPhase.AUTONOMOUS + + +def test_dagger_events_reset(): + from lerobot.rollout.strategies 
import DAggerEvents, DAggerPhase + + events = DAggerEvents() + events.request_transition("pause_resume") + events.consume_transition() # -> PAUSED + events.upload_requested.set() + events.reset() + assert events.phase == DAggerPhase.AUTONOMOUS + assert not events.upload_requested.is_set() + + +# --------------------------------------------------------------------------- +# Context dataclass +# --------------------------------------------------------------------------- + + +def test_rollout_context_fields(): + from lerobot.rollout import RolloutContext + + field_names = {f.name for f in dataclasses.fields(RolloutContext)} + assert field_names == {"runtime", "hardware", "policy", "processors", "data"} diff --git a/tests/utils/test_process.py b/tests/utils/test_process.py index ce56db173..65b24aac4 100644 --- a/tests/utils/test_process.py +++ b/tests/utils/test_process.py @@ -24,7 +24,7 @@ import pytest pytest.importorskip("grpc") -from lerobot.rl.process import ProcessSignalHandler # noqa: E402 +from lerobot.utils.process import ProcessSignalHandler # noqa: E402 # Fixture to reset shutdown_event_counter and original signal handlers before and after each test diff --git a/tests/utils/test_sample_weighting.py b/tests/utils/test_sample_weighting.py new file mode 100644 index 000000000..4507d7f18 --- /dev/null +++ b/tests/utils/test_sample_weighting.py @@ -0,0 +1,401 @@ +#!/usr/bin/env python + +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for the sample weighting infrastructure.""" + +from unittest.mock import Mock + +import pytest + +pytest.importorskip("pandas", reason="pandas is required (install lerobot[dataset])") + +import torch + +from lerobot.utils.sample_weighting import ( + SampleWeighter, + SampleWeightingConfig, + UniformWeighter, + make_sample_weighter, +) + +# ============================================================================= +# Fixtures +# ============================================================================= + + +@pytest.fixture +def sample_progress_parquet(tmp_path): + """Create a sample progress parquet file for testing.""" + import pandas as pd + + # Create sample progress data for 2 episodes with 10 frames each + data = { + "index": list(range(20)), + "episode_index": [0] * 10 + [1] * 10, + "frame_index": list(range(10)) * 2, + "progress_sparse": [i / 10.0 for i in range(10)] * 2, + } + df = pd.DataFrame(data) + parquet_path = tmp_path / "sarm_progress.parquet" + df.to_parquet(parquet_path) + return parquet_path + + +# ============================================================================= +# SampleWeightingConfig Tests +# ============================================================================= + + +def test_config_default_values(): + """Test default configuration values.""" + config = SampleWeightingConfig() + assert config.type == "rabc" + assert config.progress_path is None + assert config.head_mode == "sparse" + assert config.kappa == 0.01 + assert config.epsilon == 1e-6 + assert config.extra_params == {} + + +def test_config_custom_values(): + """Test configuration with custom values.""" + config = SampleWeightingConfig( + type="rabc", + progress_path="/path/to/progress.parquet", + head_mode="dense", + kappa=0.05, + epsilon=1e-8, + extra_params={"fallback_weight": 0.5}, + ) + assert config.type == "rabc" + assert 
config.progress_path == "/path/to/progress.parquet" + assert config.head_mode == "dense" + assert config.kappa == 0.05 + assert config.epsilon == 1e-8 + assert config.extra_params == {"fallback_weight": 0.5} + + +def test_config_uniform_type(): + """Test configuration for uniform weighting.""" + config = SampleWeightingConfig(type="uniform") + assert config.type == "uniform" + + +# ============================================================================= +# UniformWeighter Tests +# ============================================================================= + + +def test_uniform_weighter_inherits_from_sample_weighter(): + """Test that UniformWeighter is a SampleWeighter.""" + weighter = UniformWeighter(device=torch.device("cpu")) + assert isinstance(weighter, SampleWeighter) + + +def test_uniform_weighter_compute_batch_weights_with_action_key(): + """Test weight computation with 'action' key in batch.""" + weighter = UniformWeighter(device=torch.device("cpu")) + batch = {"action": torch.randn(8, 10)} + + weights, stats = weighter.compute_batch_weights(batch) + + assert weights.shape == (8,) + assert torch.allclose(weights, torch.ones(8)) + assert stats["mean_weight"] == 1.0 + assert stats["type"] == "uniform" + + +def test_uniform_weighter_compute_batch_weights_with_index_key(): + """Test weight computation with 'index' key in batch.""" + weighter = UniformWeighter(device=torch.device("cpu")) + batch = {"index": torch.arange(16)} + + weights, stats = weighter.compute_batch_weights(batch) + + assert weights.shape == (16,) + assert torch.allclose(weights, torch.ones(16)) + + +def test_uniform_weighter_compute_batch_weights_no_tensor_keys(): + """Test weight computation with no tensor keys (fallback to size 1).""" + weighter = UniformWeighter(device=torch.device("cpu")) + batch = {"other_key": "some_value"} + + weights, stats = weighter.compute_batch_weights(batch) + + assert weights.shape == (1,) + assert torch.allclose(weights, torch.ones(1)) + + +def 
test_uniform_weighter_compute_batch_weights_empty_batch_raises(): + """Test that empty batch raises ValueError.""" + weighter = UniformWeighter(device=torch.device("cpu")) + batch = {} + + with pytest.raises(ValueError, match="empty batch"): + weighter.compute_batch_weights(batch) + + +def test_uniform_weighter_compute_batch_weights_scans_all_keys(): + """Test that batch size is determined by scanning all tensor values.""" + weighter = UniformWeighter(device=torch.device("cpu")) + # Batch with non-standard key containing a tensor + batch = {"custom_tensor": torch.randn(7, 3)} + + weights, stats = weighter.compute_batch_weights(batch) + + assert weights.shape == (7,) + assert torch.allclose(weights, torch.ones(7)) + + +def test_uniform_weighter_compute_batch_weights_on_cuda(): + """Test that weights are placed on the correct device.""" + if not torch.cuda.is_available(): + pytest.skip("CUDA not available") + + weighter = UniformWeighter(device=torch.device("cuda")) + batch = {"action": torch.randn(4, 10)} + + weights, _ = weighter.compute_batch_weights(batch) + + assert weights.device.type == "cuda" + + +def test_uniform_weighter_get_stats(): + """Test get_stats returns expected structure.""" + weighter = UniformWeighter(device=torch.device("cpu")) + stats = weighter.get_stats() + + assert stats == {"type": "uniform"} + + +# ============================================================================= +# make_sample_weighter Factory Tests +# ============================================================================= + + +def test_factory_returns_none_for_none_config(): + """Test that None config returns None weighter.""" + policy = Mock() + device = torch.device("cpu") + + result = make_sample_weighter(None, policy, device) + + assert result is None + + +def test_factory_creates_uniform_weighter(): + """Test creation of UniformWeighter.""" + config = SampleWeightingConfig(type="uniform") + policy = Mock() + device = torch.device("cpu") + + weighter = 
make_sample_weighter(config, policy, device) + + assert isinstance(weighter, UniformWeighter) + assert isinstance(weighter, SampleWeighter) + + +def test_factory_raises_for_unknown_type(): + """Test that unknown type raises ValueError.""" + config = SampleWeightingConfig(type="unknown_type") + policy = Mock() + device = torch.device("cpu") + + with pytest.raises(ValueError, match="Unknown sample weighting type"): + make_sample_weighter(config, policy, device) + + +def test_factory_rabc_requires_chunk_size(): + """Test that RABC weighter requires chunk_size in policy config.""" + config = SampleWeightingConfig( + type="rabc", + progress_path="/path/to/progress.parquet", + ) + policy = Mock() + policy.config = Mock() + policy.config.chunk_size = None # No chunk_size + device = torch.device("cpu") + + with pytest.raises(ValueError, match="chunk_size"): + make_sample_weighter(config, policy, device) + + +def test_factory_rabc_requires_progress_path_or_dataset_info(): + """Test that RABC weighter requires progress_path or dataset info for auto-detection.""" + config = SampleWeightingConfig( + type="rabc", + progress_path=None, # No progress path + ) + policy = Mock() + policy.config = Mock() + policy.config.chunk_size = 50 + device = torch.device("cpu") + + # Should fail when no progress_path AND no dataset info + with pytest.raises(ValueError, match="progress_path"): + make_sample_weighter(config, policy, device) + + +def test_factory_rabc_auto_detects_from_dataset_root(sample_progress_parquet): + """Test that RABC weighter auto-detects progress_path from dataset_root.""" + config = SampleWeightingConfig( + type="rabc", + progress_path=None, # Not provided, should auto-detect + ) + policy = Mock() + policy.config = Mock() + policy.config.chunk_size = 5 + device = torch.device("cpu") + + # The parquet file is at sample_progress_parquet, get its parent directory + dataset_root = sample_progress_parquet.parent + weighter = make_sample_weighter( + config, + policy, + 
device, + dataset_root=str(dataset_root), + ) + + assert weighter is not None + from lerobot.rewards.sarm.rabc import RABCWeights + + assert isinstance(weighter, RABCWeights) + + +def test_factory_rabc_auto_detects_from_repo_id(): + """Test that RABC weighter constructs HF path from repo_id.""" + config = SampleWeightingConfig( + type="rabc", + progress_path=None, # Not provided, should auto-detect + ) + policy = Mock() + policy.config = Mock() + policy.config.chunk_size = 50 + device = torch.device("cpu") + + # This will construct the path but fail when trying to load (file doesn't exist) + # We just verify it doesn't raise the "progress_path required" error + with pytest.raises(Exception) as exc_info: + make_sample_weighter( + config, + policy, + device, + dataset_repo_id="test-user/test-dataset", + ) + # Should NOT be the "progress_path required" error - it should try to load the file + assert ( + "progress_path" not in str(exc_info.value).lower() or "auto-detection" in str(exc_info.value).lower() + ) + + +# ============================================================================= +# Integration Tests with RABCWeights +# ============================================================================= + + +def test_rabc_weights_is_sample_weighter(sample_progress_parquet): + """Test that RABCWeights inherits from SampleWeighter.""" + from lerobot.rewards.sarm.rabc import RABCWeights + + weighter = RABCWeights( + progress_path=sample_progress_parquet, + chunk_size=5, + head_mode="sparse", + ) + assert isinstance(weighter, SampleWeighter) + + +def test_rabc_compute_batch_weights(sample_progress_parquet): + """Test RABCWeights.compute_batch_weights returns correct structure.""" + from lerobot.rewards.sarm.rabc import RABCWeights + + weighter = RABCWeights( + progress_path=sample_progress_parquet, + chunk_size=5, + head_mode="sparse", + device=torch.device("cpu"), + ) + + batch = {"index": torch.tensor([0, 1, 2, 3])} + weights, stats = 
weighter.compute_batch_weights(batch) + + assert isinstance(weights, torch.Tensor) + assert weights.shape == (4,) + assert isinstance(stats, dict) + assert "mean_weight" in stats + + +def test_rabc_get_stats(sample_progress_parquet): + """Test RABCWeights.get_stats returns expected structure.""" + from lerobot.rewards.sarm.rabc import RABCWeights + + weighter = RABCWeights( + progress_path=sample_progress_parquet, + chunk_size=5, + head_mode="sparse", + ) + + stats = weighter.get_stats() + + assert stats["type"] == "rabc" + assert "num_frames" in stats + assert "chunk_size" in stats + assert stats["chunk_size"] == 5 + assert "head_mode" in stats + assert stats["head_mode"] == "sparse" + assert "delta_mean" in stats + assert "delta_std" in stats + + +def test_factory_creates_rabc_weighter(sample_progress_parquet): + """Test factory creates RABCWeights with valid config.""" + from lerobot.rewards.sarm.rabc import RABCWeights + + config = SampleWeightingConfig( + type="rabc", + progress_path=str(sample_progress_parquet), + head_mode="sparse", + kappa=0.01, + ) + policy = Mock() + policy.config = Mock() + policy.config.chunk_size = 5 + device = torch.device("cpu") + + weighter = make_sample_weighter(config, policy, device) + + assert isinstance(weighter, RABCWeights) + assert isinstance(weighter, SampleWeighter) + + +def test_rabc_weights_normalization(sample_progress_parquet): + """Test that RABCWeights normalizes weights to sum to batch_size.""" + from lerobot.rewards.sarm.rabc import RABCWeights + + weighter = RABCWeights( + progress_path=sample_progress_parquet, + chunk_size=5, + head_mode="sparse", + device=torch.device("cpu"), + ) + + batch = {"index": torch.tensor([0, 1, 2, 3])} + weights, _ = weighter.compute_batch_weights(batch) + + # Weights should be normalized to sum approximately to batch_size + batch_size = 4 + assert abs(weights.sum().item() - batch_size) < 0.1 diff --git a/uv.lock b/uv.lock index 6e141f11d..d79047687 100644 --- a/uv.lock +++ b/uv.lock 
@@ -1,31 +1,40 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.12" resolution-markers = [ - "python_full_version >= '3.14' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'linux'", + "python_full_version >= '3.15' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", + "python_full_version >= '3.15' and platform_machine == 's390x' and sys_platform == 'linux'", + "python_full_version == '3.14.*' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", "python_full_version == '3.13.*' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", + "python_full_version == '3.14.*' and platform_machine == 's390x' and sys_platform == 'linux'", "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'linux'", "python_full_version < '3.13' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'linux'", - "(python_full_version >= '3.14' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.14' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version >= '3.14' and platform_machine == 'armv7l' and sys_platform == 'linux')", + "(python_full_version >= '3.15' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.15' and platform_machine == 
'arm64' and sys_platform == 'linux') or (python_full_version >= '3.15' and platform_machine == 'armv7l' and sys_platform == 'linux')", + "(python_full_version == '3.14.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.14.*' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version == '3.14.*' and platform_machine == 'armv7l' and sys_platform == 'linux')", "(python_full_version == '3.13.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.13.*' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version == '3.13.*' and platform_machine == 'armv7l' and sys_platform == 'linux')", "(python_full_version < '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.13' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version < '3.13' and platform_machine == 'armv7l' and sys_platform == 'linux')", - "(python_full_version >= '3.14' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", + "(python_full_version >= '3.15' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.15' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", + "python_full_version >= '3.15' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", + "python_full_version >= '3.15' and 
platform_machine != 's390x' and sys_platform == 'emscripten'", + "python_full_version >= '3.15' and platform_machine == 's390x' and sys_platform == 'emscripten'", + "(python_full_version == '3.14.*' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version == '3.14.*' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", "(python_full_version == '3.13.*' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version == '3.13.*' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", + "python_full_version == '3.14.*' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", "(python_full_version < '3.13' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version < '3.13' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", - "python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform == 'emscripten'", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'emscripten'", + "python_full_version == '3.14.*' and platform_machine != 's390x' and sys_platform == 'emscripten'", "python_full_version == '3.13.*' and platform_machine != 's390x' and sys_platform == 'emscripten'", + 
"python_full_version == '3.14.*' and platform_machine == 's390x' and sys_platform == 'emscripten'", "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'emscripten'", "python_full_version < '3.13' and platform_machine != 's390x' and sys_platform == 'emscripten'", "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'emscripten'", - "(python_full_version >= '3.14' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform == 'win32')", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'win32'", + "(python_full_version >= '3.15' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.15' and platform_machine != 's390x' and sys_platform == 'win32')", + "python_full_version >= '3.15' and platform_machine == 's390x' and sys_platform == 'win32'", + "(python_full_version == '3.14.*' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version == '3.14.*' and platform_machine != 's390x' and sys_platform == 'win32')", "(python_full_version == '3.13.*' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version == '3.13.*' and platform_machine != 's390x' and sys_platform == 'win32')", + "python_full_version == '3.14.*' and platform_machine == 's390x' and sys_platform == 'win32'", "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'win32'", "(python_full_version < '3.13' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version < '3.13' and platform_machine != 's390x' and sys_platform == 'win32')", "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'win32'", @@ -383,11 +392,11 @@ css = [ [[package]] name = "certifi" -version = "2026.2.25" +version = "2026.4.22" source = { registry = "https://pypi.org/simple" } 
-sdist = { url = "https://files.pythonhosted.org/packages/af/2d/7bf41579a8986e348fa033a31cdd0e4121114f6bce2457e8876010b092dd/certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7", size = 155029, upload-time = "2026-02-25T02:54:17.342Z" } +sdist = { url = "https://files.pythonhosted.org/packages/25/ee/6caf7a40c36a1220410afe15a1cc64993a1f864871f698c0f93acb72842a/certifi-2026.4.22.tar.gz", hash = "sha256:8d455352a37b71bf76a79caa83a3d6c25afee4a385d632127b6afb3963f1c580", size = 137077, upload-time = "2026-04-22T11:26:11.191Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa", size = 153684, upload-time = "2026-02-25T02:54:15.766Z" }, + { url = "https://files.pythonhosted.org/packages/22/30/7cd8fdcdfbc5b869528b079bfb76dcdf6056b1a2097a662e5e8c04f42965/certifi-2026.4.22-py3-none-any.whl", hash = "sha256:3cb2210c8f88ba2318d29b0388d1023c8492ff72ecdde4ebdaddbb13a31b1c4a", size = 135707, upload-time = "2026-04-22T11:26:09.372Z" }, ] [[package]] @@ -531,14 +540,14 @@ wheels = [ [[package]] name = "click" -version = "8.3.2" +version = "8.3.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/57/75/31212c6bf2503fdf920d87fee5d7a86a2e3bcf444984126f13d8e4016804/click-8.3.2.tar.gz", hash = "sha256:14162b8b3b3550a7d479eafa77dfd3c38d9dc8951f6f69c78913a8f9a7540fd5", size = 302856, upload-time = "2026-04-03T19:14:45.118Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/63/f9e1ea081ce35720d8b92acde70daaedace594dc93b693c869e0d5910718/click-8.3.3.tar.gz", hash = "sha256:398329ad4837b2ff7cbe1dd166a4c0f8900c3ca3a218de04466f38f6497f18a2", size = 328061, upload-time = "2026-04-22T15:11:27.506Z" } 
wheels = [ - { url = "https://files.pythonhosted.org/packages/e4/20/71885d8b97d4f3dde17b1fdb92dbd4908b00541c5a3379787137285f602e/click-8.3.2-py3-none-any.whl", hash = "sha256:1924d2c27c5653561cd2cae4548d1406039cb79b858b747cfea24924bbc1616d", size = 108379, upload-time = "2026-04-03T19:14:43.505Z" }, + { url = "https://files.pythonhosted.org/packages/ae/44/c1221527f6a71a01ec6fbad7fa78f1d50dfa02217385cf0fa3eec7087d59/click-8.3.3-py3-none-any.whl", hash = "sha256:a2bf429bb3033c89fa4936ffb35d5cb471e3719e1f3c8a7c3fff0b8314305613", size = 110502, upload-time = "2026-04-22T15:11:25.044Z" }, ] [[package]] @@ -963,10 +972,10 @@ wheels = [ [[package]] name = "cuda-pathfinder" -version = "1.5.3" +version = "1.5.4" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d3/d6/ac63065d33dd700fee7ebd7d287332401b54e31b9346e142f871e1f0b116/cuda_pathfinder-1.5.3-py3-none-any.whl", hash = "sha256:dff021123aedbb4117cc7ec81717bbfe198fb4e8b5f1ee57e0e084fec5c8577d", size = 49991, upload-time = "2026-04-14T20:09:27.037Z" }, + { url = "https://files.pythonhosted.org/packages/11/d0/c177e29701cf1d3008d7d2b16b5fc626592ce13bd535f8795c5f57187e0e/cuda_pathfinder-1.5.4-py3-none-any.whl", hash = "sha256:9563d3175ce1828531acf4b94e1c1c7d67208c347ca002493e2654878b26f4b7", size = 51657, upload-time = "2026-04-27T22:42:07.712Z" }, ] [[package]] @@ -980,7 +989,7 @@ wheels = [ [[package]] name = "datasets" -version = "4.8.4" +version = "4.8.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "dill" }, @@ -998,9 +1007,9 @@ dependencies = [ { name = "tqdm" }, { name = "xxhash" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/22/22/73e46ac7a8c25e7ef0b3bd6f10da3465021d90219a32eb0b4d2afea4c56e/datasets-4.8.4.tar.gz", hash = "sha256:a1429ed853275ce7943a01c6d2e25475b4501eb758934362106a280470df3a52", size = 604382, upload-time = "2026-03-23T14:21:17.987Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/66/34/14cd8e76f907f7d4dca2334cfeec9f81d30fd15c25a015f99aaea694eaed/datasets-4.8.5.tar.gz", hash = "sha256:0f0c1c3d56ffff2c93b2f4c63c95bac94f3d7e8621aea2a2a576275233bba772", size = 605649, upload-time = "2026-04-27T15:43:57.384Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b0/e5/247d094108e42ac26363ab8dc57f168840cf7c05774b40ffeb0d78868fcc/datasets-4.8.4-py3-none-any.whl", hash = "sha256:cdc8bee4698e549d78bf1fed6aea2eebc760b22b084f07e6fc020c6577a6ce6d", size = 526991, upload-time = "2026-03-23T14:21:15.89Z" }, + { url = "https://files.pythonhosted.org/packages/65/99/00f3196036501b53032c4b1ab8337a0b978dee832ed276dae3815df4e8b5/datasets-4.8.5-py3-none-any.whl", hash = "sha256:5079900781719c0e063a8efdd2cd95a31ad0c63209178669cd23cf1b926149ff", size = 528973, upload-time = "2026-04-27T15:43:53.702Z" }, ] [[package]] @@ -1105,13 +1114,12 @@ wheels = [ [[package]] name = "dm-control" -version = "1.0.39" +version = "1.0.40" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "absl-py" }, { name = "dm-env" }, - { name = "dm-tree", version = "0.1.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" }, - { name = "dm-tree", version = "0.1.10", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.14'" }, + { name = "dm-tree" }, { name = "glfw" }, { name = "labmaze" }, { name = "lxml" }, @@ -1125,9 +1133,9 @@ dependencies = [ { name = "setuptools" }, { name = "tqdm" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c3/3b/785a1e175c7a91c1ddb297c21041278a60a45462f0abed16ea69a18cddbc/dm_control-1.0.39.tar.gz", hash = "sha256:b4c65d50a8dd2eb8c1b405fa17830a5de8d1020578b9b9045ad00cd5ace9aac9", size = 56274038, upload-time = "2026-04-14T14:43:31.316Z" } +sdist = { url = "https://files.pythonhosted.org/packages/78/27/1d3caa7fa7557b70d766f437979636b6a8c99b14f6e8b8f84795cad9f1df/dm_control-1.0.40.tar.gz", hash = 
"sha256:af5828af47fe50466008d53b141893a05c4e2779169fc8ef469d1828a016266e", size = 56273764, upload-time = "2026-04-25T22:05:39.202Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/56/7f/cfbd6b5c3cc44f687b0b12a1690814a47999e0a3196c3dcd7df209cc6d7b/dm_control-1.0.39-py3-none-any.whl", hash = "sha256:3f29195ab9fa9484b7ccf117bf0a2144cecd7c46570d9db0841cba716f4eff82", size = 56446358, upload-time = "2026-04-14T14:43:26.436Z" }, + { url = "https://files.pythonhosted.org/packages/2d/eb/a762d6f15e6c4faccef8fdcdae50cf8f232800a36b70aef93d38a787bb58/dm_control-1.0.40-py3-none-any.whl", hash = "sha256:cd15b1d95f5b320b7924e518715b8ac132043d575588b7e122f21016f49c7e89", size = 56446428, upload-time = "2026-04-25T22:05:33.561Z" }, ] [[package]] @@ -1136,8 +1144,7 @@ version = "1.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "absl-py" }, - { name = "dm-tree", version = "0.1.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" }, - { name = "dm-tree", version = "0.1.10", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.14'" }, + { name = "dm-tree" }, { name = "numpy" }, ] sdist = { url = "https://files.pythonhosted.org/packages/62/c9/93e8d6239d5806508a2ee4b370e67c6069943ca149f59f533923737a99b7/dm-env-1.6.tar.gz", hash = "sha256:a436eb1c654c39e0c986a516cee218bea7140b510fceff63f97eb4fcff3d93de", size = 20187, upload-time = "2022-12-21T00:25:29.306Z" } @@ -1149,22 +1156,11 @@ wheels = [ name = "dm-tree" version = "0.1.9" source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.14' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'linux'", - "(python_full_version >= '3.14' and platform_machine == 'aarch64' and 
sys_platform == 'linux') or (python_full_version >= '3.14' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version >= '3.14' and platform_machine == 'armv7l' and sys_platform == 'linux')", - "(python_full_version >= '3.14' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", - "python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform == 'emscripten'", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'emscripten'", - "(python_full_version >= '3.14' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version >= '3.14' and platform_machine != 's390x' and sys_platform == 'win32')", - "python_full_version >= '3.14' and platform_machine == 's390x' and sys_platform == 'win32'", -] dependencies = [ - { name = "absl-py", marker = "python_full_version >= '3.14'" }, - { name = "attrs", marker = "python_full_version >= '3.14'" }, - { name = "numpy", marker = "python_full_version >= '3.14'" }, - { name = "wrapt", marker = "python_full_version >= '3.14'" }, + { name = "absl-py" }, + { name = "attrs" }, + { name = "numpy" }, + { name = "wrapt" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a6/83/ce29720ccf934c6cfa9b9c95ebbe96558386e66886626066632b5e44afed/dm_tree-0.1.9.tar.gz", hash = "sha256:a4c7db3d3935a5a2d5e4b383fc26c6b0cd6f78c6d4605d3e7b518800ecd5342b", size = 35623, upload-time = "2025-01-30T20:45:37.13Z" } wheels = [ @@ -1181,58 +1177,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/c5/37/15603079854394f16e3833a7b50696c1f3cbf30a2243a119f64f18a16f36/dm_tree-0.1.9-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1f5d1e96b3a7de22b25b13a5eb30f41f8cf9c02dd4479a24920de99e780903c", size = 153052, upload-time = "2025-01-30T20:45:35.907Z" }, ] -[[package]] -name = "dm-tree" -version = "0.1.10" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version == '3.13.*' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", - "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'linux'", - "python_full_version < '3.13' and platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l' and platform_machine != 's390x' and sys_platform == 'linux'", - "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'linux'", - "(python_full_version == '3.13.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.13.*' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version == '3.13.*' and platform_machine == 'armv7l' and sys_platform == 'linux')", - "(python_full_version < '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.13' and platform_machine == 'arm64' and sys_platform == 'linux') or (python_full_version < '3.13' and platform_machine == 'armv7l' and sys_platform == 'linux')", - "(python_full_version == '3.13.*' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version == '3.13.*' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", - "python_full_version == '3.13.*' and platform_machine == 
's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", - "(python_full_version < '3.13' and platform_machine != 's390x' and platform_machine != 'x86_64' and sys_platform == 'darwin') or (python_full_version < '3.13' and platform_machine != 's390x' and sys_platform != 'darwin' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32')", - "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform != 'emscripten' and sys_platform != 'linux' and sys_platform != 'win32'", - "python_full_version == '3.13.*' and platform_machine != 's390x' and sys_platform == 'emscripten'", - "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'emscripten'", - "python_full_version < '3.13' and platform_machine != 's390x' and sys_platform == 'emscripten'", - "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'emscripten'", - "(python_full_version == '3.13.*' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version == '3.13.*' and platform_machine != 's390x' and sys_platform == 'win32')", - "python_full_version == '3.13.*' and platform_machine == 's390x' and sys_platform == 'win32'", - "(python_full_version < '3.13' and platform_machine == 'x86_64' and sys_platform == 'darwin') or (python_full_version < '3.13' and platform_machine != 's390x' and sys_platform == 'win32')", - "python_full_version < '3.13' and platform_machine == 's390x' and sys_platform == 'win32'", -] -dependencies = [ - { name = "absl-py", marker = "python_full_version < '3.14'" }, - { name = "attrs", marker = "python_full_version < '3.14'" }, - { name = "numpy", marker = "python_full_version < '3.14'" }, - { name = "wrapt", marker = "python_full_version < '3.14'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/5a/66/a3ec619d22b6baffa5ab853e8dc6ec9d0c837127948af59bb15b988d7312/dm_tree-0.1.10.tar.gz", hash = 
"sha256:22f37b599e01cc3402a17f79c257a802aebd8d326de05b54657650845956208a", size = 35748, upload-time = "2026-03-31T17:35:39.03Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/34/a1/17e0d68eec978c483db4712b14d083ee01484381b29ea85edb2b20210bd0/dm_tree-0.1.10-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:94af18e4fd22ce69eccae89eeed8ed498b6b4cc4957f4ed10b4160e59f620e1d", size = 315976, upload-time = "2026-03-31T17:35:15.21Z" }, - { url = "https://files.pythonhosted.org/packages/cc/6f/ed603715fbc29c887a8985252e2cfe0d449497aea96bac51010159771617/dm_tree-0.1.10-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b442a0c1e9d0960e0314a2e4af81fd328a87921b6d6db6dc41bfa420536884d6", size = 184053, upload-time = "2026-03-31T17:35:16.512Z" }, - { url = "https://files.pythonhosted.org/packages/83/eb/1d55c679cee9a54e552480d308535753c72e2250cf720d7aa777bff2a4fe/dm_tree-0.1.10-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:012c2b376e88d3685c73a4b5c23be41fe933e14e380dcd90172971690b0e02d2", size = 186506, upload-time = "2026-03-31T17:35:17.593Z" }, - { url = "https://files.pythonhosted.org/packages/89/2d/adef6924f8dc7f1665eea4ce066387820c14a629d0e1005568892d56ea6a/dm_tree-0.1.10-cp312-cp312-win_amd64.whl", hash = "sha256:da8d5b8995bea1b6bb93f457e0dad5d16e6e2344a6488ced55320e7f3fd50f56", size = 112708, upload-time = "2026-03-31T17:35:18.699Z" }, - { url = "https://files.pythonhosted.org/packages/d6/29/f39e8412c16740f4c914c6674a04a66ace344ce5cb99b537c2270ef4f204/dm_tree-0.1.10-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:4a782f0382be16d66c9ed003e6992e56674504a1d9636f44d2807123f5df6343", size = 316108, upload-time = "2026-03-31T17:35:20.139Z" }, - { url = "https://files.pythonhosted.org/packages/02/83/1b94d45351bd75a83976a88c9fcf109da6ce336f38a3b443703bb6b18e5d/dm_tree-0.1.10-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:0e8f8f1354f178112732b30d2293bc53d212ea64a9556db80a926af3d4647a6b", size = 183834, upload-time = "2026-03-31T17:35:21.463Z" }, - { url = "https://files.pythonhosted.org/packages/2f/23/bd3e75cbff06a464339d32667d740acf49812b027142a013b54d2c4d830a/dm_tree-0.1.10-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6d7134c0805294c640b94d85cc725084f0c5087bcda5a7fb38eeb7f479ecc37c", size = 186187, upload-time = "2026-03-31T17:35:23.495Z" }, - { url = "https://files.pythonhosted.org/packages/aa/75/4b460253b9af862388940404b5df6a22b399800c850aab4724c95f8635f9/dm_tree-0.1.10-cp313-cp313-win_amd64.whl", hash = "sha256:b42e04482880b017d931511d7b5997be372fff26a1ee9b9be55eef03ef1c2918", size = 112768, upload-time = "2026-03-31T17:35:24.622Z" }, - { url = "https://files.pythonhosted.org/packages/cb/ca/3b40a8a50f9c3492b795b157d769180edb5f2605e3c61ae826208f917baa/dm_tree-0.1.10-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:bde02efacca66514524922538b8a0c5dc15d482565d1c796edc34a726b376830", size = 324138, upload-time = "2026-03-31T17:35:25.627Z" }, - { url = "https://files.pythonhosted.org/packages/83/e4/33c9218aa607f610e2b0334fc824c2abd5a6bc232bf0726cf275f88e639d/dm_tree-0.1.10-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:033f9a063e1e19b6c65fb5c76079bd923044f5a6095357ad2637845513d47938", size = 185110, upload-time = "2026-03-31T17:35:26.784Z" }, - { url = "https://files.pythonhosted.org/packages/6c/da/f8811666d61b6829ba1c2716c4119039428dd86078eddd120354aaf26a3b/dm_tree-0.1.10-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6d4237da7b072fff1e93db109ab545f00d2b978ead35e85e7a84908e15197826", size = 187013, upload-time = "2026-03-31T17:35:27.969Z" }, - { url = "https://files.pythonhosted.org/packages/94/8d/135ddeea875fd1a2768e7aee6c224f92c9b7643ead1ec8b68bdbee52c60a/dm_tree-0.1.10-cp314-cp314-macosx_10_15_universal2.whl", hash = 
"sha256:f395390d6acfb5d39c564c8bbcaf35352a81eb2f0d34d449739039b2ef786e14", size = 316599, upload-time = "2026-03-31T17:35:29.339Z" }, - { url = "https://files.pythonhosted.org/packages/cb/50/1eda610e9ca8ac59950ae028080e7c5320d7abc5567d6723d0cb3623e838/dm_tree-0.1.10-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c0f54547fbd4b82e88c71694b3836c90059b97102d3e36209f5d2fa66950964", size = 184263, upload-time = "2026-03-31T17:35:30.534Z" }, - { url = "https://files.pythonhosted.org/packages/c7/59/07461ceb563702ba3943725bdf0e04be4de0ed7ef093837cdd2d67141d2a/dm_tree-0.1.10-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cf6706ac425272c9b7e05f05a23a1ff3e670fb59a787f6089a638eea2d06f1d0", size = 186328, upload-time = "2026-03-31T17:35:31.894Z" }, - { url = "https://files.pythonhosted.org/packages/88/af/d9c84787fefe9f7c35f474a945217c38396f2ca5ab06432fb566e32a7d1a/dm_tree-0.1.10-cp314-cp314-win_amd64.whl", hash = "sha256:a132047e846e769ddacefe77c42ae79bf3d0e9fce2a6adb638a0ea4cbadb8cdb", size = 114799, upload-time = "2026-03-31T17:35:33.361Z" }, - { url = "https://files.pythonhosted.org/packages/fd/2c/2aaa63a510db520cd9e0c51e053a608486169bb9710f51f4ecf5699cebb4/dm_tree-0.1.10-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:23682221f63ad011dbd762ce5314740d7900b0426a2681614ea2472369b0c49c", size = 324205, upload-time = "2026-03-31T17:35:34.679Z" }, - { url = "https://files.pythonhosted.org/packages/b0/89/a5a302bcf9c345e6bd0498627ee2aa12f0a1c3538d08a2f5884d3c6783ba/dm_tree-0.1.10-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8baeb3db1e92587d686022fb67a52f6c584a7d32bd98444ed3aafb399ad9ce67", size = 185113, upload-time = "2026-03-31T17:35:36.179Z" }, - { url = "https://files.pythonhosted.org/packages/cc/e8/2d4fbc54bb68905588945cfb47c05445c66cab2d822b05827f1c62e23a70/dm_tree-0.1.10-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:2236c9a4cf64ed0b04004a94902f39341be652b70dce322b33f08ada9b146baa", size = 187009, upload-time = "2026-03-31T17:35:37.584Z" }, -] - [[package]] name = "docopt" version = "0.6.2" @@ -1375,16 +1319,16 @@ wheels = [ [[package]] name = "farama-notifications" -version = "0.0.4" +version = "0.0.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2e/2c/8384832b7a6b1fd6ba95bbdcae26e7137bb3eedc955c42fd5cdcc086cfbf/Farama-Notifications-0.0.4.tar.gz", hash = "sha256:13fceff2d14314cf80703c8266462ebf3733c7d165336eee998fc58e545efd18", size = 2131, upload-time = "2023-02-27T18:28:41.047Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/91/14397890dde30adc4bee6462158933806207bc5dd10d7b4d09d5c33845cf/farama_notifications-0.0.6.tar.gz", hash = "sha256:b19acac4bb41d76e59e03394b5dd165f4761c86fa327f56307a35cbee3b60158", size = 2517, upload-time = "2026-04-24T08:43:57.603Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/05/2c/ffc08c54c05cdce6fbed2aeebc46348dbe180c6d2c541c7af7ba0aa5f5f8/Farama_Notifications-0.0.4-py3-none-any.whl", hash = "sha256:14de931035a41961f7c056361dc7f980762a143d05791ef5794a751a2caf05ae", size = 2511, upload-time = "2023-02-27T18:28:39.447Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f0/21f81892e4ed10f4ec3ef2e7cf8635fb76e7c0907c55d0da66be50094760/farama_notifications-0.0.6-py3-none-any.whl", hash = "sha256:f84839188efa1ce5bb361c2a84881b2dc2c0d0d7fb661ff00421820170930935", size = 2897, upload-time = "2026-04-24T08:43:56.785Z" }, ] [[package]] name = "fastapi" -version = "0.136.0" +version = "0.136.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-doc" }, @@ -1393,9 +1337,9 @@ dependencies = [ { name = "typing-extensions" }, { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/4e/d9/e66315807e41e69e7f6a1b42a162dada2f249c5f06ad3f1a95f84ab336ef/fastapi-0.136.0.tar.gz", hash = 
"sha256:cf08e067cc66e106e102d9ba659463abfac245200752f8a5b7b1e813de4ff73e", size = 396607, upload-time = "2026-04-16T11:47:13.623Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5d/45/c130091c2dfa061bbfe3150f2a5091ef1adf149f2a8d2ae769ecaf6e99a2/fastapi-0.136.1.tar.gz", hash = "sha256:7af665ad7acfa0a3baf8983d393b6b471b9da10ede59c60045f49fbc89a0fa7f", size = 397448, upload-time = "2026-04-23T16:49:44.046Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/26/a3/0bd5f0cdb0bbc92650e8dc457e9250358411ee5d1b65e42b6632387daf81/fastapi-0.136.0-py3-none-any.whl", hash = "sha256:8793d44ec7378e2be07f8a013cf7f7aa47d6327d0dfe9804862688ec4541a6b4", size = 117556, upload-time = "2026-04-16T11:47:11.922Z" }, + { url = "https://files.pythonhosted.org/packages/5a/ff/2e4eca3ade2c22fe1dea7043b8ee9dabe47753349eb1b56a202de8af6349/fastapi-0.136.1-py3-none-any.whl", hash = "sha256:a6e9d7eeada96c93a4d69cb03836b44fa34e2854accb7244a1ece36cd4781c3f", size = 117683, upload-time = "2026-04-23T16:49:42.437Z" }, ] [[package]] @@ -1418,11 +1362,11 @@ sdist = { url = "https://files.pythonhosted.org/packages/5f/8e/c53d6f9a8bf3a86a6 [[package]] name = "filelock" -version = "3.28.0" +version = "3.29.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d6/17/6e8890271880903e3538660a21d63a6c1fea969ac71d0d6b608b78727fa9/filelock-3.28.0.tar.gz", hash = "sha256:4ed1010aae813c4ee8d9c660e4792475ee60c4a0ba76073ceaf862bd317e3ca6", size = 56474, upload-time = "2026-04-14T22:54:33.625Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/fe/997687a931ab51049acce6fa1f23e8f01216374ea81374ddee763c493db5/filelock-3.29.0.tar.gz", hash = "sha256:69974355e960702e789734cb4871f884ea6fe50bd8404051a3530bc07809cf90", size = 57571, upload-time = "2026-04-19T15:39:10.068Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/3b/21/2f728888c45033d34a417bfcd248ea2564c9e08ab1bfd301377cf05d5586/filelock-3.28.0-py3-none-any.whl", hash = "sha256:de9af6712788e7171df1b28b15eba2446c69721433fa427a9bee07b17820a9db", size = 39189, upload-time = "2026-04-14T22:54:32.037Z" }, + { url = "https://files.pythonhosted.org/packages/81/47/dd9a212ef6e343a6857485ffe25bba537304f1913bdbed446a23f7f592e1/filelock-3.29.0-py3-none-any.whl", hash = "sha256:96f5f6344709aa1572bbf631c640e4ebeeb519e08da902c39a001882f30ac258", size = 39812, upload-time = "2026-04-19T15:39:08.752Z" }, ] [[package]] @@ -1619,14 +1563,14 @@ wheels = [ [[package]] name = "gitpython" -version = "3.1.46" +version = "3.1.49" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "gitdb" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/df/b5/59d16470a1f0dfe8c793f9ef56fd3826093fc52b3bd96d6b9d6c26c7e27b/gitpython-3.1.46.tar.gz", hash = "sha256:400124c7d0ef4ea03f7310ac2fbf7151e09ff97f2a3288d64a440c584a29c37f", size = 215371, upload-time = "2026-01-01T15:37:32.073Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e1/63/210aaa302d6a0a78daa67c5c15bbac2cad361722841278b0209b6da20855/gitpython-3.1.49.tar.gz", hash = "sha256:42f9399c9eb33fc581014bedd76049dfbaf6375aa2a5754575966387280315e1", size = 219367, upload-time = "2026-04-29T00:31:20.478Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6a/09/e21df6aef1e1ffc0c816f0522ddc3f6dcded766c3261813131c78a704470/gitpython-3.1.46-py3-none-any.whl", hash = "sha256:79812ed143d9d25b6d176a10bb511de0f9c67b1fa641d82097b0ab90398a2058", size = 208620, upload-time = "2026-01-01T15:37:30.574Z" }, + { url = "https://files.pythonhosted.org/packages/fd/6f/b842bfa6f21d6f87c57f9abf7194225e55279d96d869775e19e9f7236fc5/gitpython-3.1.49-py3-none-any.whl", hash = "sha256:024b0422d7f84d15cd794844e029ffebd4c5d42a7eb9b936b458697ef550a02c", size = 212190, upload-time = "2026-04-29T00:31:18.412Z" }, ] [[package]] @@ -1773,7 
+1717,7 @@ wheels = [ [[package]] name = "gymnasium" -version = "1.2.3" +version = "1.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cloudpickle" }, @@ -1781,9 +1725,9 @@ dependencies = [ { name = "numpy" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/76/59/653a9417d98ed3e29ef9734ba52c3495f6c6823b8d5c0c75369f25111708/gymnasium-1.2.3.tar.gz", hash = "sha256:2b2cb5b5fbbbdf3afb9f38ca952cc48aa6aa3e26561400d940747fda3ad42509", size = 829230, upload-time = "2025-12-18T16:51:10.234Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4d/ff/14b6880d703dfaca204490979d3254ccd280c99550798993319902873658/gymnasium-1.3.0.tar.gz", hash = "sha256:6939e86e835d6b71b6ba6bfd360487420876deafc79bfb7bacba83a7c446bcf3", size = 830646, upload-time = "2026-04-22T13:47:14.155Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/56/d3/ea5f088e3638dbab12e5c20d6559d5b3bdaeaa1f2af74e526e6815836285/gymnasium-1.2.3-py3-none-any.whl", hash = "sha256:e6314bba8f549c7fdcc8677f7cd786b64908af6e79b57ddaa5ce1825bffb5373", size = 952113, upload-time = "2025-12-18T16:51:08.445Z" }, + { url = "https://files.pythonhosted.org/packages/e9/73/fda6a25f3beeb5e49d74330b44092b9e5a547395ccd478d1103ddcbff1fc/gymnasium-1.3.0-py3-none-any.whl", hash = "sha256:6b8c159a8540dcbcb221722d7efda24d78ebbcbc3bd2ea1c2611aa2a34471fc2", size = 953904, upload-time = "2026-04-22T13:47:12.13Z" }, ] [[package]] @@ -1986,7 +1930,7 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "1.11.0" +version = "1.13.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, @@ -1999,9 +1943,9 @@ dependencies = [ { name = "typer" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/dc/89/e7aa12d8a6b9259bed10671abb25ae6fa437c0f88a86ecbf59617bae7759/huggingface_hub-1.11.0.tar.gz", hash = 
"sha256:15fb3713c7f9cdff7b808a94fd91664f661ab142796bb48c9cd9493e8d166278", size = 761749, upload-time = "2026-04-16T13:07:39.73Z" } +sdist = { url = "https://files.pythonhosted.org/packages/89/ff/ec7ed2eb43bd7ce8bb2233d109cc235c3e807ffe5e469dc09db261fac05e/huggingface_hub-1.13.0.tar.gz", hash = "sha256:f6df2dac5abe82ce2fe05873d10d5ff47bc677d616a2f521f4ee26db9415d9d0", size = 781788, upload-time = "2026-04-30T11:57:33.858Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/37/02/4f3f8997d1ea7fe0146b343e5e14bd065fa87af790d07e5576d31b31cc18/huggingface_hub-1.11.0-py3-none-any.whl", hash = "sha256:42a6de0afbfeb5e022222d36398f029679db4eb4778801aafda32257ae9131ab", size = 645499, upload-time = "2026-04-16T13:07:37.716Z" }, + { url = "https://files.pythonhosted.org/packages/93/db/4b1cdae9460ae1f3ca020cd767f013430ce23eb1d9c890ae3a0609b38d26/huggingface_hub-1.13.0-py3-none-any.whl", hash = "sha256:e942cb50d6a08dd5306688b1ac05bda157fd2fcc88b63dae405f7bd0d3234005", size = 660643, upload-time = "2026-04-30T11:57:31.802Z" }, ] [[package]] @@ -2029,11 +1973,11 @@ wheels = [ [[package]] name = "idna" -version = "3.11" +version = "3.13" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ce/cc/762dfb036166873f0059f3b7de4565e1b5bc3d6f28a414c13da27e442f99/idna-3.13.tar.gz", hash = "sha256:585ea8fe5d69b9181ec1afba340451fba6ba764af97026f92a91d4eef164a242", size = 194210, upload-time = "2026-04-22T16:42:42.314Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = 
"sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, + { url = "https://files.pythonhosted.org/packages/5d/13/ad7d7ca3808a898b4612b6fe93cde56b53f3034dcde235acb1f0e1df24c6/idna-3.13-py3-none-any.whl", hash = "sha256:892ea0cde124a99ce773decba204c5552b69c3c67ffd5f232eb7696135bc8bb3", size = 68629, upload-time = "2026-04-22T16:42:40.909Z" }, ] [[package]] @@ -2116,7 +2060,7 @@ wheels = [ [[package]] name = "ipython" -version = "9.12.0" +version = "9.13.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -2126,13 +2070,14 @@ dependencies = [ { name = "matplotlib-inline" }, { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, { name = "prompt-toolkit" }, + { name = "psutil" }, { name = "pygments" }, { name = "stack-data" }, { name = "traitlets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3a/73/7114f80a8f9cabdb13c27732dce24af945b2923dcab80723602f7c8bc2d8/ipython-9.12.0.tar.gz", hash = "sha256:01daa83f504b693ba523b5a407246cabde4eb4513285a3c6acaff11a66735ee4", size = 4428879, upload-time = "2026-03-27T09:42:45.312Z" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/c4/87cda5842cf5c31837c06ddb588e11c3c35d8ece89b7a0108c06b8c9b00a/ipython-9.13.0.tar.gz", hash = "sha256:7e834b6afc99f020e3f05966ced34792f40267d64cb1ea9043886dab0dde5967", size = 4430549, upload-time = "2026-04-24T12:24:55.221Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/59/22/906c8108974c673ebef6356c506cebb6870d48cedea3c41e949e2dd556bb/ipython-9.12.0-py3-none-any.whl", hash = "sha256:0f2701e8ee86e117e37f50563205d36feaa259d2e08d4a6bc6b6d74b18ce128d", size = 625661, upload-time = "2026-03-27T09:42:42.831Z" }, + { url = "https://files.pythonhosted.org/packages/b9/86/3060e8029b7cc505cce9a0137431dda81d0a3fde93a8f0f50ee0bf37a795/ipython-9.13.0-py3-none-any.whl", hash = 
"sha256:57f9d4639e20818d328d287c7b549af3d05f12486ea8f2e7f73e52a36ec4d201", size = 627274, upload-time = "2026-04-24T12:24:53.038Z" }, ] [[package]] @@ -2186,14 +2131,14 @@ wheels = [ [[package]] name = "jedi" -version = "0.19.2" +version = "0.20.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "parso" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" } +sdist = { url = "https://files.pythonhosted.org/packages/46/b7/a3635f6a2d7cf5b5dd98064fc1d5fbbafcb25477bcea204a3a92145d158b/jedi-0.20.0.tar.gz", hash = "sha256:c3f4ccbd276696f4b19c54618d4fb18f9fc24b0aef02acf704b23f487daa1011", size = 3119416, upload-time = "2026-05-01T23:38:47.814Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" }, + { url = "https://files.pythonhosted.org/packages/9a/93/242e2eab5fe682ffcb8b0084bde703a41d51e17ee0f3a31ff0d9d813620a/jedi-0.20.0-py2.py3-none-any.whl", hash = "sha256:7bdd9c2634f56713299976f4cbd59cb3fa92165cc5e05ea811fb253480728b67", size = 4884812, upload-time = "2026-05-01T23:38:43.919Z" }, ] [[package]] @@ -2345,7 +2290,7 @@ wheels = [ [[package]] name = "jupyter-events" -version = "0.12.0" +version = "0.12.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jsonschema", extra = ["format-nongpl"] }, @@ -2357,9 +2302,9 @@ dependencies = [ { name = "rfc3986-validator" }, { name = "traitlets" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/9d/c3/306d090461e4cf3cd91eceaff84bede12a8e52cd821c2d20c9a4fd728385/jupyter_events-0.12.0.tar.gz", hash = "sha256:fc3fce98865f6784c9cd0a56a20644fc6098f21c8c33834a8d9fe383c17e554b", size = 62196, upload-time = "2025-02-03T17:23:41.485Z" } +sdist = { url = "https://files.pythonhosted.org/packages/18/f8/475c4241b2b75af0deaae453ed003c6c851766dbc44d332d8baf245dc931/jupyter_events-0.12.1.tar.gz", hash = "sha256:faff25f77218335752f35f23c5fe6e4a392a7bd99a5939ccb9b8fbf594636cf3", size = 62854, upload-time = "2026-04-20T23:17:50.66Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e2/48/577993f1f99c552f18a0428731a755e06171f9902fa118c379eb7c04ea22/jupyter_events-0.12.0-py3-none-any.whl", hash = "sha256:6464b2fa5ad10451c3d35fabc75eab39556ae1e2853ad0c0cc31b656731a97fb", size = 19430, upload-time = "2025-02-03T17:23:38.643Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6c/6fcde0c8f616ed360ffd3587f7db9e225a7e62b583a04494d2f069cf64ea/jupyter_events-0.12.1-py3-none-any.whl", hash = "sha256:c366585253f537a627da52fa7ca7410c5b5301fe893f511e7b077c2d93ec8bcf", size = 19512, upload-time = "2026-04-20T23:17:48.927Z" }, ] [[package]] @@ -2376,7 +2321,7 @@ wheels = [ [[package]] name = "jupyter-server" -version = "2.17.0" +version = "2.18.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -2398,9 +2343,9 @@ dependencies = [ { name = "traitlets" }, { name = "websocket-client" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5b/ac/e040ec363d7b6b1f11304cc9f209dac4517ece5d5e01821366b924a64a50/jupyter_server-2.17.0.tar.gz", hash = "sha256:c38ea898566964c888b4772ae1ed58eca84592e88251d2cfc4d171f81f7e99d5", size = 731949, upload-time = "2025-08-21T14:42:54.042Z" } +sdist = { url = "https://files.pythonhosted.org/packages/33/b0/666586d557a71a58cd9960b154fb9aee0ed81dd62a50371195ab95731909/jupyter_server-2.18.1.tar.gz", hash = 
"sha256:f62be526369b791625e03bd658070563c1a4e9a0a2f439ea1f9dbacea5f7191a", size = 752024, upload-time = "2026-05-05T09:17:51.101Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/92/80/a24767e6ca280f5a49525d987bf3e4d7552bf67c8be07e8ccf20271f8568/jupyter_server-2.17.0-py3-none-any.whl", hash = "sha256:e8cb9c7db4251f51ed307e329b81b72ccf2056ff82d50524debde1ee1870e13f", size = 388221, upload-time = "2025-08-21T14:42:52.034Z" }, + { url = "https://files.pythonhosted.org/packages/a4/45/bfe3779fd06714a379128f2c4eaf7c99414f0eb081f9f34c135f6b3d511c/jupyter_server-2.18.1-py3-none-any.whl", hash = "sha256:db0374d52a975f88a92a7f20de44e08ef5be9763ba7e99630baf16c46ac8dbf0", size = 391844, upload-time = "2026-05-05T09:17:48.521Z" }, ] [[package]] @@ -2418,7 +2363,7 @@ wheels = [ [[package]] name = "jupyterlab" -version = "4.5.6" +version = "4.5.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "async-lru" }, @@ -2435,9 +2380,9 @@ dependencies = [ { name = "tornado" }, { name = "traitlets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ac/d5/730628e03fff2e8a8e8ccdaedde1489ab1309f9a4fa2536248884e30b7c7/jupyterlab-4.5.6.tar.gz", hash = "sha256:642fe2cfe7f0f5922a8a558ba7a0d246c7bc133b708dfe43f7b3a826d163cf42", size = 23970670, upload-time = "2026-03-11T14:17:04.531Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2b/22/8440ec827762146e7cdecf04335bd348795899d29dc6ae82238707353a2c/jupyterlab-4.5.7.tar.gz", hash = "sha256:55a9822c4754da305f41e113452c68383e214dcf96de760146af89ce5d5117b0", size = 23992763, upload-time = "2026-04-29T16:43:51.328Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e1/1b/dad6fdcc658ed7af26fdf3841e7394072c9549a8b896c381ab49dd11e2d9/jupyterlab-4.5.6-py3-none-any.whl", hash = "sha256:d6b3dac883aa4d9993348e0f8e95b24624f75099aed64eab6a4351a9cdd1e580", size = 12447124, upload-time = "2026-03-11T14:17:00.229Z" }, + { url = 
"https://files.pythonhosted.org/packages/3d/aa/537b8f7d80e799af19af35fb3ddfc970b951088a13c57dd9387dcfbb7f61/jupyterlab-4.5.7-py3-none-any.whl", hash = "sha256:fba4cb0e2c44a52859669d8c98b45de029d5e515f8407bf8534d2a8fc5f0964d", size = 12450123, upload-time = "2026-04-29T16:43:46.639Z" }, ] [[package]] @@ -2793,8 +2738,7 @@ gamepad = [ groot = [ { name = "decord", marker = "platform_machine == 'AMD64' or platform_machine == 'x86_64'" }, { name = "diffusers" }, - { name = "dm-tree", version = "0.1.9", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" }, - { name = "dm-tree", version = "0.1.10", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.14'" }, + { name = "dm-tree" }, { name = "flash-attn", marker = "sys_platform != 'darwin'" }, { name = "ninja" }, { name = "peft" }, @@ -3165,7 +3109,7 @@ requires-dist = [ { name = "torchdiffeq", marker = "extra == 'wallx'", specifier = ">=0.2.4,<0.3.0" }, { name = "torchvision", specifier = ">=0.22.0,<0.26.0" }, { name = "tqdm", specifier = ">=4.66.0,<5.0.0" }, - { name = "transformers", marker = "extra == 'transformers-dep'", specifier = "==5.3.0" }, + { name = "transformers", marker = "extra == 'transformers-dep'", specifier = ">=5.4.0,<5.6.0" }, { name = "wandb", marker = "extra == 'training'", specifier = ">=0.24.0,<0.25.0" }, ] provides-extras = ["dataset", "training", "hardware", "viz", "core-scripts", "evaluation", "dataset-viz", "av-dep", "pygame-dep", "placo-dep", "transformers-dep", "grpcio-dep", "can-dep", "peft-dep", "scipy-dep", "diffusers-dep", "qwen-vl-utils-dep", "matplotlib-dep", "pyserial-dep", "deepdiff-dep", "pynput-dep", "pyzmq-dep", "feetech", "dynamixel", "damiao", "robstride", "openarms", "gamepad", "hopejr", "lekiwi", "unitree-g1", "reachy2", "kinematics", "intelrealsense", "phone", "diffusion", "wallx", "pi", "smolvla", "multi-task-dit", "groot", "sarm", "xvla", "hilserl", "async", "peft", "dev", "notebook", "test", 
"video-benchmark", "aloha", "pusht", "libero", "metaworld", "all"] @@ -3248,82 +3192,82 @@ wheels = [ [[package]] name = "lxml" -version = "6.0.4" +version = "6.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ce/08/1217ca4043f55c3c92993b283a7dbfa456a2058d8b57bbb416cc96b6efff/lxml-6.0.4.tar.gz", hash = "sha256:4137516be2a90775f99d8ef80ec0283f8d78b5d8bd4630ff20163b72e7e9abf2", size = 4237780, upload-time = "2026-04-12T16:28:24.182Z" } +sdist = { url = "https://files.pythonhosted.org/packages/28/30/9abc9e34c657c33834eaf6cd02124c61bdf5944d802aa48e69be8da3585d/lxml-6.1.0.tar.gz", hash = "sha256:bfd57d8008c4965709a919c3e9a98f76c2c7cb319086b3d26858250620023b13", size = 4197006, upload-time = "2026-04-18T04:32:51.613Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3d/18/4732abab49bbb041b1ded9dd913ca89735a0dcca038eacec64c44ba02163/lxml-6.0.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:af0b8459c4e21a8417db967b2e453d1855022dac79c79b61fb8214f3da50f17e", size = 8570033, upload-time = "2026-04-12T16:24:10.728Z" }, - { url = "https://files.pythonhosted.org/packages/72/7e/38523ec7178ca35376551911455d1b2766bc9d98bcc18f606a167fa9ecbb/lxml-6.0.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e0cdcea2affa53fa17dc4bf5cefc0edf72583eac987d669493a019998a623fa3", size = 4623270, upload-time = "2026-04-12T16:24:13.2Z" }, - { url = "https://files.pythonhosted.org/packages/f1/cf/f9b6c9bf9d8c63d923ef893915141767cea4cea71774f20c36d0c14e1585/lxml-6.0.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8da4d4840c1bc07da6fcd647784f7fbaf538eeb7a57ce6b2487acc54c5e33330", size = 4929471, upload-time = "2026-04-12T16:24:15.453Z" }, - { url = "https://files.pythonhosted.org/packages/e5/53/3117f988c9e20be4156d2b8e1bda82ae06878d11aeb820dea111a7cfa4e3/lxml-6.0.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:fb04a997588c3980894ded9172c10c5a3e45d3f1c5410472733626d268683806", size = 5092355, upload-time = "2026-04-12T16:24:17.876Z" }, - { url = "https://files.pythonhosted.org/packages/4e/ca/05c6ac773a2bd3edb48fa8a5c5101e927ce044c4a8aed1a85ff00fab20a5/lxml-6.0.4-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ca449642a08a6ceddf6e6775b874b6aee1b6242ed80aea84124497aba28e5384", size = 5004520, upload-time = "2026-04-12T16:24:20.184Z" }, - { url = "https://files.pythonhosted.org/packages/f1/db/d8aa5aa3a51d0aa6706ef85f85027f7c972cd840fe69ba058ecaf32d093d/lxml-6.0.4-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:35b3ccdd137e62033662787dd4d2b8be900c686325d6b91e3b1ff6213d05ba11", size = 5629961, upload-time = "2026-04-12T16:24:22.242Z" }, - { url = "https://files.pythonhosted.org/packages/9d/75/8fff4444e0493aeb15ab0f4a55c767b5baed9074cf67a1835dc1161f3a1f/lxml-6.0.4-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:45dc690c54b1341fec01743caed02e5f1ea49d7cfb81e3ba48903e5e844ed68a", size = 5237561, upload-time = "2026-04-12T16:24:24.572Z" }, - { url = "https://files.pythonhosted.org/packages/2a/9f/6d6cd73014f2dbf47a8aa7accd9712726f46ef4891e1c126bc285cfb94e4/lxml-6.0.4-cp312-cp312-manylinux_2_28_i686.whl", hash = "sha256:15ae922e8f74b05798a0e88cee46c0244aaec6a66b5e00be7d18648fed8c432e", size = 5349197, upload-time = "2026-04-12T16:24:26.805Z" }, - { url = "https://files.pythonhosted.org/packages/2d/43/e3e9a126e166234d1659d1dd9004dc1dd50cdc3c68575b071b0a1524b4de/lxml-6.0.4-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:ebd816653707fbf10c65e3dee3bc24dac6b691654c21533b1ae49287433f4db0", size = 4693123, upload-time = "2026-04-12T16:24:28.812Z" }, - { url = "https://files.pythonhosted.org/packages/6c/98/b146dd123a4a7b69b571ff23ea8e8c68de8d8c1b03e23d01c6374d4fd835/lxml-6.0.4-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:21284cf36b95dd8be774eb06c304b440cf49ee811800a30080ce6d93700f0383", size = 5242967, upload-time = "2026-04-12T16:24:30.811Z" }, - { url = "https://files.pythonhosted.org/packages/7e/60/8c275584452b55a902c883e8ab63d755c5ef35d7ad1f06f9e6559095521d/lxml-6.0.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0c08a2a9d0c4028ef5fc5a513b2e1e51af069a83c5b4206139edd08b3b8c2926", size = 5046810, upload-time = "2026-04-12T16:24:33.289Z" }, - { url = "https://files.pythonhosted.org/packages/19/aa/19ec216147e1105e5403fe73657c693a6e91bde855a13242dd6031e829e5/lxml-6.0.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1bc2f0f417112cf1a428599dd58125ab74d8e1c66893efd9b907cbb4a5db6e44", size = 4776383, upload-time = "2026-04-12T16:24:36.008Z" }, - { url = "https://files.pythonhosted.org/packages/41/c8/90afdb838705a736268fcffd2698c05e9a129144ce215d5e14db3bdfc295/lxml-6.0.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c0d86e328405529bc93913add9ff377e8b8ea9be878e611f19dbac7766a84483", size = 5643497, upload-time = "2026-04-12T16:24:38.276Z" }, - { url = "https://files.pythonhosted.org/packages/32/ec/1135261ec9822dafb90be0ff6fb0ec79cee0b7fe878833dfe5f2b8c393bd/lxml-6.0.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:3cce9420fe8f91eae5d457582599d282195c958cb670aa4bea313a79103ba33f", size = 5232185, upload-time = "2026-04-12T16:24:40.516Z" }, - { url = "https://files.pythonhosted.org/packages/13/f2/7380b11cae6943720f525e5a28ad9dbead96ac710417e556b7c03f3a8af3/lxml-6.0.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:96214985ec194ce97b9028414e179cfb21230cba4e2413aee7e249461bb84f4d", size = 5259968, upload-time = "2026-04-12T16:24:42.917Z" }, - { url = "https://files.pythonhosted.org/packages/65/8f/141734f2c456f2253fed4237d8d4b241e3d701129cf6f0b135ccf241a75a/lxml-6.0.4-cp312-cp312-win32.whl", hash = "sha256:b2209b310e7ed1d4cd1c00d405ec9c49722fce731c7036abc1d876bf8df78139", size = 3594958, upload-time = "2026-04-12T16:24:45.039Z" }, - { url = 
"https://files.pythonhosted.org/packages/b7/a9/c6d3531c6d8814af0919fbdb9bda43c9e8b5deffcb70c8534017db233512/lxml-6.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:03affcacfba4671ebc305813b02bfaf34d80b6a7c5b23eafc5d6da14a1a6e623", size = 3995897, upload-time = "2026-04-12T16:24:46.98Z" }, - { url = "https://files.pythonhosted.org/packages/03/5d/1dabeddf762e5a315a31775b2bca39811d7e7a15fc3e677d044b9da973fe/lxml-6.0.4-cp312-cp312-win_arm64.whl", hash = "sha256:af9678e3a2a047465515d95a61690109af7a4c9486f708249119adcef7861049", size = 3658607, upload-time = "2026-04-12T16:24:49.19Z" }, - { url = "https://files.pythonhosted.org/packages/78/f6/550a1ed9afde66e24bfcf9892446ea9779152df336062c6df0f7733151a2/lxml-6.0.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ecc3d55ed756ee6c3447748862a97e1f5392d2c5d7f474bace9382345e4fc274", size = 8559522, upload-time = "2026-04-12T16:24:51.563Z" }, - { url = "https://files.pythonhosted.org/packages/11/93/3f687c14d2b4d24b60fe13fd5482c8853f82a10bb87f2b577123e342ed1a/lxml-6.0.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7d5a627a368a0e861350ccc567a70ec675d2bc4d8b3b54f48995ae78d8d530e", size = 4617380, upload-time = "2026-04-12T16:24:54.042Z" }, - { url = "https://files.pythonhosted.org/packages/b5/ed/91e443366063d3fb7640ae2badd5d7b65be4095ac6d849788e39c043baae/lxml-6.0.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d385141b186cc39ebe4863c1e41936282c65df19b2d06a701dedc2a898877d6a", size = 4922791, upload-time = "2026-04-12T16:24:56.381Z" }, - { url = "https://files.pythonhosted.org/packages/30/4b/2243260b70974aca9ba0cc71bd668c0c3a79644d80ddcabbfbdb4b131848/lxml-6.0.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0132bb040e9bb5a199302e12bf942741defbc52922a2a06ce9ff7be0d0046483", size = 5080972, upload-time = "2026-04-12T16:24:58.823Z" }, - { url = 
"https://files.pythonhosted.org/packages/f8/c3/54c53c4f772341bc12331557f8b0882a426f53133926306cbe6d7f0ee7e4/lxml-6.0.4-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:26aee5321e4aa1f07c9090a35f6ab8b703903fb415c6c823cfdb20ee0d779855", size = 4992236, upload-time = "2026-04-12T16:25:01.099Z" }, - { url = "https://files.pythonhosted.org/packages/be/0f/416de42e22f287585abee610eb0d1c2638c9fe24cee7e15136e0b5e138f8/lxml-6.0.4-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b5652455de198ff76e02cfa57d5efc5f834fa45521aaf3fcc13d6b5a88bde23d", size = 5612398, upload-time = "2026-04-12T16:25:03.517Z" }, - { url = "https://files.pythonhosted.org/packages/7d/63/29a3fa79b8a182f5bd5b5bdcb6f625f49f08f41d60a26ca25482820a1b99/lxml-6.0.4-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:75842801fb48aea73f4c281b923a010dfb39bad75edf8ceb2198ec30c27f01cc", size = 5227480, upload-time = "2026-04-12T16:25:06.119Z" }, - { url = "https://files.pythonhosted.org/packages/7c/4a/44d1843de599b1c6dbe578e4248c2f15e7fac90c5c86eb26775eaeac0fe0/lxml-6.0.4-cp313-cp313-manylinux_2_28_i686.whl", hash = "sha256:94a1f74607a5a049ff6ff8de429fec922e643e32b5b08ec7a4fe49e8de76e17c", size = 5341001, upload-time = "2026-04-12T16:25:08.563Z" }, - { url = "https://files.pythonhosted.org/packages/0d/52/c8aebde49f169e4e3452e7756be35be1cb2903e30d961cb57aa65a27055f/lxml-6.0.4-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:173cc246d3d3b6d3b6491f0b3aaf22ebdf2eed616879482acad8bd84d73eb231", size = 4699105, upload-time = "2026-04-12T16:25:10.757Z" }, - { url = "https://files.pythonhosted.org/packages/78/60/76fc3735c31c28b70220d99452fb72052e84b618693ca2524da96f0131d8/lxml-6.0.4-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f0f2ee1be1b72e9890da87e4e422f2f703ff4638fd5ec5383055db431e8e30e9", size = 5231095, upload-time = "2026-04-12T16:25:13.305Z" }, - { url = 
"https://files.pythonhosted.org/packages/e5/60/448f01c52110102f23df5f07b3f4fde57c8e13e497e182a743d125324c0b/lxml-6.0.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c51a274b7e8b9ce394c3f8b471eb0b23c1914eec64fdccf674e082daf72abf11", size = 5042411, upload-time = "2026-04-12T16:25:15.541Z" }, - { url = "https://files.pythonhosted.org/packages/4a/2a/90612a001fa4fa0ff0443ebb0256a542670fe35473734c559720293e7aff/lxml-6.0.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:210ea934cba1a1ec42f88c4190c4d5c67b2d14321a8faed9b39e8378198ff99d", size = 4768431, upload-time = "2026-04-12T16:25:17.581Z" }, - { url = "https://files.pythonhosted.org/packages/84/d8/572845a7d741c8a8ffeaf928185263e14d97fbd355de164677340951d7a5/lxml-6.0.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:14fe654a59eebe16368c51778caeb0c8fda6f897adcd9afe828d87d13b5d5e51", size = 5634972, upload-time = "2026-04-12T16:25:20.111Z" }, - { url = "https://files.pythonhosted.org/packages/d7/1d/392b8c9f8cf1d502bbec50dee137c7af3dd5def5e5cd84572fbf0ba0541c/lxml-6.0.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:ec160a2b7e2b3cb71ec35010b19a1adea05785d19ba5c9c5f986b64b78fef564", size = 5222909, upload-time = "2026-04-12T16:25:22.243Z" }, - { url = "https://files.pythonhosted.org/packages/21/ab/949fc96f825cf083612aee65d5a02eacc5eaeb2815561220e33e1e160677/lxml-6.0.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d305b86ef10b23cf3a6d62a2ad23fa296f76495183ee623f64d2600f65ffe09c", size = 5249096, upload-time = "2026-04-12T16:25:24.781Z" }, - { url = "https://files.pythonhosted.org/packages/56/e8/fbe44df79ede5ff760401cc3c49c4204f49f0f529cc6b27d0af7b63f5472/lxml-6.0.4-cp313-cp313-win32.whl", hash = "sha256:a2f31380aa9a9b52591e79f1c1d3ac907688fbeb9d883ba28be70f2eb5db2277", size = 3595808, upload-time = "2026-04-12T16:25:26.747Z" }, - { url = "https://files.pythonhosted.org/packages/f8/df/e873abb881092256520edf0d67d686e36f3c86b3cf289f01b6458272dede/lxml-6.0.4-cp313-cp313-win_amd64.whl", hash 
= "sha256:b8efa9f681f15043e497293d58a4a63199564b253ed2291887d92bb3f74f59ab", size = 3994635, upload-time = "2026-04-12T16:25:28.828Z" }, - { url = "https://files.pythonhosted.org/packages/23/a8/9c56c8914b9b18d89face5a7472445002baf309167f7af65d988842129fd/lxml-6.0.4-cp313-cp313-win_arm64.whl", hash = "sha256:905abe6a5888129be18f85f2aea51f0c9863fa0722fb8530dfbb687d2841d221", size = 3657374, upload-time = "2026-04-12T16:25:30.901Z" }, - { url = "https://files.pythonhosted.org/packages/10/18/36e28a809c509a67496202771f545219ac5a2f1cd61aae325991fcf5ab91/lxml-6.0.4-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:569d3b18340863f603582d2124e742a68e85755eff5e47c26a55e298521e3a01", size = 8575045, upload-time = "2026-04-12T16:25:33.57Z" }, - { url = "https://files.pythonhosted.org/packages/11/38/a168c820e3b08d3b4fa0f4e6b53b3930086b36cc11e428106d38c36778cd/lxml-6.0.4-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:3b6245ee5241342d45e1a54a4a8bc52ef322333ada74f24aa335c4ab36f20161", size = 4622963, upload-time = "2026-04-12T16:25:36.818Z" }, - { url = "https://files.pythonhosted.org/packages/53/e0/2c9d6abdd82358cea3c0d8d6ca272a6af0f38156abce7827efb6d5b62d17/lxml-6.0.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:79a1173ba3213a3693889a435417d4e9f3c07d96e30dc7cc3a712ed7361015fe", size = 4948832, upload-time = "2026-04-12T16:25:39.104Z" }, - { url = "https://files.pythonhosted.org/packages/96/d7/f2202852e91d7baf3a317f4523a9c14834145301e5b0f2e80c01c4bfbd49/lxml-6.0.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dc18bb975666b443ba23aedd2fcf57e9d0d97546b52a1de97a447c4061ba4110", size = 5085865, upload-time = "2026-04-12T16:25:41.226Z" }, - { url = "https://files.pythonhosted.org/packages/09/57/abee549324496e92708f71391c6060a164d3c95369656a1a15e9f20d8162/lxml-6.0.4-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2079f5dc83291ac190a52f8354b78648f221ecac19fb2972a2d056b555824de7", 
size = 5030001, upload-time = "2026-04-12T16:25:43.695Z" }, - { url = "https://files.pythonhosted.org/packages/c2/f8/432da7178c5917a16468af6c5da68fef7cf3357d4bd0e6f50272ec9a59b5/lxml-6.0.4-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3eda02da4ca16e9ca22bbe5654470c17fa1abcd967a52e4c2e50ff278221e351", size = 5646303, upload-time = "2026-04-12T16:25:46.577Z" }, - { url = "https://files.pythonhosted.org/packages/82/f9/e1c04ef667a6bf9c9dbd3bf04c50fa51d7ee25b258485bb748b27eb9a1c7/lxml-6.0.4-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c3787cdc3832b70e21ac2efafea2a82a8ccb5e85bec110dc68b26023e9d3caae", size = 5237940, upload-time = "2026-04-12T16:25:49.157Z" }, - { url = "https://files.pythonhosted.org/packages/d0/f0/cdea60d92df731725fc3c4f33e387b100f210acd45c92969e42d2ba993fa/lxml-6.0.4-cp314-cp314-manylinux_2_28_i686.whl", hash = "sha256:3f276d49c23103565d39440b9b3f4fc08fa22f5a96395ea4b4d4fea4458b1505", size = 5350050, upload-time = "2026-04-12T16:25:52.027Z" }, - { url = "https://files.pythonhosted.org/packages/2e/15/bf52c7a70b6081bb9e00d37cc90fcf60aa84468d9d173ad2fade38ec34c5/lxml-6.0.4-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:fdfdad73736402375b11b3a137e48cd09634177516baf5fc0bd80d1ca85f3cda", size = 4696409, upload-time = "2026-04-12T16:25:55.141Z" }, - { url = "https://files.pythonhosted.org/packages/c5/69/9bade267332cc06f9a9aa773b5a11bdfb249af485df9e142993009ea1fc4/lxml-6.0.4-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:75912421456946931daba0ec3cedfa824c756585d05bde97813a17992bfbd013", size = 5249072, upload-time = "2026-04-12T16:25:57.362Z" }, - { url = "https://files.pythonhosted.org/packages/14/ca/043bcacb096d6ed291cbbc58724e9625a453069d6edeb840b0bf18038d05/lxml-6.0.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:48cd5a88da67233fd82f2920db344503c2818255217cd6ea462c9bb8254ba7cb", size = 5083779, upload-time = "2026-04-12T16:26:00.018Z" }, - { 
url = "https://files.pythonhosted.org/packages/04/89/f5fb18d76985969e84af13682e489acabee399bb54738a363925ea6e7390/lxml-6.0.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:87af86a8fa55b9ff1e6ee4233d762296f2ce641ba948af783fb995c5a8a3371b", size = 4736953, upload-time = "2026-04-12T16:26:02.289Z" }, - { url = "https://files.pythonhosted.org/packages/84/ba/d1d7284bb4ba951f188c3fc0455943c1fcbd1c33d1324d6d57b7d4a45be6/lxml-6.0.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:a743714cd656ba7ccb29d199783906064c7b5ba3c0e2a79f0244ea0badc6a98c", size = 5669605, upload-time = "2026-04-12T16:26:04.694Z" }, - { url = "https://files.pythonhosted.org/packages/72/05/1463e55f2de27bb60feddc894dd7c0833bd501f8861392ed416291b38db5/lxml-6.0.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:e31c76bd066fb4f81d9a32e5843bffdf939ab27afb1ffc1c924e749bfbdb00e3", size = 5236886, upload-time = "2026-04-12T16:26:07.659Z" }, - { url = "https://files.pythonhosted.org/packages/fe/fb/0b6ee9194ce3ac49db4cadaa8a9158f04779fc768b6c27c4e2945d71a99d/lxml-6.0.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f185fd6e7d550e9917d7103dccf51be589aba953e15994fb04646c1730019685", size = 5263382, upload-time = "2026-04-12T16:26:10.067Z" }, - { url = "https://files.pythonhosted.org/packages/9a/93/ec18a08e98dd82cac39f1d2511ee2bed5affb94d228356d8ef165a4ec3b9/lxml-6.0.4-cp314-cp314-win32.whl", hash = "sha256:774660028f8722a598400430d2746fb0075949f84a9a5cd9767d9152e3baaac5", size = 3656164, upload-time = "2026-04-12T16:26:59.568Z" }, - { url = "https://files.pythonhosted.org/packages/15/86/52507316abfc7150bf6bb191e39a12e301ee80334610a493884ae2f9d20d/lxml-6.0.4-cp314-cp314-win_amd64.whl", hash = "sha256:fbd7d14349413f5609c0b537b1a48117d6ccef1af37986af6b03766ad05bf43e", size = 4062512, upload-time = "2026-04-12T16:27:02.212Z" }, - { url = "https://files.pythonhosted.org/packages/f1/d5/09c593a2ef2234b8cd6cf059e2dc212e0654bf05c503f0ef2daf05adb680/lxml-6.0.4-cp314-cp314-win_arm64.whl", hash = 
"sha256:a61a01ec3fbfd5b73a69a7bf513271051fd6c5795d82fc5daa0255934cd8db3d", size = 3740745, upload-time = "2026-04-12T16:27:04.444Z" }, - { url = "https://files.pythonhosted.org/packages/4a/3c/42a98bf6693938bf7b285ec7f70ba2ae9d785d0e5b2cdb85d2ee29e287eb/lxml-6.0.4-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:504edb62df33cea502ea6e73847c647ba228623ca3f80a228be5723a70984dd5", size = 8826437, upload-time = "2026-04-12T16:26:12.911Z" }, - { url = "https://files.pythonhosted.org/packages/c2/c2/ad13f39b2db8709788aa2dcb6e90b81da76db3b5b2e7d35e0946cf984960/lxml-6.0.4-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:f01b7b0316d4c0926d49a7f003b2d30539f392b140a3374bb788bad180bc8478", size = 4734892, upload-time = "2026-04-12T16:26:15.871Z" }, - { url = "https://files.pythonhosted.org/packages/2c/6d/c559d7b5922c5b0380fc2cb5ac134b6a3f9d79d368347a624ee5d68b0816/lxml-6.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ab999933e662501efe4b16e6cfb7c9f9deca7d072cd1788b99c8defde78c0dfb", size = 4969173, upload-time = "2026-04-12T16:26:18.335Z" }, - { url = "https://files.pythonhosted.org/packages/c7/78/ca521e36157f38e3e1a29276855cdf48d213138fc0c8365693ff5c876ca7/lxml-6.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67c3f084389fe75932c39b6869a377f6c8e21e818f31ae8a30c71dd2e59360e2", size = 5103134, upload-time = "2026-04-12T16:26:20.612Z" }, - { url = "https://files.pythonhosted.org/packages/28/a7/7d62d023bacaa0aaf60af8c0a77c6c05f84327396d755f3aa64b788678a9/lxml-6.0.4-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:377ea1d654f76ed6205c87d14920f829c9f4d31df83374d3cbcbdaae804d37b2", size = 5027205, upload-time = "2026-04-12T16:26:22.981Z" }, - { url = "https://files.pythonhosted.org/packages/34/be/51b194b81684f2e85e5d992771c45d70cb22ac6f7291ac6bc7b255830afe/lxml-6.0.4-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:e60cd0bcacbfd1a96d63516b622183fb2e3f202300df9eb5533391a8a939dbfa", size = 5594461, upload-time = "2026-04-12T16:26:25.316Z" }, - { url = "https://files.pythonhosted.org/packages/39/24/8850f38fbf89dd072ff31ba22f9e40347aeada7cadf710ecb04b8d9f32d4/lxml-6.0.4-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e9e30fd63d41dd0bbdb020af5cdfffd5d9b554d907cb210f18e8fcdc8eac013", size = 5223378, upload-time = "2026-04-12T16:26:28.68Z" }, - { url = "https://files.pythonhosted.org/packages/2a/9b/595239ba8c719b0fdc7bc9ebdb7564459c9a6b24b8b363df4a02674aeece/lxml-6.0.4-cp314-cp314t-manylinux_2_28_i686.whl", hash = "sha256:1fb4a1606bb68c533002e7ed50d7e55e58f0ef1696330670281cb79d5ab2050d", size = 5311415, upload-time = "2026-04-12T16:26:31.513Z" }, - { url = "https://files.pythonhosted.org/packages/be/cb/aa27ac8d041acf34691577838494ad08df78e83fdfdb66948d2903e9291e/lxml-6.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:695c7708438e449d57f404db8cc1b769e77ad5b50655f32f8175686ba752f293", size = 4637953, upload-time = "2026-04-12T16:26:33.806Z" }, - { url = "https://files.pythonhosted.org/packages/f6/f2/f19114fd86825c2d1ce41cd99daad218d30cfdd2093d4de9273986fb4d68/lxml-6.0.4-cp314-cp314t-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d49c35ae1e35ee9b569892cf8f8f88db9524f28d66e9daee547a5ef9f3c5f468", size = 5231532, upload-time = "2026-04-12T16:26:36.518Z" }, - { url = "https://files.pythonhosted.org/packages/9a/0e/c3fa354039ec0b6b09f40fbe1129efc572ac6239faa4906de42d5ce87c0a/lxml-6.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5801072f8967625e6249d162065d0d6011ef8ce3d0efb8754496b5246b81a74b", size = 5083767, upload-time = "2026-04-12T16:26:39.332Z" }, - { url = "https://files.pythonhosted.org/packages/b3/4b/1a0dbb6d6ffae16e54a8a3796ded0ad2f9c3bc1ff3728bde33456f4e1d63/lxml-6.0.4-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:cbf768541526eba5ef1a49f991122e41b39781eafd0445a5a110fc09947a20b5", size = 
4758079, upload-time = "2026-04-12T16:26:42.138Z" }, - { url = "https://files.pythonhosted.org/packages/a9/01/a246cf5f80f96766051de4b305d6552f80bdaefb37f04e019e42af0aba69/lxml-6.0.4-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:eecce87cc09233786fc31c230268183bf6375126cfec1c8b3673fcdc8767b560", size = 5618686, upload-time = "2026-04-12T16:26:44.507Z" }, - { url = "https://files.pythonhosted.org/packages/eb/1f/b072a92369039ebef11b0a654be5134fcf3ed04c0f437faf9435ac9ba845/lxml-6.0.4-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:07dce892881179e11053066faca2da17b0eeb0bb7298f11bcf842a86db207dbd", size = 5227259, upload-time = "2026-04-12T16:26:47.083Z" }, - { url = "https://files.pythonhosted.org/packages/d5/a0/dc97034f9d4c0c4d30875147d81fd2c0c7f3d261b109db36ed746bf8ab1d/lxml-6.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e4f97aee337b947e6699e5574c90d087d3e2ce517016241c07e7e98a28dca885", size = 5246190, upload-time = "2026-04-12T16:26:49.468Z" }, - { url = "https://files.pythonhosted.org/packages/f2/ef/85cb69835113583c2516fee07d0ffb4d824b557424b06ba5872c20ba6078/lxml-6.0.4-cp314-cp314t-win32.whl", hash = "sha256:064477c0d4c695aa1ea4b9c1c4ee9043ab740d12135b74c458cc658350adcd86", size = 3896005, upload-time = "2026-04-12T16:26:52.163Z" }, - { url = "https://files.pythonhosted.org/packages/3d/5e/2231f34cc54b8422b793593138d86d3fa4588fb2297d4ea0472390f25627/lxml-6.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:25bad2d8438f4ef5a7ad4a8d8bcaadde20c0daced8bdb56d46236b0a7d1cbdd0", size = 4391037, upload-time = "2026-04-12T16:26:54.398Z" }, - { url = "https://files.pythonhosted.org/packages/39/53/8ba3cd5984f8363635450c93f63e541a0721b362bb32ae0d8237d9674aee/lxml-6.0.4-cp314-cp314t-win_arm64.whl", hash = "sha256:1dcd9e6cb9b7df808ea33daebd1801f37a8f50e8c075013ed2a2343246727838", size = 3816184, upload-time = "2026-04-12T16:26:57.011Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/d4/9326838b59dc36dfae42eec9656b97520f9997eee1de47b8316aaeed169c/lxml-6.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d2f17a16cd8751e8eb233a7e41aecdf8e511712e00088bf9be455f604cd0d28d", size = 8570663, upload-time = "2026-04-18T04:27:48.253Z" }, + { url = "https://files.pythonhosted.org/packages/d8/a4/053745ce1f8303ccbb788b86c0db3a91b973675cefc42566a188637b7c40/lxml-6.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f0cea5b1d3e6e77d71bd2b9972eb2446221a69dc52bb0b9c3c6f6e5700592d93", size = 4624024, upload-time = "2026-04-18T04:27:52.594Z" }, + { url = "https://files.pythonhosted.org/packages/90/97/a517944b20f8fd0932ad2109482bee4e29fe721416387a363306667941f6/lxml-6.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc46da94826188ed45cb53bd8e3fc076ae22675aea2087843d4735627f867c6d", size = 4930895, upload-time = "2026-04-18T04:32:56.29Z" }, + { url = "https://files.pythonhosted.org/packages/94/7c/e08a970727d556caa040a44773c7b7e3ad0f0d73dedc863543e9a8b931f2/lxml-6.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9147d8e386ec3b82c3b15d88927f734f565b0aaadef7def562b853adca45784a", size = 5093820, upload-time = "2026-04-18T04:32:58.94Z" }, + { url = "https://files.pythonhosted.org/packages/88/ee/2a5c2aa2c32016a226ca25d3e1056a8102ea6e1fe308bf50213586635400/lxml-6.1.0-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5715e0e28736a070f3f34a7ccc09e2fdcba0e3060abbcf61a1a5718ff6d6b105", size = 5005790, upload-time = "2026-04-18T04:33:01.272Z" }, + { url = "https://files.pythonhosted.org/packages/e3/38/a0db9be8f38ad6043ab9429487c128dd1d30f07956ef43040402f8da49e8/lxml-6.1.0-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4937460dc5df0cdd2f06a86c285c28afda06aefa3af949f9477d3e8df430c485", size = 5630827, upload-time = "2026-04-18T04:33:04.036Z" }, + { url = 
"https://files.pythonhosted.org/packages/31/ba/3c13d3fc24b7cacf675f808a3a1baabf43a30d0cd24c98f94548e9aa58eb/lxml-6.1.0-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bc783ee3147e60a25aa0445ea82b3e8aabb83b240f2b95d32cb75587ff781814", size = 5240445, upload-time = "2026-04-18T04:33:06.87Z" }, + { url = "https://files.pythonhosted.org/packages/55/ba/eeef4ccba09b2212fe239f46c1692a98db1878e0872ae320756488878a94/lxml-6.1.0-cp312-cp312-manylinux_2_28_i686.whl", hash = "sha256:40d9189f80075f2e1f88db21ef815a2b17b28adf8e50aaf5c789bfe737027f32", size = 5350121, upload-time = "2026-04-18T04:33:09.365Z" }, + { url = "https://files.pythonhosted.org/packages/7e/01/1da87c7b587c38d0cbe77a01aae3b9c1c49ed47d76918ef3db8fc151b1ca/lxml-6.1.0-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:05b9b8787e35bec69e68daf4952b2e6dfcfb0db7ecf1a06f8cdfbbac4eb71aad", size = 4694949, upload-time = "2026-04-18T04:33:11.628Z" }, + { url = "https://files.pythonhosted.org/packages/a1/88/7db0fe66d5aaf128443ee1623dec3db1576f3e4c17751ec0ef5866468590/lxml-6.1.0-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0f0f08beb0182e3e9a86fae124b3c47a7b41b7b69b225e1377db983802404e54", size = 5243901, upload-time = "2026-04-18T04:33:13.95Z" }, + { url = "https://files.pythonhosted.org/packages/00/a8/1346726af7d1f6fca1f11223ba34001462b0a3660416986d37641708d57c/lxml-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73becf6d8c81d4c76b1014dbd3584cb26d904492dcf73ca85dc8bff08dcd6d2d", size = 5048054, upload-time = "2026-04-18T04:33:16.965Z" }, + { url = "https://files.pythonhosted.org/packages/2e/b7/85057012f035d1a0c87e02f8c723ca3c3e6e0728bcf4cb62080b21b1c1e3/lxml-6.1.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1ae225f66e5938f4fa29d37e009a3bb3b13032ac57eb4eb42afa44f6e4054e69", size = 4777324, upload-time = "2026-04-18T04:33:19.832Z" }, + { url = 
"https://files.pythonhosted.org/packages/75/6c/ad2f94a91073ef570f33718040e8e160d5fb93331cf1ab3ca1323f939e2d/lxml-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:690022c7fae793b0489aa68a658822cea83e0d5933781811cabbf5ea3bcfe73d", size = 5645702, upload-time = "2026-04-18T04:33:22.436Z" }, + { url = "https://files.pythonhosted.org/packages/3b/89/0bb6c0bd549c19004c60eea9dc554dd78fd647b72314ef25d460e0d208c6/lxml-6.1.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:63aeafc26aac0be8aff14af7871249e87ea1319be92090bfd632ec68e03b16a5", size = 5232901, upload-time = "2026-04-18T04:33:26.21Z" }, + { url = "https://files.pythonhosted.org/packages/a1/d9/d609a11fb567da9399f525193e2b49847b5a409cdebe737f06a8b7126bdc/lxml-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:264c605ab9c0e4aa1a679636f4582c4d3313700009fac3ec9c3412ed0d8f3e1d", size = 5261333, upload-time = "2026-04-18T04:33:28.984Z" }, + { url = "https://files.pythonhosted.org/packages/a6/3a/ac3f99ec8ac93089e7dd556f279e0d14c24de0a74a507e143a2e4b496e7c/lxml-6.1.0-cp312-cp312-win32.whl", hash = "sha256:56971379bc5ee8037c5a0f09fa88f66cdb7d37c3e38af3e45cf539f41131ac1f", size = 3596289, upload-time = "2026-04-18T04:27:42.819Z" }, + { url = "https://files.pythonhosted.org/packages/f2/a7/0a915557538593cb1bbeedcd40e13c7a261822c26fecbbdb71dad0c2f540/lxml-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:bba078de0031c219e5dd06cf3e6bf8fb8e6e64a77819b358f53bb132e3e03366", size = 3997059, upload-time = "2026-04-18T04:27:46.764Z" }, + { url = "https://files.pythonhosted.org/packages/92/96/a5dc078cf0126fbfbc35611d77ecd5da80054b5893e28fb213a5613b9e1d/lxml-6.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:c3592631e652afa34999a088f98ba7dfc7d6aff0d535c410bea77a71743f3819", size = 3659552, upload-time = "2026-04-18T04:27:51.133Z" }, + { url = "https://files.pythonhosted.org/packages/08/03/69347590f1cf4a6d5a4944bb6099e6d37f334784f16062234e1f892fdb1d/lxml-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:a0092f2b107b69601adf562a57c956fbb596e05e3e6651cabd3054113b007e45", size = 8559689, upload-time = "2026-04-18T04:31:57.785Z" }, + { url = "https://files.pythonhosted.org/packages/3f/58/25e00bb40b185c974cfe156c110474d9a8a8390d5f7c92a4e328189bb60e/lxml-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fc7140d7a7386e6b545d41b7358f4d02b656d4053f5fa6859f92f4b9c2572c4d", size = 4617892, upload-time = "2026-04-18T04:32:01.78Z" }, + { url = "https://files.pythonhosted.org/packages/f5/54/92ad98a94ac318dc4f97aaac22ff8d1b94212b2ae8af5b6e9b354bf825f7/lxml-6.1.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:419c58fc92cc3a2c3fa5f78c63dbf5da70c1fa9c1b25f25727ecee89a96c7de2", size = 4923489, upload-time = "2026-04-18T04:33:31.401Z" }, + { url = "https://files.pythonhosted.org/packages/15/3b/a20aecfab42bdf4f9b390590d345857ad3ffd7c51988d1c89c53a0c73faf/lxml-6.1.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:37fabd1452852636cf38ecdcc9dd5ca4bba7a35d6c53fa09725deeb894a87491", size = 5082162, upload-time = "2026-04-18T04:33:34.262Z" }, + { url = "https://files.pythonhosted.org/packages/45/26/2cdb3d281ac1bd175603e290cbe4bad6eff127c0f8de90bafd6f8548f0fd/lxml-6.1.0-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a2853c8b2170cc6cd54a6b4d50d2c1a8a7aeca201f23804b4898525c7a152cfc", size = 4993247, upload-time = "2026-04-18T04:33:36.674Z" }, + { url = "https://files.pythonhosted.org/packages/f6/05/d735aef963740022a08185c84821f689fc903acb3d50326e6b1e9886cc22/lxml-6.1.0-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8e369cbd690e788c8d15e56222d91a09c6a417f49cbc543040cba0fe2e25a79e", size = 5613042, upload-time = "2026-04-18T04:33:39.205Z" }, + { url = "https://files.pythonhosted.org/packages/ee/b8/ead7c10efff731738c72e59ed6eb5791854879fbed7ae98781a12006263a/lxml-6.1.0-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:e69aa6805905807186eb00e66c6d97a935c928275182eb02ee40ba00da9623b2", size = 5228304, upload-time = "2026-04-18T04:33:41.647Z" }, + { url = "https://files.pythonhosted.org/packages/6b/10/e9842d2ec322ea65f0a7270aa0315a53abed06058b88ef1b027f620e7a5f/lxml-6.1.0-cp313-cp313-manylinux_2_28_i686.whl", hash = "sha256:4bd1bdb8a9e0e2dd229de19b5f8aebac80e916921b4b2c6ef8a52bc131d0c1f9", size = 5341578, upload-time = "2026-04-18T04:33:44.596Z" }, + { url = "https://files.pythonhosted.org/packages/89/54/40d9403d7c2775fa7301d3ddd3464689bfe9ba71acc17dfff777071b4fdc/lxml-6.1.0-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:cbd7b79cdcb4986ad78a2662625882747f09db5e4cd7b2ae178a88c9c51b3dfe", size = 4700209, upload-time = "2026-04-18T04:33:47.552Z" }, + { url = "https://files.pythonhosted.org/packages/85/b2/bbdcc2cf45dfc7dfffef4fd97e5c47b15919b6a365247d95d6f684ef5e82/lxml-6.1.0-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:43e4d297f11080ec9d64a4b1ad7ac02b4484c9f0e2179d9c4ef78e886e747b88", size = 5232365, upload-time = "2026-04-18T04:33:50.249Z" }, + { url = "https://files.pythonhosted.org/packages/48/5a/b06875665e53aaba7127611a7bed3b7b9658e20b22bc2dd217a0b7ab0091/lxml-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cc16682cc987a3da00aa56a3aa3075b08edb10d9b1e476938cfdbee8f3b67181", size = 5043654, upload-time = "2026-04-18T04:33:52.71Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9c/e71a069d09641c1a7abeb30e693f828c7c90a41cbe3d650b2d734d876f85/lxml-6.1.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:d6d8efe71429635f0559579092bb5e60560d7b9115ee38c4adbea35632e7fa24", size = 4769326, upload-time = "2026-04-18T04:33:55.244Z" }, + { url = "https://files.pythonhosted.org/packages/cc/06/7a9cd84b3d4ed79adf35f874750abb697dec0b4a81a836037b36e47c091a/lxml-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e39ab3a28af7784e206d8606ec0e4bcad0190f63a492bca95e94e5a4aef7f6e", size = 5635879, upload-time = 
"2026-04-18T04:33:58.509Z" }, + { url = "https://files.pythonhosted.org/packages/cc/f0/9d57916befc1e54c451712c7ee48e9e74e80ae4d03bdce49914e0aee42cd/lxml-6.1.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:9eb667bf50856c4a58145f8ca2d5e5be160191e79eb9e30855a476191b3c3495", size = 5224048, upload-time = "2026-04-18T04:34:00.943Z" }, + { url = "https://files.pythonhosted.org/packages/99/75/90c4eefda0c08c92221fe0753db2d6699a4c628f76ff4465ec20dea84cc1/lxml-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7f4a77d6f7edf9230cee3e1f7f6764722a41604ee5681844f18db9a81ea0ec33", size = 5250241, upload-time = "2026-04-18T04:34:03.365Z" }, + { url = "https://files.pythonhosted.org/packages/5e/73/16596f7e4e38fa33084b9ccbccc22a15f82a290a055126f2c1541236d2ff/lxml-6.1.0-cp313-cp313-win32.whl", hash = "sha256:28902146ffbe5222df411c5d19e5352490122e14447e98cd118907ee3fd6ee62", size = 3596938, upload-time = "2026-04-18T04:31:56.206Z" }, + { url = "https://files.pythonhosted.org/packages/8e/63/981401c5680c1eb30893f00a19641ac80db5d1e7086c62cb4b13ed813038/lxml-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:4a1503c56e4e2b38dc76f2f2da7bae69670c0f1933e27cfa34b2fa5876410b16", size = 3995728, upload-time = "2026-04-18T04:31:58.763Z" }, + { url = "https://files.pythonhosted.org/packages/e7/e8/c358a38ac3e541d16a1b527e4e9cb78c0419b0506a070ace11777e5e8404/lxml-6.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:e0af85773850417d994d019741239b901b22c6680206f46a34766926e466141d", size = 3658372, upload-time = "2026-04-18T04:32:03.629Z" }, + { url = "https://files.pythonhosted.org/packages/eb/45/cee4cf203ef0bab5c52afc118da61d6b460c928f2893d40023cfa27e0b80/lxml-6.1.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:ab863fd37458fed6456525f297d21239d987800c46e67da5ef04fc6b3dd93ac8", size = 8576713, upload-time = "2026-04-18T04:32:06.831Z" }, + { url = 
"https://files.pythonhosted.org/packages/8a/a7/eda05babeb7e046839204eaf254cd4d7c9130ce2bbf0d9e90ea41af5654d/lxml-6.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:6fd8b1df8254ff4fd93fd31da1fc15770bde23ac045be9bb1f87425702f61cc9", size = 4623874, upload-time = "2026-04-18T04:32:10.755Z" }, + { url = "https://files.pythonhosted.org/packages/e7/e9/db5846de9b436b91890a62f29d80cd849ea17948a49bf532d5278ee69a9e/lxml-6.1.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:47024feaae386a92a146af0d2aeed65229bf6fff738e6a11dda6b0015fb8fd03", size = 4949535, upload-time = "2026-04-18T04:34:06.657Z" }, + { url = "https://files.pythonhosted.org/packages/5a/ba/0d3593373dcae1d68f40dc3c41a5a92f2544e68115eb2f62319a4c2a6500/lxml-6.1.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3f00972f84450204cd5d93a5395965e348956aaceaadec693a22ec743f8ae3eb", size = 5086881, upload-time = "2026-04-18T04:34:09.556Z" }, + { url = "https://files.pythonhosted.org/packages/43/76/759a7484539ad1af0d125a9afe9c3fb5f82a8779fd1f5f56319d9e4ea2fd/lxml-6.1.0-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97faa0860e13b05b15a51fb4986421ef7a30f0b3334061c416e0981e9450ca4c", size = 5031305, upload-time = "2026-04-18T04:34:12.336Z" }, + { url = "https://files.pythonhosted.org/packages/dc/b9/c1f0daf981a11e47636126901fd4ab82429e18c57aeb0fc3ad2940b42d8b/lxml-6.1.0-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:972a6451204798675407beaad97b868d0c733d9a74dafefc63120b81b8c2de28", size = 5647522, upload-time = "2026-04-18T04:34:14.89Z" }, + { url = "https://files.pythonhosted.org/packages/31/e6/1f533dcd205275363d9ba3511bcec52fa2df86abf8abe6a5f2c599f0dc31/lxml-6.1.0-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fe022f20bc4569ec66b63b3fb275a3d628d9d32da6326b2982584104db6d3086", size = 5239310, upload-time = "2026-04-18T04:34:17.652Z" }, + { url = 
"https://files.pythonhosted.org/packages/c3/8c/4175fb709c78a6e315ed814ed33be3defd8b8721067e70419a6cf6f971da/lxml-6.1.0-cp314-cp314-manylinux_2_28_i686.whl", hash = "sha256:75c4c7c619a744f972f4451bf5adf6d0fb00992a1ffc9fd78e13b0bc817cc99f", size = 5350799, upload-time = "2026-04-18T04:34:20.529Z" }, + { url = "https://files.pythonhosted.org/packages/fd/77/6ffdebc5994975f0dde4acb59761902bd9d9bb84422b9a0bd239a7da9ca8/lxml-6.1.0-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:3648f20d25102a22b6061c688beb3a805099ea4beb0a01ce62975d926944d292", size = 4697693, upload-time = "2026-04-18T04:34:23.541Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f1/565f36bd5c73294602d48e04d23f81ff4c8736be6ba5e1d1ec670ac9be80/lxml-6.1.0-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:77b9f99b17cbf14026d1e618035077060fc7195dd940d025149f3e2e830fbfcb", size = 5250708, upload-time = "2026-04-18T04:34:26.001Z" }, + { url = "https://files.pythonhosted.org/packages/5a/11/a68ab9dd18c5c499404deb4005f4bc4e0e88e5b72cd755ad96efec81d18d/lxml-6.1.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:32662519149fd7a9db354175aa5e417d83485a8039b8aaa62f873ceee7ea4cad", size = 5084737, upload-time = "2026-04-18T04:34:28.32Z" }, + { url = "https://files.pythonhosted.org/packages/ab/78/e8f41e2c74f4af564e6a0348aea69fb6daaefa64bc071ef469823d22cc18/lxml-6.1.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:73d658216fc173cf2c939e90e07b941c5e12736b0bf6a99e7af95459cfe8eabb", size = 4737817, upload-time = "2026-04-18T04:34:30.784Z" }, + { url = "https://files.pythonhosted.org/packages/06/2d/aa4e117aa2ce2f3b35d9ff246be74a2f8e853baba5d2a92c64744474603a/lxml-6.1.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ac4db068889f8772a4a698c5980ec302771bb545e10c4b095d4c8be26749616f", size = 5670753, upload-time = "2026-04-18T04:34:33.675Z" }, + { url = 
"https://files.pythonhosted.org/packages/08/f5/dd745d50c0409031dbfcc4881740542a01e54d6f0110bd420fa7782110b8/lxml-6.1.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:45e9dfbd1b661eb64ba0d4dbe762bd210c42d86dd1e5bd2bdf89d634231beb43", size = 5238071, upload-time = "2026-04-18T04:34:36.12Z" }, + { url = "https://files.pythonhosted.org/packages/3e/74/ad424f36d0340a904665867dab310a3f1f4c96ff4039698de83b77f44c1f/lxml-6.1.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:89e8d73d09ac696a5ba42ec69787913d53284f12092f651506779314f10ba585", size = 5264319, upload-time = "2026-04-18T04:34:39.035Z" }, + { url = "https://files.pythonhosted.org/packages/53/36/a15d8b3514ec889bfd6aa3609107fcb6c9189f8dc347f1c0b81eded8d87c/lxml-6.1.0-cp314-cp314-win32.whl", hash = "sha256:ebe33f4ec1b2de38ceb225a1749a2965855bffeef435ba93cd2d5d540783bf2f", size = 3657139, upload-time = "2026-04-18T04:32:20.006Z" }, + { url = "https://files.pythonhosted.org/packages/1a/a4/263ebb0710851a3c6c937180a9a86df1206fdfe53cc43005aa2237fd7736/lxml-6.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:398443df51c538bd578529aa7e5f7afc6c292644174b47961f3bf87fe5741120", size = 4064195, upload-time = "2026-04-18T04:32:23.876Z" }, + { url = "https://files.pythonhosted.org/packages/80/68/2000f29d323b6c286de077ad20b429fc52272e44eae6d295467043e56012/lxml-6.1.0-cp314-cp314-win_arm64.whl", hash = "sha256:8c8984e1d8c4b3949e419158fda14d921ff703a9ed8a47236c6eb7a2b6cb4946", size = 3741870, upload-time = "2026-04-18T04:32:27.922Z" }, + { url = "https://files.pythonhosted.org/packages/30/e9/21383c7c8d43799f0da90224c0d7c921870d476ec9b3e01e1b2c0b8237c5/lxml-6.1.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:1081dd10bc6fa437db2500e13993abf7cc30716d0a2f40e65abb935f02ec559c", size = 8827548, upload-time = "2026-04-18T04:32:15.094Z" }, + { url = "https://files.pythonhosted.org/packages/a5/01/c6bc11cd587030dd4f719f65c5657960649fe3e19196c844c75bf32cd0d6/lxml-6.1.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = 
"sha256:dabecc48db5f42ba348d1f5d5afdc54c6c4cc758e676926c7cd327045749517d", size = 4735866, upload-time = "2026-04-18T04:32:18.924Z" }, + { url = "https://files.pythonhosted.org/packages/f3/01/757132fff5f4acf25463b5298f1a46099f3a94480b806547b29ce5e385de/lxml-6.1.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e3dd5fe19c9e0ac818a9c7f132a5e43c1339ec1cbbfecb1a938bd3a47875b7c9", size = 4969476, upload-time = "2026-04-18T04:34:41.889Z" }, + { url = "https://files.pythonhosted.org/packages/fd/fb/1bc8b9d27ed64be7c8903db6c89e74dc8c2cd9ec630a7462e4654316dc5b/lxml-6.1.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9e7b0a4ca6dcc007a4cef00a761bba2dea959de4bd2df98f926b33c92ca5dfb9", size = 5103719, upload-time = "2026-04-18T04:34:44.797Z" }, + { url = "https://files.pythonhosted.org/packages/d5/e7/5bf82fa28133536a54601aae633b14988e89ed61d4c1eb6b899b023233aa/lxml-6.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d27bbe326c6b539c64b42638b18bc6003a8d88f76213a97ac9ed4f885efeab7", size = 5027890, upload-time = "2026-04-18T04:34:47.634Z" }, + { url = "https://files.pythonhosted.org/packages/2d/20/e048db5d4b4ea0366648aa595f26bb764b2670903fc585b87436d0a5032c/lxml-6.1.0-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4e425db0c5445ef0ad56b0eec54f89b88b2d884656e536a90b2f52aecb4ca86", size = 5596008, upload-time = "2026-04-18T04:34:51.503Z" }, + { url = "https://files.pythonhosted.org/packages/9a/c2/d10807bc8da4824b39e5bd01b5d05c077b6fd01bd91584167edf6b269d22/lxml-6.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4b89b098105b8599dc57adac95d1813409ac476d3c948a498775d3d0c6124bfb", size = 5224451, upload-time = "2026-04-18T04:34:54.263Z" }, + { url = "https://files.pythonhosted.org/packages/3c/15/2ebea45bea427e7f0057e9ce7b2d62c5aba20c6b001cca89ed0aadb3ad41/lxml-6.1.0-cp314-cp314t-manylinux_2_28_i686.whl", hash = 
"sha256:c4a699432846df86cc3de502ee85f445ebad748a1c6021d445f3e514d2cd4b1c", size = 5312135, upload-time = "2026-04-18T04:34:56.818Z" }, + { url = "https://files.pythonhosted.org/packages/31/e2/87eeae151b0be2a308d49a7ec444ff3eb192b14251e62addb29d0bf3778f/lxml-6.1.0-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:30e7b2ed63b6c8e97cca8af048589a788ab5c9c905f36d9cf1c2bb549f450d2f", size = 4639126, upload-time = "2026-04-18T04:34:59.704Z" }, + { url = "https://files.pythonhosted.org/packages/a3/51/8a3f6a20902ad604dd746ec7b4000311b240d389dac5e9d95adefd349e0c/lxml-6.1.0-cp314-cp314t-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:022981127642fe19866d2907d76241bb07ed21749601f727d5d5dd1ce5d1b773", size = 5232579, upload-time = "2026-04-18T04:35:02.658Z" }, + { url = "https://files.pythonhosted.org/packages/6d/d2/650d619bdbe048d2c3f2c31edb00e35670a5e2d65b4fe3b61bce37b19121/lxml-6.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:23cad0cc86046d4222f7f418910e46b89971c5a45d3c8abfad0f64b7b05e4a9b", size = 5084206, upload-time = "2026-04-18T04:35:05.175Z" }, + { url = "https://files.pythonhosted.org/packages/dd/8a/672ca1a3cbeabd1f511ca275a916c0514b747f4b85bdaae103b8fa92f307/lxml-6.1.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:21c3302068f50d1e8728c67c87ba92aa87043abee517aa2576cca1855326b405", size = 4758906, upload-time = "2026-04-18T04:35:08.098Z" }, + { url = "https://files.pythonhosted.org/packages/be/f1/ef4b691da85c916cb2feb1eec7414f678162798ac85e042fa164419ac05c/lxml-6.1.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:be10838781cb3be19251e276910cd508fe127e27c3242e50521521a0f3781690", size = 5620553, upload-time = "2026-04-18T04:35:11.23Z" }, + { url = "https://files.pythonhosted.org/packages/59/17/94e81def74107809755ac2782fdad4404420f1c92ca83433d117a6d5acf0/lxml-6.1.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:2173a7bffe97667bbf0767f8a99e587740a8c56fdf3befac4b09cb29a80276fd", size = 5229458, upload-time = 
"2026-04-18T04:35:14.254Z" }, + { url = "https://files.pythonhosted.org/packages/21/55/c4be91b0f830a871fc1b0d730943d56013b683d4671d5198260e2eae722b/lxml-6.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c6854e9cf99c84beb004eecd7d3a3868ef1109bf2b1df92d7bc11e96a36c2180", size = 5247861, upload-time = "2026-04-18T04:35:17.006Z" }, + { url = "https://files.pythonhosted.org/packages/c2/ca/77123e4d77df3cb1e968ade7b1f808f5d3a5c1c96b18a33895397de292c1/lxml-6.1.0-cp314-cp314t-win32.whl", hash = "sha256:00750d63ef0031a05331b9223463b1c7c02b9004cef2346a5b2877f0f9494dd2", size = 3897377, upload-time = "2026-04-18T04:32:07.656Z" }, + { url = "https://files.pythonhosted.org/packages/64/ce/3554833989d074267c063209bae8b09815e5656456a2d332b947806b05ff/lxml-6.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:80410c3a7e3c617af04de17caa9f9f20adaa817093293d69eae7d7d0522836f5", size = 4392701, upload-time = "2026-04-18T04:32:12.113Z" }, + { url = "https://files.pythonhosted.org/packages/2b/a0/9b916c68c0e57752c07f8f64b30138d9d4059dbeb27b90274dedbea128ff/lxml-6.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:26dd9f57ee3bd41e7d35b4c98a2ffd89ed11591649f421f0ec19f67d50ec67ac", size = 3817120, upload-time = "2026-04-18T04:32:15.803Z" }, ] [[package]] @@ -3412,7 +3356,7 @@ wheels = [ [[package]] name = "matplotlib" -version = "3.10.8" +version = "3.10.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "contourpy" }, @@ -3425,43 +3369,43 @@ dependencies = [ { name = "pyparsing" }, { name = "python-dateutil" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/8a/76/d3c6e3a13fe484ebe7718d14e269c9569c4eb0020a968a327acb3b9a8fe6/matplotlib-3.10.8.tar.gz", hash = "sha256:2299372c19d56bcd35cf05a2738308758d32b9eaed2371898d8f5bd33f084aa3", size = 34806269, upload-time = "2025-12-10T22:56:51.155Z" } +sdist = { url = "https://files.pythonhosted.org/packages/63/1b/4be5be87d43d327a0cf4de1a56e86f7f84c89312452406cf122efe2839e6/matplotlib-3.10.9.tar.gz", hash = 
"sha256:fd66508e8c6877d98e586654b608a0456db8d7e8a546eb1e2600efd957302358", size = 34811233, upload-time = "2026-04-24T00:14:13.539Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9e/67/f997cdcbb514012eb0d10cd2b4b332667997fb5ebe26b8d41d04962fa0e6/matplotlib-3.10.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:64fcc24778ca0404ce0cb7b6b77ae1f4c7231cdd60e6778f999ee05cbd581b9a", size = 8260453, upload-time = "2025-12-10T22:55:30.709Z" }, - { url = "https://files.pythonhosted.org/packages/7e/65/07d5f5c7f7c994f12c768708bd2e17a4f01a2b0f44a1c9eccad872433e2e/matplotlib-3.10.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9a5ca4ac220a0cdd1ba6bcba3608547117d30468fefce49bb26f55c1a3d5c58", size = 8148321, upload-time = "2025-12-10T22:55:33.265Z" }, - { url = "https://files.pythonhosted.org/packages/3e/f3/c5195b1ae57ef85339fd7285dfb603b22c8b4e79114bae5f4f0fcf688677/matplotlib-3.10.8-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ab4aabc72de4ff77b3ec33a6d78a68227bf1123465887f9905ba79184a1cc04", size = 8716944, upload-time = "2025-12-10T22:55:34.922Z" }, - { url = "https://files.pythonhosted.org/packages/00/f9/7638f5cc82ec8a7aa005de48622eecc3ed7c9854b96ba15bd76b7fd27574/matplotlib-3.10.8-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:24d50994d8c5816ddc35411e50a86ab05f575e2530c02752e02538122613371f", size = 9550099, upload-time = "2025-12-10T22:55:36.789Z" }, - { url = "https://files.pythonhosted.org/packages/57/61/78cd5920d35b29fd2a0fe894de8adf672ff52939d2e9b43cb83cd5ce1bc7/matplotlib-3.10.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:99eefd13c0dc3b3c1b4d561c1169e65fe47aab7b8158754d7c084088e2329466", size = 9613040, upload-time = "2025-12-10T22:55:38.715Z" }, - { url = "https://files.pythonhosted.org/packages/30/4e/c10f171b6e2f44d9e3a2b96efa38b1677439d79c99357600a62cc1e9594e/matplotlib-3.10.8-cp312-cp312-win_amd64.whl", hash = 
"sha256:dd80ecb295460a5d9d260df63c43f4afbdd832d725a531f008dad1664f458adf", size = 8142717, upload-time = "2025-12-10T22:55:41.103Z" }, - { url = "https://files.pythonhosted.org/packages/f1/76/934db220026b5fef85f45d51a738b91dea7d70207581063cd9bd8fafcf74/matplotlib-3.10.8-cp312-cp312-win_arm64.whl", hash = "sha256:3c624e43ed56313651bc18a47f838b60d7b8032ed348911c54906b130b20071b", size = 8012751, upload-time = "2025-12-10T22:55:42.684Z" }, - { url = "https://files.pythonhosted.org/packages/3d/b9/15fd5541ef4f5b9a17eefd379356cf12175fe577424e7b1d80676516031a/matplotlib-3.10.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3f2e409836d7f5ac2f1c013110a4d50b9f7edc26328c108915f9075d7d7a91b6", size = 8261076, upload-time = "2025-12-10T22:55:44.648Z" }, - { url = "https://files.pythonhosted.org/packages/8d/a0/2ba3473c1b66b9c74dc7107c67e9008cb1782edbe896d4c899d39ae9cf78/matplotlib-3.10.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:56271f3dac49a88d7fca5060f004d9d22b865f743a12a23b1e937a0be4818ee1", size = 8148794, upload-time = "2025-12-10T22:55:46.252Z" }, - { url = "https://files.pythonhosted.org/packages/75/97/a471f1c3eb1fd6f6c24a31a5858f443891d5127e63a7788678d14e249aea/matplotlib-3.10.8-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a0a7f52498f72f13d4a25ea70f35f4cb60642b466cbb0a9be951b5bc3f45a486", size = 8718474, upload-time = "2025-12-10T22:55:47.864Z" }, - { url = "https://files.pythonhosted.org/packages/01/be/cd478f4b66f48256f42927d0acbcd63a26a893136456cd079c0cc24fbabf/matplotlib-3.10.8-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:646d95230efb9ca614a7a594d4fcacde0ac61d25e37dd51710b36477594963ce", size = 9549637, upload-time = "2025-12-10T22:55:50.048Z" }, - { url = "https://files.pythonhosted.org/packages/5d/7c/8dc289776eae5109e268c4fb92baf870678dc048a25d4ac903683b86d5bf/matplotlib-3.10.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:f89c151aab2e2e23cb3fe0acad1e8b82841fd265379c4cecd0f3fcb34c15e0f6", size = 9613678, upload-time = "2025-12-10T22:55:52.21Z" }, - { url = "https://files.pythonhosted.org/packages/64/40/37612487cc8a437d4dd261b32ca21fe2d79510fe74af74e1f42becb1bdb8/matplotlib-3.10.8-cp313-cp313-win_amd64.whl", hash = "sha256:e8ea3e2d4066083e264e75c829078f9e149fa119d27e19acd503de65e0b13149", size = 8142686, upload-time = "2025-12-10T22:55:54.253Z" }, - { url = "https://files.pythonhosted.org/packages/66/52/8d8a8730e968185514680c2a6625943f70269509c3dcfc0dcf7d75928cb8/matplotlib-3.10.8-cp313-cp313-win_arm64.whl", hash = "sha256:c108a1d6fa78a50646029cb6d49808ff0fc1330fda87fa6f6250c6b5369b6645", size = 8012917, upload-time = "2025-12-10T22:55:56.268Z" }, - { url = "https://files.pythonhosted.org/packages/b5/27/51fe26e1062f298af5ef66343d8ef460e090a27fea73036c76c35821df04/matplotlib-3.10.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ad3d9833a64cf48cc4300f2b406c3d0f4f4724a91c0bd5640678a6ba7c102077", size = 8305679, upload-time = "2025-12-10T22:55:57.856Z" }, - { url = "https://files.pythonhosted.org/packages/2c/1e/4de865bc591ac8e3062e835f42dd7fe7a93168d519557837f0e37513f629/matplotlib-3.10.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:eb3823f11823deade26ce3b9f40dcb4a213da7a670013929f31d5f5ed1055b22", size = 8198336, upload-time = "2025-12-10T22:55:59.371Z" }, - { url = "https://files.pythonhosted.org/packages/c6/cb/2f7b6e75fb4dce87ef91f60cac4f6e34f4c145ab036a22318ec837971300/matplotlib-3.10.8-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d9050fee89a89ed57b4fb2c1bfac9a3d0c57a0d55aed95949eedbc42070fea39", size = 8731653, upload-time = "2025-12-10T22:56:01.032Z" }, - { url = "https://files.pythonhosted.org/packages/46/b3/bd9c57d6ba670a37ab31fb87ec3e8691b947134b201f881665b28cc039ff/matplotlib-3.10.8-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:b44d07310e404ba95f8c25aa5536f154c0a8ec473303535949e52eb71d0a1565", size = 9561356, upload-time = "2025-12-10T22:56:02.95Z" }, - { url = "https://files.pythonhosted.org/packages/c0/3d/8b94a481456dfc9dfe6e39e93b5ab376e50998cddfd23f4ae3b431708f16/matplotlib-3.10.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0a33deb84c15ede243aead39f77e990469fff93ad1521163305095b77b72ce4a", size = 9614000, upload-time = "2025-12-10T22:56:05.411Z" }, - { url = "https://files.pythonhosted.org/packages/bd/cd/bc06149fe5585ba800b189a6a654a75f1f127e8aab02fd2be10df7fa500c/matplotlib-3.10.8-cp313-cp313t-win_amd64.whl", hash = "sha256:3a48a78d2786784cc2413e57397981fb45c79e968d99656706018d6e62e57958", size = 8220043, upload-time = "2025-12-10T22:56:07.551Z" }, - { url = "https://files.pythonhosted.org/packages/e3/de/b22cf255abec916562cc04eef457c13e58a1990048de0c0c3604d082355e/matplotlib-3.10.8-cp313-cp313t-win_arm64.whl", hash = "sha256:15d30132718972c2c074cd14638c7f4592bd98719e2308bccea40e0538bc0cb5", size = 8062075, upload-time = "2025-12-10T22:56:09.178Z" }, - { url = "https://files.pythonhosted.org/packages/3c/43/9c0ff7a2f11615e516c3b058e1e6e8f9614ddeca53faca06da267c48345d/matplotlib-3.10.8-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b53285e65d4fa4c86399979e956235deb900be5baa7fc1218ea67fbfaeaadd6f", size = 8262481, upload-time = "2025-12-10T22:56:10.885Z" }, - { url = "https://files.pythonhosted.org/packages/6f/ca/e8ae28649fcdf039fda5ef554b40a95f50592a3c47e6f7270c9561c12b07/matplotlib-3.10.8-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:32f8dce744be5569bebe789e46727946041199030db8aeb2954d26013a0eb26b", size = 8151473, upload-time = "2025-12-10T22:56:12.377Z" }, - { url = "https://files.pythonhosted.org/packages/f1/6f/009d129ae70b75e88cbe7e503a12a4c0670e08ed748a902c2568909e9eb5/matplotlib-3.10.8-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cf267add95b1c88300d96ca837833d4112756045364f5c734a2276038dae27d", size = 9553896, 
upload-time = "2025-12-10T22:56:14.432Z" }, - { url = "https://files.pythonhosted.org/packages/f5/26/4221a741eb97967bc1fd5e4c52b9aa5a91b2f4ec05b59f6def4d820f9df9/matplotlib-3.10.8-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2cf5bd12cecf46908f286d7838b2abc6c91cda506c0445b8223a7c19a00df008", size = 9824193, upload-time = "2025-12-10T22:56:16.29Z" }, - { url = "https://files.pythonhosted.org/packages/1f/f3/3abf75f38605772cf48a9daf5821cd4f563472f38b4b828c6fba6fa6d06e/matplotlib-3.10.8-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:41703cc95688f2516b480f7f339d8851a6035f18e100ee6a32bc0b8536a12a9c", size = 9615444, upload-time = "2025-12-10T22:56:18.155Z" }, - { url = "https://files.pythonhosted.org/packages/93/a5/de89ac80f10b8dc615807ee1133cd99ac74082581196d4d9590bea10690d/matplotlib-3.10.8-cp314-cp314-win_amd64.whl", hash = "sha256:83d282364ea9f3e52363da262ce32a09dfe241e4080dcedda3c0db059d3c1f11", size = 8272719, upload-time = "2025-12-10T22:56:20.366Z" }, - { url = "https://files.pythonhosted.org/packages/69/ce/b006495c19ccc0a137b48083168a37bd056392dee02f87dba0472f2797fe/matplotlib-3.10.8-cp314-cp314-win_arm64.whl", hash = "sha256:2c1998e92cd5999e295a731bcb2911c75f597d937341f3030cc24ef2733d78a8", size = 8144205, upload-time = "2025-12-10T22:56:22.239Z" }, - { url = "https://files.pythonhosted.org/packages/68/d9/b31116a3a855bd313c6fcdb7226926d59b041f26061c6c5b1be66a08c826/matplotlib-3.10.8-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b5a2b97dbdc7d4f353ebf343744f1d1f1cca8aa8bfddb4262fcf4306c3761d50", size = 8305785, upload-time = "2025-12-10T22:56:24.218Z" }, - { url = "https://files.pythonhosted.org/packages/1e/90/6effe8103f0272685767ba5f094f453784057072f49b393e3ea178fe70a5/matplotlib-3.10.8-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3f5c3e4da343bba819f0234186b9004faba952cc420fbc522dc4e103c1985908", size = 8198361, upload-time = "2025-12-10T22:56:26.787Z" }, - { url = 
"https://files.pythonhosted.org/packages/d7/65/a73188711bea603615fc0baecca1061429ac16940e2385433cc778a9d8e7/matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f62550b9a30afde8c1c3ae450e5eb547d579dd69b25c2fc7a1c67f934c1717a", size = 9561357, upload-time = "2025-12-10T22:56:28.953Z" }, - { url = "https://files.pythonhosted.org/packages/f4/3d/b5c5d5d5be8ce63292567f0e2c43dde9953d3ed86ac2de0a72e93c8f07a1/matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:495672de149445ec1b772ff2c9ede9b769e3cb4f0d0aa7fa730d7f59e2d4e1c1", size = 9823610, upload-time = "2025-12-10T22:56:31.455Z" }, - { url = "https://files.pythonhosted.org/packages/4d/4b/e7beb6bbd49f6bae727a12b270a2654d13c397576d25bd6786e47033300f/matplotlib-3.10.8-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:595ba4d8fe983b88f0eec8c26a241e16d6376fe1979086232f481f8f3f67494c", size = 9614011, upload-time = "2025-12-10T22:56:33.85Z" }, - { url = "https://files.pythonhosted.org/packages/7c/e6/76f2813d31f032e65f6f797e3f2f6e4aab95b65015924b1c51370395c28a/matplotlib-3.10.8-cp314-cp314t-win_amd64.whl", hash = "sha256:25d380fe8b1dc32cf8f0b1b448470a77afb195438bafdf1d858bfb876f3edf7b", size = 8362801, upload-time = "2025-12-10T22:56:36.107Z" }, - { url = "https://files.pythonhosted.org/packages/5d/49/d651878698a0b67f23aa28e17f45a6d6dd3d3f933fa29087fa4ce5947b5a/matplotlib-3.10.8-cp314-cp314t-win_arm64.whl", hash = "sha256:113bb52413ea508ce954a02c10ffd0d565f9c3bc7f2eddc27dfe1731e71c7b5f", size = 8192560, upload-time = "2025-12-10T22:56:38.008Z" }, + { url = "https://files.pythonhosted.org/packages/35/c6/5581e26c72233ebb2a2a6fed2d24fb7c66b4700120b813f51b0555acf0b6/matplotlib-3.10.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f0c3c28d9fbcc1fe7a03be236d73430cf6409c41fb2383a7ac52fe932b072cb1", size = 8319908, upload-time = "2026-04-24T00:12:21.323Z" }, + { url = 
"https://files.pythonhosted.org/packages/b7/18/4880dd762e40cd360c1bf06e890c5a97b997e91cb324602b1a19950ad5ce/matplotlib-3.10.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41cb28c2bd769aa3e98322c6ab09854cbcc52ab69d2759d681bba3e327b2b320", size = 8216016, upload-time = "2026-04-24T00:12:23.4Z" }, + { url = "https://files.pythonhosted.org/packages/32/91/d024616abdba99e83120e07a20658976f6a343646710760c4a51df126029/matplotlib-3.10.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ae20801130378b82d647ff5047c07316295b68dc054ca6b3c13519d0ea624285", size = 8789336, upload-time = "2026-04-24T00:12:26.096Z" }, + { url = "https://files.pythonhosted.org/packages/5c/04/030a2f61ef2158f5e4c259487a92ac877732499fb33d871585d89e03c42d/matplotlib-3.10.9-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6c63ebcd8b4b169eb2f5c200552ae6b8be8999a005b6b507ed76fb8d7d674fe2", size = 9604602, upload-time = "2026-04-24T00:12:29.052Z" }, + { url = "https://files.pythonhosted.org/packages/fc/c2/541e4d09d87bb6b5830fc28b4c887a9a8cf4e1c6cee698a8c05552ae2003/matplotlib-3.10.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d75d11c949914165976c621b2324f9ef162af7ebf4b057ddf95dd1dba7e5edcf", size = 9670966, upload-time = "2026-04-24T00:12:32.131Z" }, + { url = "https://files.pythonhosted.org/packages/04/a1/4571fc46e7702de8d0c2dc54ad1b2f8e29328dea3ee90831181f7353d93c/matplotlib-3.10.9-cp312-cp312-win_amd64.whl", hash = "sha256:d091f9d758b34aaaaa6331d13574bf01891d903b3dec59bfff458ef7551de5d6", size = 8217462, upload-time = "2026-04-24T00:12:35.226Z" }, + { url = "https://files.pythonhosted.org/packages/4b/d0/2269edb12aa30c13c8bcc9382892e39943ce1d28aab4ec296e0381798e81/matplotlib-3.10.9-cp312-cp312-win_arm64.whl", hash = "sha256:10cc5ce06d10231c36f40e875f3c7e8050362a4ee8f0ee5d29a6b3277d57bb42", size = 8136688, upload-time = "2026-04-24T00:12:37.442Z" }, + { url = 
"https://files.pythonhosted.org/packages/aa/d3/8d4f6afbecb49fc04e060a57c0fce39ea51cc163a6bd87303ccd698e4fa6/matplotlib-3.10.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b580440f1ff81a0e34122051a3dfabb7e4b7f9e380629929bde0eff9af72165f", size = 8320331, upload-time = "2026-04-24T00:12:39.688Z" }, + { url = "https://files.pythonhosted.org/packages/63/d9/9e14bc7564bf92d5ffa801ae5fac819ce74b925dfb55e3ebde61a3bbad3e/matplotlib-3.10.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b1b745c489cd1a77a0dc1120a05dc87af9798faebc913601feb8c73d89bf2d1e", size = 8216461, upload-time = "2026-04-24T00:12:42.494Z" }, + { url = "https://files.pythonhosted.org/packages/8a/17/4402d0d14ccf1dfc70932600b68097fbbf9c898a4871d2cbbe79c7801a32/matplotlib-3.10.9-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8f3bcac1ca5ed000a6f4337d47ba67dfddf37ed6a46c15fd7f014997f7bf865f", size = 8790091, upload-time = "2026-04-24T00:12:44.789Z" }, + { url = "https://files.pythonhosted.org/packages/3e/0b/322aeec06dd9b91411f92028b37d447342770a24392aa4813e317064dad5/matplotlib-3.10.9-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a8d66a55def891c33147ba3ba9bfcabf0b526a43764c818acbb4525e5ed0838", size = 9605027, upload-time = "2026-04-24T00:12:47.583Z" }, + { url = "https://files.pythonhosted.org/packages/74/88/5f13482f55e7b00bcfc09838b093c2456e1379978d2a146844aae05350ad/matplotlib-3.10.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d843374407c4017a6403b59c6c81606773d136f3259d5b6da3131bc814542cc2", size = 9671269, upload-time = "2026-04-24T00:12:50.878Z" }, + { url = "https://files.pythonhosted.org/packages/c5/e0/0840fd2f93da988ec660b8ad1984abe9f25d2aed22a5e394ff1c68c88307/matplotlib-3.10.9-cp313-cp313-win_amd64.whl", hash = "sha256:f4399f64b3e94cd500195490972ae1ee81170df1636fa15364d157d5bdd7b921", size = 8217588, upload-time = "2026-04-24T00:12:53.784Z" }, + { url = 
"https://files.pythonhosted.org/packages/47/b9/d706d06dd605c49b9f83a2aed8c13e3e5db70697d7a80b7e3d7915de6b17/matplotlib-3.10.9-cp313-cp313-win_arm64.whl", hash = "sha256:ba7b3b8ef09eab7df0e86e9ae086faa433efbfbdb46afcb3aa16aabf779469a8", size = 8136913, upload-time = "2026-04-24T00:12:56.501Z" }, + { url = "https://files.pythonhosted.org/packages/9b/45/6e32d96978264c8ca8c4b1010adb955a1a49cfaf314e212bbc8908f04a61/matplotlib-3.10.9-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:09218df8a93712bd6ea133e83a153c755448cf7868316c531cffcc43f69d1cc9", size = 8368019, upload-time = "2026-04-24T00:12:58.896Z" }, + { url = "https://files.pythonhosted.org/packages/86/0a/c8e3d3bba245f0f7fc424937f8ff7ef77291a36af3edb97ccd78aa93d84f/matplotlib-3.10.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:82368699727bfb7b0182e1aa13082e3c08e092fa1a25d3e1fd92405bff96f6d4", size = 8264645, upload-time = "2026-04-24T00:13:01.406Z" }, + { url = "https://files.pythonhosted.org/packages/3d/aa/5bf5a14fe4fed73a4209a155606f8096ff797aad89c6c35179026571133e/matplotlib-3.10.9-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3225f4e1edcb8c86c884ddf79ebe20ecd0a67d30188f279897554ccd8fded4dc", size = 8802194, upload-time = "2026-04-24T00:13:03.702Z" }, + { url = "https://files.pythonhosted.org/packages/dd/5e/b4be852d6bba6fd15893fadf91ff26ae49cb91aac789e95dde9d342e664f/matplotlib-3.10.9-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de2445a0c6690d21b7eb6ce071cebad6d40a2e9bdf10d039074a96ba19797b99", size = 9622684, upload-time = "2026-04-24T00:13:06.647Z" }, + { url = "https://files.pythonhosted.org/packages/4c/3d/ed428c971139112ef730f62770654d609467346d09d4b62617e1afd68a5a/matplotlib-3.10.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:b2b9516251cb89ff618d757daec0e2ed1bf21248013844a853d87ef85ab3081d", size = 9680790, upload-time = "2026-04-24T00:13:10.009Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/09/052e884aaf2b985c63cb79f715f1d5b6a3eaa7de78f6a52b9dbc077d5b53/matplotlib-3.10.9-cp313-cp313t-win_amd64.whl", hash = "sha256:e9fae004b941b23ff2edcf1567a857ed77bafc8086ffa258190462328434faf8", size = 8287571, upload-time = "2026-04-24T00:13:13.087Z" }, + { url = "https://files.pythonhosted.org/packages/f4/38/ae27288e788c35a4250491422f3db7750366fc8c97d6f36fbdecfc1f5518/matplotlib-3.10.9-cp313-cp313t-win_arm64.whl", hash = "sha256:6b63d9c7c769b88ab81e10dc86e4e0607cf56817b9f9e6cf24b2a5f1693b8e38", size = 8188292, upload-time = "2026-04-24T00:13:15.546Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e6/3bd8afd04949f02eabc1c17115ea5255e19cacd4d06fc5abdde4eeb0052c/matplotlib-3.10.9-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:172db52c9e683f5d12eaf57f0f54834190e12581fe1cc2a19595a8f5acb4e77d", size = 8321276, upload-time = "2026-04-24T00:13:18.318Z" }, + { url = "https://files.pythonhosted.org/packages/41/86/86231232fff41c9f8e4a1a7d7a597d349a02527109c3af7d618366122139/matplotlib-3.10.9-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:97e35e8d39ccc85859095e01a53847432ba9a53ddf7986f7a54a11b73d0e143f", size = 8218218, upload-time = "2026-04-24T00:13:20.974Z" }, + { url = "https://files.pythonhosted.org/packages/85/8f/becc9722cafc64f5d2eb0b7c1bf5f585271c618a45dbd8fabeb021f898b6/matplotlib-3.10.9-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aba1615dabe83188e19d4f75a253c6a08423e04c1425e64039f800050a69de6b", size = 9608145, upload-time = "2026-04-24T00:13:23.228Z" }, + { url = "https://files.pythonhosted.org/packages/32/5d/f7e914f7d9325abff4057cee62c0fa70263683189f774473cbfb534cd13b/matplotlib-3.10.9-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34cf8167e023ad956c15f36302911d5406bd99a9862c1a8499ea6f7c0e015dc2", size = 9885085, upload-time = "2026-04-24T00:13:25.849Z" }, + { url = 
"https://files.pythonhosted.org/packages/a5/fd/fa69f2221534e80cc5772ac2b7d222011a2acafc2ec7216d5dd174c864ae/matplotlib-3.10.9-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:59476c6d29d612b8e9bb6ce8c5b631be6ba8f9e3a2421f22a02b192c7dd28716", size = 9672358, upload-time = "2026-04-24T00:13:28.906Z" }, + { url = "https://files.pythonhosted.org/packages/ab/1a/5a4f747a8b271cbb024946d2dd3c913ab5032ba430626f8c3528ada96b4b/matplotlib-3.10.9-cp314-cp314-win_amd64.whl", hash = "sha256:336b9acc64d309063126edcdaca00db9373af3c476bb94388fe9c5a53ad13e6f", size = 8349970, upload-time = "2026-04-24T00:13:31.904Z" }, + { url = "https://files.pythonhosted.org/packages/64/dc/95d60ecaefe30680a154b52ea96ab4b0dab547f1fd6aa12f5fb655e89cae/matplotlib-3.10.9-cp314-cp314-win_arm64.whl", hash = "sha256:2dc9477819ffd78ad12a20df1d9d6a6bd4fec6aaa9072681465fddca052f1456", size = 8272785, upload-time = "2026-04-24T00:13:34.511Z" }, + { url = "https://files.pythonhosted.org/packages/70/a0/005d68bc8b8418300ce6591f18586910a8526806e2ab663933d9f20a41e9/matplotlib-3.10.9-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:da4e09638420548f31c354032a6250e473c68e5a4e96899b4844cf39ddea23fe", size = 8367999, upload-time = "2026-04-24T00:13:36.962Z" }, + { url = "https://files.pythonhosted.org/packages/22/05/1236cc9290be70b2498af20ca348add76e3fffe7f67b477db5133a84f3ea/matplotlib-3.10.9-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:345f6f68ecc8da0ca56fad2ea08fde1a115eda530079eca185d50a7bc3e146c6", size = 8264543, upload-time = "2026-04-24T00:13:39.851Z" }, + { url = "https://files.pythonhosted.org/packages/cd/c2/071f5a5ff6c5bd63aaaf2f45c811d9bf2ced94bde188d9e1a519e21d0cba/matplotlib-3.10.9-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4edcfbd8565339aa62f1cd4012f7180926fdbe71850f7b0d3c379c175cd6b66c", size = 9622800, upload-time = "2026-04-24T00:13:42.296Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/57/da7d1f10a85624b9e7db68e069dd94e58dc41dbf9463c5921632ecbe3661/matplotlib-3.10.9-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6be157fe17fc37cb95ac1d7374cf717ce9259616edec911a78d9d26dae8522d4", size = 9888561, upload-time = "2026-04-24T00:13:45.026Z" }, + { url = "https://files.pythonhosted.org/packages/67/b2/ef8d6bb59b0edb6c16c968b70f548aa13b54348972def5aa6ac85df67145/matplotlib-3.10.9-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:4e42042d54db34fda4e95a7bd3e5789c2a995d2dad3eb8850232ee534092fbbf", size = 9680884, upload-time = "2026-04-24T00:13:48.066Z" }, + { url = "https://files.pythonhosted.org/packages/61/1c/d21bfeb9931881ebe96bcfcff27c7ae4b160ae0ec291a714c42641a56d75/matplotlib-3.10.9-cp314-cp314t-win_amd64.whl", hash = "sha256:c27df8b3848f32a83d1767566595e43cfaa4460380974da06f4279a7ec143c39", size = 8432333, upload-time = "2026-04-24T00:13:51.008Z" }, + { url = "https://files.pythonhosted.org/packages/78/23/92493c3e6e1b635ccfff146f7b99e674808787915420373ac399283764c2/matplotlib-3.10.9-cp314-cp314t-win_arm64.whl", hash = "sha256:a49f1eadc84ca85fd72fa4e89e70e61bf86452df6f971af04b12c60761a0772c", size = 8324785, upload-time = "2026-04-24T00:13:53.633Z" }, ] [[package]] @@ -3542,11 +3486,11 @@ wheels = [ [[package]] name = "mistune" -version = "3.2.0" +version = "3.2.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9d/55/d01f0c4b45ade6536c51170b9043db8b2ec6ddf4a35c7ea3f5f559ac935b/mistune-3.2.0.tar.gz", hash = "sha256:708487c8a8cdd99c9d90eb3ed4c3ed961246ff78ac82f03418f5183ab70e398a", size = 95467, upload-time = "2025-12-23T11:36:34.994Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/84/620cc3f7e3adf6f5067e10f4dbae71295d8f9e16d5d3f9ef97c40f2f592c/mistune-3.2.1.tar.gz", hash = "sha256:7c8e5501d38bac1582e067e46c8343f17d57ea1aaa735823f3aba1fd59c88a28", size = 98003, upload-time = 
"2026-05-03T14:33:22.312Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/f7/4a5e785ec9fbd65146a27b6b70b6cdc161a66f2024e4b04ac06a67f5578b/mistune-3.2.0-py3-none-any.whl", hash = "sha256:febdc629a3c78616b94393c6580551e0e34cc289987ec6c35ed3f4be42d0eee1", size = 53598, upload-time = "2025-12-23T11:36:33.211Z" }, + { url = "https://files.pythonhosted.org/packages/2a/7f/a946aa4f8752b37102b41e64dca18a1976ac705c3a0d1dfe74d820a02552/mistune-3.2.1-py3-none-any.whl", hash = "sha256:78cdb0ba5e938053ccf63651b352508d2efa9411dc8810bfb05f2dc5140c0048", size = 53749, upload-time = "2026-05-03T14:33:20.551Z" }, ] [[package]] @@ -3605,7 +3549,7 @@ wheels = [ [[package]] name = "mujoco" -version = "3.7.0" +version = "3.8.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "absl-py" }, @@ -3614,18 +3558,23 @@ dependencies = [ { name = "numpy" }, { name = "pyopengl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/49/6e/9cba0cf43410aee5123ed670ce6e57f901974cc6a59093ce49200494b604/mujoco-3.7.0.tar.gz", hash = "sha256:d325c5448702a919db5b3d0050fff0af86d47da146d59678722b2112f7b55084", size = 917551, upload-time = "2026-04-14T12:50:31.331Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e2/d8/9aae1a021b6e15ee69d805d893e01dda71cbaae1c75d5f8ec8e12916cb7c/mujoco-3.8.0.tar.gz", hash = "sha256:250afe57458d6881b2d7659fa0029a128cb57cbbb620268d95647fb9ad742183", size = 918250, upload-time = "2026-04-24T22:59:07.531Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/dc/7d/e1ad3b1b604b009c0df7246b50b5c5cf9e8e0689a10279661951c69850ea/mujoco-3.7.0-cp312-cp312-macosx_10_16_x86_64.whl", hash = "sha256:9193b8bc478708f199c2decb7bdeb06a962849f550ac182c35168ac9938ca859", size = 7217455, upload-time = "2026-04-14T12:50:08.756Z" }, - { url = "https://files.pythonhosted.org/packages/5b/fc/225a068a2bec3de5029ba08866e03df9f159719cecedfc0cf100d4db6a18/mujoco-3.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:52b60fd634885c43c494868a992f696c078402c32b9c7a11bffa0fbf385687a7", size = 7162325, upload-time = "2026-04-14T12:50:11.025Z" }, - { url = "https://files.pythonhosted.org/packages/b8/4d/a92e635cf1d38140c04fd504d07bacfd5d814753449fa75f3e7187660adb/mujoco-3.7.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a7216abee4fafe4bd6121548cb22dcacdb27f466da2f645bd9cbc404c0a29c9", size = 6709175, upload-time = "2026-04-14T12:50:13.188Z" }, - { url = "https://files.pythonhosted.org/packages/97/fa/a8698b7bb0168483845f8e492991ee5cf01695eb0d1f20ea7d8bf3d61344/mujoco-3.7.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:79f741d93e2b4f315f32e7199ab1b21eb7b7ed82acd9daf7be24382e436f98b7", size = 7201272, upload-time = "2026-04-14T12:50:15.264Z" }, - { url = "https://files.pythonhosted.org/packages/32/b9/96a06d51d1b8a13ce010642c5e9a1b83b2364d6c290c2aba2d8c515f9871/mujoco-3.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:b0e31da299ff182d1063f9991ad21c21a8b086593b8dee588d19e910f2e49833", size = 5757244, upload-time = "2026-04-14T12:50:17.562Z" }, - { url = "https://files.pythonhosted.org/packages/4b/c8/1fedcd0d0b7dd86dd238502bc8ea228c0d48a901ffffc5d39272351c515c/mujoco-3.7.0-cp313-cp313-macosx_10_16_x86_64.whl", hash = "sha256:79730bec1e23a324cf66ccfa93585b5a8d3ba162d813cc0bfa6a42f776079983", size = 7217812, upload-time = "2026-04-14T12:50:19.583Z" }, - { url = "https://files.pythonhosted.org/packages/0f/75/8a54a9de7581f46e8dfb248d8cd2f972ef0d1db6ecfd8a70abb4ab12d56b/mujoco-3.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:70e008a2080156121caa60a7e0736c20b2278108cb35d1ab732f98e2e7c334f1", size = 7162437, upload-time = "2026-04-14T12:50:21.983Z" }, - { url = "https://files.pythonhosted.org/packages/64/bf/2180496f94c7a96b4520ad06e54505ef39b84b205ad674494c0d014584be/mujoco-3.7.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:4a81480e6dbbdcb5a5f4027c825935d5bcd34e3b9865bb818698701ee089e12f", size = 6709051, upload-time = "2026-04-14T12:50:24.407Z" }, - { url = "https://files.pythonhosted.org/packages/0f/de/4ebdd81f66f2d3879335e0f21f7be9b552d7edbc80c53f31472706d4e338/mujoco-3.7.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:246f61c885d8ac291a4f3b8e10d1deb3820302ab7a48b1edbf8a3539722841c3", size = 7201646, upload-time = "2026-04-14T12:50:26.473Z" }, - { url = "https://files.pythonhosted.org/packages/c8/24/313f0d31628123593af23441df7124ca1cc26dd0611ed5d31675899b190e/mujoco-3.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:67cae63b4c2c439ebffce7e185a0e129446a636af538dd3ccb5c9cd3f674033c", size = 5757431, upload-time = "2026-04-14T12:50:29.027Z" }, + { url = "https://files.pythonhosted.org/packages/b4/0d/35aad24bef1f36e9ebf63367938b16abec82407338d612c37624ff20b0e3/mujoco-3.8.0-cp312-cp312-macosx_10_16_x86_64.whl", hash = "sha256:a495da0cd01aff6ac94ec97f0a1d913e1afe071daf107e220f81814435227982", size = 7265096, upload-time = "2026-04-24T22:58:33.475Z" }, + { url = "https://files.pythonhosted.org/packages/60/d7/2ee5a123431eb50f234de2759e46f3d0c02876e0b1ffce1b26102ed388e7/mujoco-3.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cc1d25b0cd47248fd39681310950b2bea0f6098f57358c0c02730d365bb80ba1", size = 7204862, upload-time = "2026-04-24T22:58:35.978Z" }, + { url = "https://files.pythonhosted.org/packages/aa/62/a488e6e0963e0210b8262650d25e51c4c597ff7beed4fe01a7e88e3abfc5/mujoco-3.8.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:980ab5a2210777cf766e53eb574726f9360e2a87e47d83a6c8d801fb71f2fe52", size = 6743542, upload-time = "2026-04-24T22:58:38.137Z" }, + { url = "https://files.pythonhosted.org/packages/6f/de/bc2271210dad5c6ab73af294779226308e9cf4ed8bc2dbe59922eb8702ed/mujoco-3.8.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:323fedd14905b73cfe56ea8ff916716ccf8b57cff348a7aa6932c8983a465d64", size = 7226045, upload-time = "2026-04-24T22:58:41.117Z" }, + { url = "https://files.pythonhosted.org/packages/65/87/198a88747ff0c01e35070c0c80ae0c05ff8d1a61d6e6f379a4e5ff3e6185/mujoco-3.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:8db22dbc5a6c98241549c8161f20a2b0c2ccc5d08fa42595e7a4b594e35a70dd", size = 5813167, upload-time = "2026-04-24T22:58:43.469Z" }, + { url = "https://files.pythonhosted.org/packages/11/7d/41c73ebe93565ed196ec5ad012232138e3d10850e841ccd77d459afc4383/mujoco-3.8.0-cp313-cp313-macosx_10_16_x86_64.whl", hash = "sha256:d4a080aab0be4d02162e6fe3bcd7163c01cc751638f5a84ba05477b512d95cc0", size = 7265494, upload-time = "2026-04-24T22:58:45.119Z" }, + { url = "https://files.pythonhosted.org/packages/7c/05/d21b43c31c5d9179c2d33e0d38896775b262a9d78729b760717927a02e28/mujoco-3.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5aa987e70a6601ebf02d123d9842f2d1b8f8057163feec1f0a5a049de1cbe252", size = 7205049, upload-time = "2026-04-24T22:58:46.874Z" }, + { url = "https://files.pythonhosted.org/packages/6d/9c/af181776d0ffb70ad6a4365f0613529f268782850dedab0569c6cce83fcc/mujoco-3.8.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8bd33b7e2382605012dfe41c00a8d3bb358153e5b019d920a52c31664472ce20", size = 6743578, upload-time = "2026-04-24T22:58:48.862Z" }, + { url = "https://files.pythonhosted.org/packages/89/8a/c9b28784a7e51926d609b70842e16b85e286df87ad861dbbb26c4e49cacf/mujoco-3.8.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f2b3de0c9fed950c5080ea4b3ff1fb5c89f88e22798f1e1693ec8dbbd36de00b", size = 7226464, upload-time = "2026-04-24T22:58:51.039Z" }, + { url = "https://files.pythonhosted.org/packages/50/3f/0a72c74dd766524b9f1b79f0d6d327b9a797d87b44fe62b3068b44123b54/mujoco-3.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:de03d173f4d9c7341b5dcc10a8eddb36bb19989df68f24369dc7c782dc053f11", size = 5813265, upload-time = 
"2026-04-24T22:58:53.316Z" }, + { url = "https://files.pythonhosted.org/packages/81/f7/afdbcb4ad50786ed7500205f29ffb5c3a5ef9d42e6b3ad8f9636c4911687/mujoco-3.8.0-cp314-cp314-macosx_10_16_x86_64.whl", hash = "sha256:09c27fc6ce1560912e920789bc121290e4c84919ae30f7b54da5efed4cd2804a", size = 7320672, upload-time = "2026-04-24T22:58:55.469Z" }, + { url = "https://files.pythonhosted.org/packages/4a/8a/6299f6209084dda9469374461c77adad5d63041427b9b9bd4fadaf0c35b0/mujoco-3.8.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5fc4e930cd1414f965381ac97ec054a001539e7aa462836145f6a3201b0dfe88", size = 7249791, upload-time = "2026-04-24T22:58:57.126Z" }, + { url = "https://files.pythonhosted.org/packages/df/3c/a74d169b7725aee971962238d2aee767f64496ede1367cd558361c97d5ca/mujoco-3.8.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:14c32992267906d422ed2127e99aa9ad036a62324139da2a3bd25df1e928d0ee", size = 6754009, upload-time = "2026-04-24T22:59:00.375Z" }, + { url = "https://files.pythonhosted.org/packages/66/36/f3610724bb35f6cfb2ffebe9d8d315975e5fa9722146ad58298df442da7e/mujoco-3.8.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1373c4744a3424f1aa224d2ad5201497ea150c4698beb9aaeb8c3560efa60fdb", size = 7227764, upload-time = "2026-04-24T22:59:02.722Z" }, + { url = "https://files.pythonhosted.org/packages/92/5a/80f5347c322300e4402b08df74e7489756e9af060bb8b8d342086dd5b41c/mujoco-3.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:f8da8fc4a2861f9d1eef64d83adcd783bfe5c02bdc78af2d963d942d097dfdce", size = 6144239, upload-time = "2026-04-24T22:59:05.329Z" }, ] [[package]] @@ -3746,7 +3695,7 @@ wheels = [ [[package]] name = "mypy" -version = "1.20.1" +version = "1.20.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "librt", marker = "platform_python_implementation != 'PyPy'" }, @@ -3754,37 +3703,37 @@ dependencies = [ { name = "pathspec" }, { name = "typing-extensions" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/0b/3d/5b373635b3146264eb7a68d09e5ca11c305bbb058dfffbb47c47daf4f632/mypy-1.20.1.tar.gz", hash = "sha256:6fc3f4ecd52de81648fed1945498bf42fa2993ddfad67c9056df36ae5757f804", size = 3815892, upload-time = "2026-04-13T02:46:51.474Z" } +sdist = { url = "https://files.pythonhosted.org/packages/04/af/e3d4b3e9ec91a0ff9aabfdb38692952acf49bbb899c2e4c29acb3a6da3ae/mypy-1.20.2.tar.gz", hash = "sha256:e8222c26daaafd9e8626dec58ae36029f82585890589576f769a650dd20fd665", size = 3817349, upload-time = "2026-04-21T17:12:28.473Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/69/1b/75a7c825a02781ca10bc2f2f12fba2af5202f6d6005aad8d2d1f264d8d78/mypy-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:36ee2b9c6599c230fea89bbd79f401f9f9f8e9fcf0c777827789b19b7da90f51", size = 14494077, upload-time = "2026-04-13T02:45:55.085Z" }, - { url = "https://files.pythonhosted.org/packages/b0/54/5e5a569ea5c2b4d48b729fb32aa936eeb4246e4fc3e6f5b3d36a2dfbefb9/mypy-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fba3fb0968a7b48806b0c90f38d39296f10766885a94c83bd21399de1e14eb28", size = 13319495, upload-time = "2026-04-13T02:45:29.674Z" }, - { url = "https://files.pythonhosted.org/packages/6f/a4/a1945b19f33e91721b59deee3abb484f2fa5922adc33bb166daf5325d76d/mypy-1.20.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef1415a637cd3627d6304dfbeddbadd21079dafc2a8a753c477ce4fc0c2af54f", size = 13696948, upload-time = "2026-04-13T02:46:15.006Z" }, - { url = "https://files.pythonhosted.org/packages/b2/c6/75e969781c2359b2f9c15b061f28ec6d67c8b61865ceda176e85c8e7f2de/mypy-1.20.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ef3461b1ad5cd446e540016e90b5984657edda39f982f4cc45ca317b628f5a37", size = 14706744, upload-time = "2026-04-13T02:46:00.482Z" }, - { url = 
"https://files.pythonhosted.org/packages/a8/6e/b221b1de981fc4262fe3e0bf9ec272d292dfe42394a689c2d49765c144c4/mypy-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:542dd63c9e1339b6092eb25bd515f3a32a1453aee8c9521d2ddb17dacd840237", size = 14949035, upload-time = "2026-04-13T02:45:06.021Z" }, - { url = "https://files.pythonhosted.org/packages/ca/4b/298ba2de0aafc0da3ff2288da06884aae7ba6489bc247c933f87847c41b3/mypy-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:1d55c7cd8ca22e31f93af2a01160a9e95465b5878de23dba7e48116052f20a8d", size = 10883216, upload-time = "2026-04-13T02:45:47.232Z" }, - { url = "https://files.pythonhosted.org/packages/c7/f9/5e25b8f0b8cb92f080bfed9c21d3279b2a0b6a601cdca369a039ba84789d/mypy-1.20.1-cp312-cp312-win_arm64.whl", hash = "sha256:f5b84a79070586e0d353ee07b719d9d0a4aa7c8ee90c0ea97747e98cbe193019", size = 9814299, upload-time = "2026-04-13T02:45:21.934Z" }, - { url = "https://files.pythonhosted.org/packages/21/e8/ef0991aa24c8f225df10b034f3c2681213cb54cf247623c6dec9a5744e70/mypy-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8f3886c03e40afefd327bd70b3f634b39ea82e87f314edaa4d0cce4b927ddcc1", size = 14500739, upload-time = "2026-04-13T02:46:05.442Z" }, - { url = "https://files.pythonhosted.org/packages/23/73/416ebec3047636ed89fa871dc8c54bf05e9e20aa9499da59790d7adb312d/mypy-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e860eb3904f9764e83bafd70c8250bdffdc7dde6b82f486e8156348bf7ceb184", size = 13314735, upload-time = "2026-04-13T02:46:47.154Z" }, - { url = "https://files.pythonhosted.org/packages/10/1e/1505022d9c9ac2e014a384eb17638fb37bf8e9d0a833ea60605b66f8f7ba/mypy-1.20.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a4b5aac6e785719da51a84f5d09e9e843d473170a9045b1ea7ea1af86225df4b", size = 13704356, upload-time = "2026-04-13T02:45:19.773Z" }, - { url = 
"https://files.pythonhosted.org/packages/98/91/275b01f5eba5c467a3318ec214dd865abb66e9c811231c8587287b92876a/mypy-1.20.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f37b6cd0fe2ad3a20f05ace48ca3523fc52ff86940e34937b439613b6854472e", size = 14696420, upload-time = "2026-04-13T02:45:24.205Z" }, - { url = "https://files.pythonhosted.org/packages/a1/57/b3779e134e1b7250d05f874252780d0a88c068bc054bcff99ca20a3a2986/mypy-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4bbb0f6b54ce7cc350ef4a770650d15fa70edd99ad5267e227133eda9c94218", size = 14936093, upload-time = "2026-04-13T02:45:32.087Z" }, - { url = "https://files.pythonhosted.org/packages/be/33/81b64991b0f3f278c3b55c335888794af190b2d59031a5ad1401bcb69f1e/mypy-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:c3dc20f8ec76eecd77148cdd2f1542ed496e51e185713bf488a414f862deb8f2", size = 10889659, upload-time = "2026-04-13T02:46:02.926Z" }, - { url = "https://files.pythonhosted.org/packages/1b/fd/7adcb8053572edf5ef8f3db59599dfeeee3be9cc4c8c97e2d28f66f42ac5/mypy-1.20.1-cp313-cp313-win_arm64.whl", hash = "sha256:a9d62bbac5d6d46718e2b0330b25e6264463ed832722b8f7d4440ff1be3ca895", size = 9815515, upload-time = "2026-04-13T02:46:32.103Z" }, - { url = "https://files.pythonhosted.org/packages/40/cd/db831e84c81d57d4886d99feee14e372f64bbec6a9cb1a88a19e243f2ef5/mypy-1.20.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:12927b9c0ed794daedcf1dab055b6c613d9d5659ac511e8d936d96f19c087d12", size = 14483064, upload-time = "2026-04-13T02:45:26.901Z" }, - { url = "https://files.pythonhosted.org/packages/d5/82/74e62e7097fa67da328ac8ece8de09133448c04d20ddeaeba251a3000f01/mypy-1.20.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:752507dd481e958b2c08fc966d3806c962af5a9433b5bf8f3bdd7175c20e34fe", size = 13335694, upload-time = "2026-04-13T02:46:12.514Z" }, - { url = 
"https://files.pythonhosted.org/packages/74/c4/97e9a0abe4f3cdbbf4d079cb87a03b786efeccf5bf2b89fe4f96939ab2e6/mypy-1.20.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c614655b5a065e56274c6cbbe405f7cf7e96c0654db7ba39bc680238837f7b08", size = 13726365, upload-time = "2026-04-13T02:45:17.422Z" }, - { url = "https://files.pythonhosted.org/packages/d7/aa/a19d884a8d28fcd3c065776323029f204dbc774e70ec9c85eba228b680de/mypy-1.20.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2c3f6221a76f34d5100c6d35b3ef6b947054123c3f8d6938a4ba00b1308aa572", size = 14693472, upload-time = "2026-04-13T02:46:41.253Z" }, - { url = "https://files.pythonhosted.org/packages/84/44/cc9324bd21cf786592b44bf3b5d224b3923c1230ec9898d508d00241d465/mypy-1.20.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4bdfc06303ac06500af71ea0cdbe995c502b3c9ba32f3f8313523c137a25d1b6", size = 14919266, upload-time = "2026-04-13T02:46:28.37Z" }, - { url = "https://files.pythonhosted.org/packages/6e/dc/779abb25a8c63e8f44bf5a336217fa92790fa17e0c40e0c725d10cb01bbd/mypy-1.20.1-cp314-cp314-win_amd64.whl", hash = "sha256:0131edd7eba289973d1ba1003d1a37c426b85cdef76650cd02da6420898a5eb3", size = 11049713, upload-time = "2026-04-13T02:45:57.673Z" }, - { url = "https://files.pythonhosted.org/packages/28/08/4172be2ad7de9119b5a92ca36abbf641afdc5cb1ef4ae0c3a8182f29674f/mypy-1.20.1-cp314-cp314-win_arm64.whl", hash = "sha256:33f02904feb2c07e1fdf7909026206396c9deeb9e6f34d466b4cfedb0aadbbe4", size = 9999819, upload-time = "2026-04-13T02:46:35.039Z" }, - { url = "https://files.pythonhosted.org/packages/2d/af/af9e46b0c8eabbce9fc04a477564170f47a1c22b308822282a59b7ff315f/mypy-1.20.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:168472149dd8cc505c98cefd21ad77e4257ed6022cd5ed2fe2999bed56977a5a", size = 15547508, upload-time = "2026-04-13T02:46:25.588Z" }, - { url = 
"https://files.pythonhosted.org/packages/a7/cd/39c9e4ad6ba33e069e5837d772a9e6c304b4a5452a14a975d52b36444650/mypy-1.20.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:eb674600309a8f22790cca883a97c90299f948183ebb210fbef6bcee07cb1986", size = 14399557, upload-time = "2026-04-13T02:46:10.021Z" }, - { url = "https://files.pythonhosted.org/packages/83/c1/3fd71bdc118ffc502bf57559c909927bb7e011f327f7bb8e0488e98a5870/mypy-1.20.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef2b2e4cc464ba9795459f2586923abd58a0055487cbe558cb538ea6e6bc142a", size = 15045789, upload-time = "2026-04-13T02:45:10.81Z" }, - { url = "https://files.pythonhosted.org/packages/8e/73/6f07ff8b57a7d7b3e6e5bf34685d17632382395c8bb53364ec331661f83e/mypy-1.20.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dee461d396dd46b3f0ed5a098dbc9b8860c81c46ad44fa071afcfbc149f167c9", size = 15850795, upload-time = "2026-04-13T02:45:03.349Z" }, - { url = "https://files.pythonhosted.org/packages/ec/e2/f7dffec1c7767078f9e9adf0c786d1fe0ff30964a77eb213c09b8b58cb76/mypy-1.20.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e364926308b3e66f1361f81a566fc1b2f8cd47fc8525e8136d4058a65a4b4f02", size = 16088539, upload-time = "2026-04-13T02:46:17.841Z" }, - { url = "https://files.pythonhosted.org/packages/1a/76/e0dee71035316e75a69d73aec2f03c39c21c967b97e277fd0ef8fd6aec66/mypy-1.20.1-cp314-cp314t-win_amd64.whl", hash = "sha256:a0c17fbd746d38c70cbc42647cfd884f845a9708a4b160a8b4f7e70d41f4d7fa", size = 12575567, upload-time = "2026-04-13T02:45:34.795Z" }, - { url = "https://files.pythonhosted.org/packages/22/a8/7ed43c9d9c3d1468f86605e323a5d97e411a448790a00f07e779f3211a46/mypy-1.20.1-cp314-cp314t-win_arm64.whl", hash = "sha256:db2cb89654626a912efda69c0d5c1d22d948265e2069010d3dde3abf751c7d08", size = 10378823, upload-time = "2026-04-13T02:45:13.35Z" }, - { url = 
"https://files.pythonhosted.org/packages/d8/28/926bd972388e65a39ee98e188ccf67e81beb3aacfd5d6b310051772d974b/mypy-1.20.1-py3-none-any.whl", hash = "sha256:1aae28507f253fe82d883790d1c0a0d35798a810117c88184097fe8881052f06", size = 2636553, upload-time = "2026-04-13T02:46:30.45Z" }, + { url = "https://files.pythonhosted.org/packages/71/4e/7560e4528db9e9b147e4c0f22660466bf30a0a1fe3d63d1b9d3b0fd354ee/mypy-1.20.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4dbfcf869f6b0517f70cf0030ba6ea1d6645e132337a7d5204a18d8d5636c02b", size = 14539393, upload-time = "2026-04-21T17:07:12.52Z" }, + { url = "https://files.pythonhosted.org/packages/32/d9/34a5efed8124f5a9234f55ac6a4ced4201e2c5b81e1109c49ad23190ec8c/mypy-1.20.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4b6481b228d072315b053210b01ac320e1be243dc17f9e5887ef167f23f5fae4", size = 13361642, upload-time = "2026-04-21T17:06:53.742Z" }, + { url = "https://files.pythonhosted.org/packages/d1/14/eb377acf78c03c92d566a1510cda8137348215b5335085ef662ab82ecd3a/mypy-1.20.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:34397cdced6b90b836e38182076049fdb41424322e0b0728c946b0939ebdf9f6", size = 13740347, upload-time = "2026-04-21T17:12:04.73Z" }, + { url = "https://files.pythonhosted.org/packages/b9/94/7e4634a32b641aa1c112422eed1bbece61ee16205f674190e8b536f884de/mypy-1.20.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a5da6976f20cae27059ea8d0c86e7cef3de720e04c4bb9ee18e3690fdb792066", size = 14734042, upload-time = "2026-04-21T17:07:43.16Z" }, + { url = "https://files.pythonhosted.org/packages/7a/f3/f7e62395cb7f434541b4491a01149a4439e28ace4c0c632bbf5431e92d1f/mypy-1.20.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:56908d7e08318d39f85b1f0c6cfd47b0cac1a130da677630dac0de3e0623e102", size = 14964958, upload-time = "2026-04-21T17:11:00.665Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/0d/47e3c3a0ec2a876e35aeac365df3cac7776c36bbd4ed18cc521e1b9d255b/mypy-1.20.2-cp312-cp312-win_amd64.whl", hash = "sha256:d52ad8d78522da1d308789df651ee5379088e77c76cb1994858d40a426b343b9", size = 10911340, upload-time = "2026-04-21T17:10:49.179Z" }, + { url = "https://files.pythonhosted.org/packages/d6/b2/6c852d72e0ea8b01f49da817fb52539993cde327e7d010e0103dc12d0dac/mypy-1.20.2-cp312-cp312-win_arm64.whl", hash = "sha256:785b08db19c9f214dc37d65f7c165d19a30fcecb48abfa30f31b01b5acaabb58", size = 9833947, upload-time = "2026-04-21T17:09:05.267Z" }, + { url = "https://files.pythonhosted.org/packages/5b/c4/b93812d3a192c9bcf5df405bd2f30277cd0e48106a14d1023c7f6ed6e39b/mypy-1.20.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:edfbfca868cdd6bd8d974a60f8a3682f5565d3f5c99b327640cedd24c4264026", size = 14524670, upload-time = "2026-04-21T17:10:30.737Z" }, + { url = "https://files.pythonhosted.org/packages/f3/47/42c122501bff18eaf1e8f457f5c017933452d8acdc52918a9f59f6812955/mypy-1.20.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e2877a02380adfcdbc69071a0f74d6e9dbbf593c0dc9d174e1f223ffd5281943", size = 13336218, upload-time = "2026-04-21T17:08:44.069Z" }, + { url = "https://files.pythonhosted.org/packages/92/8f/75bbc92f41725fbd585fb17b440b1119b576105df1013622983e18640a93/mypy-1.20.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7488448de6007cd5177c6cea0517ac33b4c0f5ee9b5e9f2be51ce75511a85517", size = 13724906, upload-time = "2026-04-21T17:08:01.02Z" }, + { url = "https://files.pythonhosted.org/packages/a1/32/4c49da27a606167391ff0c39aa955707a00edc500572e562f7c36c08a71f/mypy-1.20.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bb9c2fa06887e21d6a3a868762acb82aec34e2c6fd0174064f27c93ede68ad15", size = 14726046, upload-time = "2026-04-21T17:11:22.354Z" }, + { url = 
"https://files.pythonhosted.org/packages/7f/fc/4e354a1bd70216359deb0c9c54847ee6b32ef78dfb09f5131ff99b494078/mypy-1.20.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d56a78b646f2e3daa865bc70cd5ec5a46c50045801ca8ff17a0c43abc97e3ee", size = 14955587, upload-time = "2026-04-21T17:12:16.033Z" }, + { url = "https://files.pythonhosted.org/packages/62/b2/c0f2056e9eb8f08c62cafd9715e4584b89132bdc832fcf85d27d07b5f3e5/mypy-1.20.2-cp313-cp313-win_amd64.whl", hash = "sha256:2a4102b03bb7481d9a91a6da8d174740c9c8c4401024684b9ca3b7cc5e49852f", size = 10922681, upload-time = "2026-04-21T17:06:35.842Z" }, + { url = "https://files.pythonhosted.org/packages/e5/14/065e333721f05de8ef683d0aa804c23026bcc287446b61cac657b902ccac/mypy-1.20.2-cp313-cp313-win_arm64.whl", hash = "sha256:a95a9248b0c6fd933a442c03c3b113c3b61320086b88e2c444676d3fd1ca3330", size = 9830560, upload-time = "2026-04-21T17:07:51.023Z" }, + { url = "https://files.pythonhosted.org/packages/ae/d1/b4ec96b0ecc620a4443570c6e95c867903428cfcde4206518eafdd5880c3/mypy-1.20.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:419413398fe250aae057fd2fe50166b61077083c9b82754c341cf4fd73038f30", size = 14524561, upload-time = "2026-04-21T17:06:27.325Z" }, + { url = "https://files.pythonhosted.org/packages/3a/63/d2c2ff4fa66bc49477d32dfa26e8a167ba803ea6a69c5efb416036909d30/mypy-1.20.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:e73c07f23009962885c197ccb9b41356a30cc0e5a1d0c2ea8fd8fb1362d7f924", size = 13363883, upload-time = "2026-04-21T17:11:11.239Z" }, + { url = "https://files.pythonhosted.org/packages/2a/56/983916806bf4eddeaaa2c9230903c3669c6718552a921154e1c5182c701f/mypy-1.20.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c64e5973df366b747646fc98da921f9d6eba9716d57d1db94a83c026a08e0fb", size = 13742945, upload-time = "2026-04-21T17:08:34.181Z" }, + { url = 
"https://files.pythonhosted.org/packages/19/65/0cd9285ab010ee8214c83d67c6b49417c40d86ce46f1aa109457b5a9b8d7/mypy-1.20.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a65aa591af023864fd08a97da9974e919452cfe19cb146c8a5dc692626445dc", size = 14706163, upload-time = "2026-04-21T17:05:15.51Z" }, + { url = "https://files.pythonhosted.org/packages/94/97/48ff3b297cafcc94d185243a9190836fb1b01c1b0918fff64e941e973cc9/mypy-1.20.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4fef51b01e638974a6e69885687e9bd40c8d1e09a6cd291cca0619625cf1f558", size = 14938677, upload-time = "2026-04-21T17:05:39.562Z" }, + { url = "https://files.pythonhosted.org/packages/fd/a1/1b4233d255bdd0b38a1f284feeb1c143ca508c19184964e22f8d837ec851/mypy-1.20.2-cp314-cp314-win_amd64.whl", hash = "sha256:913485a03f1bcf5d279409a9d2b9ed565c151f61c09f29991e5faa14033da4c8", size = 11089322, upload-time = "2026-04-21T17:06:44.29Z" }, + { url = "https://files.pythonhosted.org/packages/78/c2/ce7ee2ba36aeb954ba50f18fa25d9c1188578654b97d02a66a15b6f09531/mypy-1.20.2-cp314-cp314-win_arm64.whl", hash = "sha256:c3bae4f855d965b5453784300c12ffc63a548304ac7f99e55d4dc7c898673aa3", size = 10017775, upload-time = "2026-04-21T17:07:20.732Z" }, + { url = "https://files.pythonhosted.org/packages/4e/a1/9d93a7d0b5859af0ead82b4888b46df6c8797e1bc5e1e262a08518c6d48e/mypy-1.20.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:2de3dcea53babc1c3237a19002bc3d228ce1833278f093b8d619e06e7cc79609", size = 15549002, upload-time = "2026-04-21T17:08:23.107Z" }, + { url = "https://files.pythonhosted.org/packages/00/d2/09a6a10ee1bf0008f6c144d9676f2ca6a12512151b4e0ad0ff6c4fac5337/mypy-1.20.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:52b176444e2e5054dfcbcb8c75b0b719865c96247b37407184bbfca5c353f2c2", size = 14401942, upload-time = "2026-04-21T17:07:31.837Z" }, + { url = 
"https://files.pythonhosted.org/packages/57/da/9594b75c3c019e805250bed3583bdf4443ff9e6ef08f97e39ae308cb06f2/mypy-1.20.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:688c3312e5dadb573a2c69c82af3a298d43ecf9e6d264e0f95df960b5f6ac19c", size = 15041649, upload-time = "2026-04-21T17:09:34.653Z" }, + { url = "https://files.pythonhosted.org/packages/97/77/f75a65c278e6e8eba2071f7f5a90481891053ecc39878cc444634d892abe/mypy-1.20.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:29752dbbf8cc53f89f6ac096d363314333045c257c9c75cbd189ca2de0455744", size = 15864588, upload-time = "2026-04-21T17:11:44.936Z" }, + { url = "https://files.pythonhosted.org/packages/d7/46/1a4e1c66e96c1a3246ddf5403d122ac9b0a8d2b7e65730b9d6533ba7a6d3/mypy-1.20.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:803203d2b6ea644982c644895c2f78b28d0e208bba7b27d9b921e0ec5eb207c6", size = 16093956, upload-time = "2026-04-21T17:10:17.683Z" }, + { url = "https://files.pythonhosted.org/packages/5a/2c/78a8851264dec38cd736ca5b8bc9380674df0dd0be7792f538916157716c/mypy-1.20.2-cp314-cp314t-win_amd64.whl", hash = "sha256:9bcb8aa397ff0093c824182fd76a935a9ba7ad097fcbef80ae89bf6c1731d8ec", size = 12568661, upload-time = "2026-04-21T17:11:54.473Z" }, + { url = "https://files.pythonhosted.org/packages/83/01/cd7318aa03493322ce275a0e14f4f52b8896335e4e79d4fb8153a7ad2b77/mypy-1.20.2-cp314-cp314t-win_arm64.whl", hash = "sha256:e061b58443f1736f8a37c48978d7ab581636d6ab03e3d4f99e3fa90463bb9382", size = 10389240, upload-time = "2026-04-21T17:09:42.719Z" }, + { url = "https://files.pythonhosted.org/packages/28/9a/f23c163e25b11074188251b0b5a0342625fc1cdb6af604757174fa9acc9b/mypy-1.20.2-py3-none-any.whl", hash = "sha256:a94c5a76ab46c5e6257c7972b6c8cff0574201ca7dc05647e33e795d78680563", size = 2637314, upload-time = "2026-04-21T17:05:54.5Z" }, ] [[package]] @@ -3906,7 +3855,7 @@ wheels = [ [[package]] name = "notebook" -version 
= "7.5.5" +version = "7.5.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jupyter-server" }, @@ -3915,9 +3864,9 @@ dependencies = [ { name = "notebook-shim" }, { name = "tornado" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1f/6d/41052c48d6f6349ca0a7c4d1f6a78464de135e6d18f5829ba2510e62184c/notebook-7.5.5.tar.gz", hash = "sha256:dc0bfab0f2372c8278c457423d3256c34154ac2cc76bf20e9925260c461013c3", size = 14169167, upload-time = "2026-03-11T16:32:51.922Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/c2/cf59bd2e6f2c8b976b52477e3e53bf6f97bc714ed046a51821afb428eaee/notebook-7.5.6.tar.gz", hash = "sha256:621174aade80108f0020b0f00738000b215f75fa3cd90771ad7aa0f24536a4e1", size = 14170814, upload-time = "2026-04-30T11:46:26.613Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f8/aa/cbd1deb9f07446241e88f8d5fecccd95b249bca0b4e5482214a4d1714c49/notebook-7.5.5-py3-none-any.whl", hash = "sha256:a7c14dbeefa6592e87f72290ca982e0c10f5bbf3786be2a600fda9da2764a2b8", size = 14578929, upload-time = "2026-03-11T16:32:48.021Z" }, + { url = "https://files.pythonhosted.org/packages/e9/d6/1fd0646b9bbd9efbb0b8ae21b2325fbef515769a5621c03e31d8eb8da587/notebook-7.5.6-py3-none-any.whl", hash = "sha256:4dde3f8fb55fa8fb7946d58c6e869ce9baf46d00fc070664f62604569d0faca0", size = 14581730, upload-time = "2026-04-30T11:46:22.342Z" }, ] [[package]] @@ -3946,22 +3895,22 @@ wheels = [ [[package]] name = "numba" -version = "0.65.0" +version = "0.65.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llvmlite", marker = "sys_platform == 'linux'" }, { name = "numpy", marker = "sys_platform == 'linux'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/49/61/7299643b9c18d669e04be7c5bcb64d985070d07553274817b45b049e7bfe/numba-0.65.0.tar.gz", hash = "sha256:edad0d9f6682e93624c00125a471ae4df186175d71fd604c983c377cdc03e68b", size = 2764131, upload-time = "2026-04-01T03:52:01.946Z" } +sdist = { 
url = "https://files.pythonhosted.org/packages/f6/c5/db2ac3685833d626c0dcae6bd2330cd68433e1fd248d15f70998160d3ad7/numba-0.65.1.tar.gz", hash = "sha256:19357146c32fe9ed25059ab915e8465fb13951cf6b0aace3826b76886373ab23", size = 2765600, upload-time = "2026-04-24T02:02:56.551Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/73/36/88406bd58600cc696417b8e5dd6a056478da808f3eaf48d18e2421e0c2d9/numba-0.65.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a52d92ffd297c10364bce60cd1fcb88f99284ab5df085f2c6bcd1cb33b529a6f", size = 3801411, upload-time = "2026-04-01T03:51:34.321Z" }, - { url = "https://files.pythonhosted.org/packages/0c/61/ce753a1d7646dd477e16d15e89473703faebb8995d2f71d7ad69a540b565/numba-0.65.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da8e371e328c06d0010c3d8b44b21858652831b85bcfba78cb22c042e22dbd8e", size = 3501622, upload-time = "2026-04-01T03:51:36.348Z" }, - { url = "https://files.pythonhosted.org/packages/1b/8f/3d116e4b8e92f6abace431afa4b2b944f4d65bdee83af886f5c4b263df95/numba-0.65.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b8a9008411615c69d083d1dcf477f75a5aa727b30beb16e139799e2be945cdfd", size = 3809537, upload-time = "2026-04-01T03:51:41.42Z" }, - { url = "https://files.pythonhosted.org/packages/b5/2c/6a3ca4128e253cb67affe06deb47688f51ce968f5111e2a06d010e6f1fa6/numba-0.65.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:af96c0cba53664efcb361528b8c75e011a6556c859c7e08424c2715201c6cf7a", size = 3508615, upload-time = "2026-04-01T03:51:43.444Z" }, - { url = "https://files.pythonhosted.org/packages/24/8d/e12d6ff4b9119db3cbf7b2db1ce257576441bd3c76388c786dea74f20b02/numba-0.65.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:05c0a9fdf75d85f57dee47b719e8d6415707b80aae45d75f63f9dc1b935c29f7", size = 3778456, upload-time = "2026-04-01T03:51:48.552Z" }, - { url = 
"https://files.pythonhosted.org/packages/17/89/abcd83e76f6a773276fe76244140671bcc5bf820f6e2ae1a15362ae4c8c9/numba-0.65.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:583680e0e8faf124d362df23b4b593f3221a8996341a63d1b664c122401bec2f", size = 3478464, upload-time = "2026-04-01T03:51:50.527Z" }, - { url = "https://files.pythonhosted.org/packages/ff/e5/8267b0adb0c01b52b553df5062fbbb42c30ed5362d08b85cc913a36f838f/numba-0.65.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c7fa502960f7a2f3f5cb025bc7bff888a3551277b92431bfdc5ba2f11a375749", size = 3816373, upload-time = "2026-04-01T03:51:56.18Z" }, - { url = "https://files.pythonhosted.org/packages/b0/f5/b8397ca360971669a93706b9274592b6864e4367a37d498fbbcb62aa2d48/numba-0.65.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5046c63f783ca3eb6195f826a50797465e7c4ce811daa17c9bea47e310c9b964", size = 3532782, upload-time = "2026-04-01T03:51:58.387Z" }, + { url = "https://files.pythonhosted.org/packages/69/47/a415af0283e4db0398104c6d1c11c9861a98dc67a7aa442a7769ed5d6196/numba-0.65.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:52bc6f3ceb8fcaff9b2ae26b4c6b1e9fee39db8d355534c0fe4f39a901246b84", size = 3802467, upload-time = "2026-04-24T02:02:27.712Z" }, + { url = "https://files.pythonhosted.org/packages/46/36/246f73ec99cfeab2f2cb2ce7d4218766cc36a2da418901223f4f4da9c813/numba-0.65.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90ca10b3463bae0bd70589726fe3c77d01d6b5fc86bee54bcdf9fb6b47c28977", size = 3502628, upload-time = "2026-04-24T02:02:29.763Z" }, + { url = "https://files.pythonhosted.org/packages/a0/22/b8d873f6466b20aa563fc9b33acd48dec89a07803ddaa2f1c8ca1cd33126/numba-0.65.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c09f49117ef255e1f1c6dad0c7a1ed39868243862a73be5706793241a3755f1b", size = 3810619, upload-time = 
"2026-04-24T02:02:36.041Z" }, + { url = "https://files.pythonhosted.org/packages/62/08/e16a8b5d9a018962ebb5c66be662317cde32b9f5dab08441f90bed5522fb/numba-0.65.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:594a8680b3fadac99e97e489b1fd89007177e5336713745c3b769528c635a464", size = 3509783, upload-time = "2026-04-24T02:02:38.245Z" }, + { url = "https://files.pythonhosted.org/packages/87/96/f3eb235fafa82a34e2ab5dd7dc9ffff998ebf5f0bbc23fa56a96aeb44da6/numba-0.65.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:71e73029bf53a62cc6afcf96be4bd942290d8b4c55f0a454fb536158115790f7", size = 3779602, upload-time = "2026-04-24T02:02:43.726Z" }, + { url = "https://files.pythonhosted.org/packages/09/90/b0f09b48752d23640b8284f22aa597737e8adaddc7fbfacc4708b7f73a4c/numba-0.65.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a07635e0be926b9bdbffb09137c230fb13f6ec0e564914ba937cee12ce3eb35", size = 3479532, upload-time = "2026-04-24T02:02:45.427Z" }, + { url = "https://files.pythonhosted.org/packages/03/36/98ddbcf3e4f04a6dd07e1c67249955920579ba4af6bb6868e3088f4ed282/numba-0.65.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:78abc28feff2c2ff8307fff3975b6438352759c9acb797ecd6b1fb6e7e39e31d", size = 3817198, upload-time = "2026-04-24T02:02:51.266Z" }, + { url = "https://files.pythonhosted.org/packages/a3/83/0dad21057ece5a835599f5d24099b091703995e23dbbf894f259e91c010b/numba-0.65.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee7676cb389555805f9b9a1840cbcd1ea6c8bd5376ab6918e3a29c5ea1dbda20", size = 3533862, upload-time = "2026-04-24T02:02:52.987Z" }, ] [[package]] @@ -4181,35 +4130,34 @@ wheels = [ [[package]] name = "onnxruntime" -version = "1.24.4" +version = "1.25.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "flatbuffers" }, { name = "numpy" }, { name = "packaging" }, { name = "protobuf" }, - { 
name = "sympy" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/38/31db1b232b4ba960065a90c1506ad7a56995cd8482033184e97fadca17cc/onnxruntime-1.24.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:cad1c2b3f455c55678ab2a8caa51fb420c25e6e3cf10f4c23653cdabedc8de78", size = 17341875, upload-time = "2026-03-17T22:05:51.669Z" }, - { url = "https://files.pythonhosted.org/packages/aa/60/c4d1c8043eb42f8a9aa9e931c8c293d289c48ff463267130eca97d13357f/onnxruntime-1.24.4-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1a5c5a544b22f90859c88617ecb30e161ee3349fcc73878854f43d77f00558b5", size = 15172485, upload-time = "2026-03-17T22:03:32.182Z" }, - { url = "https://files.pythonhosted.org/packages/6d/ab/5b68110e0460d73fad814d5bd11c7b1ddcce5c37b10177eb264d6a36e331/onnxruntime-1.24.4-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0d640eb9f3782689b55cfa715094474cd5662f2f137be6a6f847a594b6e9705c", size = 17244912, upload-time = "2026-03-17T22:04:37.251Z" }, - { url = "https://files.pythonhosted.org/packages/8b/f4/6b89e297b93704345f0f3f8c62229bee323ef25682a3f9b4f89a39324950/onnxruntime-1.24.4-cp312-cp312-win_amd64.whl", hash = "sha256:535b29475ca42b593c45fbb2152fbf1cdf3f287315bf650e6a724a0a1d065cdb", size = 12596856, upload-time = "2026-03-17T22:05:41.224Z" }, - { url = "https://files.pythonhosted.org/packages/43/06/8b8ec6e9e6a474fcd5d772453f627ad4549dfe3ab8c0bf70af5afcde551b/onnxruntime-1.24.4-cp312-cp312-win_arm64.whl", hash = "sha256:e6214096e14b7b52e3bee1903dc12dc7ca09cb65e26664668a4620cc5e6f9a90", size = 12270275, upload-time = "2026-03-17T22:05:31.132Z" }, - { url = "https://files.pythonhosted.org/packages/e9/f0/8a21ec0a97e40abb7d8da1e8b20fb9e1af509cc6d191f6faa75f73622fb2/onnxruntime-1.24.4-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e99a48078baaefa2b50fe5836c319499f71f13f76ed32d0211f39109147a49e0", size = 17341922, upload-time = "2026-03-17T22:03:56.364Z" }, - { url = 
"https://files.pythonhosted.org/packages/8b/25/d7908de8e08cee9abfa15b8aa82349b79733ae5865162a3609c11598805d/onnxruntime-1.24.4-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4aaed1e5e1aaacf2343c838a30a7c3ade78f13eeb16817411f929d04040a13", size = 15172290, upload-time = "2026-03-17T22:03:37.124Z" }, - { url = "https://files.pythonhosted.org/packages/7f/72/105ec27a78c5aa0154a7c0cd8c41c19a97799c3b12fc30392928997e3be3/onnxruntime-1.24.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e30c972bc02e072911aabb6891453ec73795386c0af2b761b65444b8a4c4745f", size = 17244738, upload-time = "2026-03-17T22:04:40.625Z" }, - { url = "https://files.pythonhosted.org/packages/05/fb/a592736d968c2f58e12de4d52088dda8e0e724b26ad5c0487263adb45875/onnxruntime-1.24.4-cp313-cp313-win_amd64.whl", hash = "sha256:3b6ba8b0181a3aa88edab00eb01424ffc06f42e71095a91186c2249415fcff93", size = 12597435, upload-time = "2026-03-17T22:05:43.826Z" }, - { url = "https://files.pythonhosted.org/packages/ad/04/ae2479e9841b64bd2eb44f8a64756c62593f896514369a11243b1b86ca5c/onnxruntime-1.24.4-cp313-cp313-win_arm64.whl", hash = "sha256:71d6a5c1821d6e8586a024000ece458db8f2fc0ecd050435d45794827ce81e19", size = 12269852, upload-time = "2026-03-17T22:05:33.353Z" }, - { url = "https://files.pythonhosted.org/packages/b4/af/a479a536c4398ffaf49fbbe755f45d5b8726bdb4335ab31b537f3d7149b8/onnxruntime-1.24.4-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1700f559c8086d06b2a4d5de51e62cb4ff5e2631822f71a36db8c72383db71ee", size = 15176861, upload-time = "2026-03-17T22:03:40.143Z" }, - { url = "https://files.pythonhosted.org/packages/be/13/19f5da70c346a76037da2c2851ecbf1266e61d7f0dcdb887c667210d4608/onnxruntime-1.24.4-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c74e268dc808e61e63784d43f9ddcdaf50a776c2819e8bd1d1b11ef64bf7e36", size = 17247454, upload-time = "2026-03-17T22:04:46.643Z" }, - { url = 
"https://files.pythonhosted.org/packages/89/db/b30dbbd6037847b205ab75d962bc349bf1e46d02a65b30d7047a6893ffd6/onnxruntime-1.24.4-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:fbff2a248940e3398ae78374c5a839e49a2f39079b488bc64439fa0ec327a3e4", size = 17343300, upload-time = "2026-03-17T22:03:59.223Z" }, - { url = "https://files.pythonhosted.org/packages/61/88/1746c0e7959961475b84c776d35601a21d445f463c93b1433a409ec3e188/onnxruntime-1.24.4-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2b7969e72d8cb53ffc88ab6d49dd5e75c1c663bda7be7eb0ece192f127343d1", size = 15175936, upload-time = "2026-03-17T22:03:43.671Z" }, - { url = "https://files.pythonhosted.org/packages/5f/ba/4699cde04a52cece66cbebc85bd8335a0d3b9ad485abc9a2e15946a1349d/onnxruntime-1.24.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14ed1f197fab812b695a5eaddb536c635e58a2fbbe50a517c78f082cc6ce9177", size = 17246432, upload-time = "2026-03-17T22:04:49.58Z" }, - { url = "https://files.pythonhosted.org/packages/ef/60/4590910841bb28bd3b4b388a9efbedf4e2d2cca99ddf0c863642b4e87814/onnxruntime-1.24.4-cp314-cp314-win_amd64.whl", hash = "sha256:311e309f573bf3c12aa5723e23823077f83d5e412a18499d4485c7eb41040858", size = 12903276, upload-time = "2026-03-17T22:05:46.349Z" }, - { url = "https://files.pythonhosted.org/packages/7f/6f/60e2c0acea1e1ac09b3e794b5a19c166eebf91c0b860b3e6db8e74983fda/onnxruntime-1.24.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f0b910e86b759a4732663ec61fd57ac42ee1b0066f68299de164220b660546d", size = 12594365, upload-time = "2026-03-17T22:05:35.795Z" }, - { url = "https://files.pythonhosted.org/packages/cf/68/0c05d10f8f6c40fe0912ebec0d5a33884aaa2af2053507e864dab0883208/onnxruntime-1.24.4-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa12ddc54c9c4594073abcaa265cd9681e95fb89dae982a6f508a794ca42e661", size = 15176889, upload-time = "2026-03-17T22:03:48.021Z" }, - { url = 
"https://files.pythonhosted.org/packages/6c/1d/1666dc64e78d8587d168fec4e3b7922b92eb286a2ddeebcf6acb55c7dc82/onnxruntime-1.24.4-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1cc6a518255f012134bc791975a6294806be9a3b20c4a54cca25194c90cf731", size = 17247021, upload-time = "2026-03-17T22:04:52.377Z" }, + { url = "https://files.pythonhosted.org/packages/c0/52/8b2a10e8dedf5d486332bc2b3bca0b1ed8049c0b9e4a5cced95413aadfdd/onnxruntime-1.25.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:66e52f7a30d1f780a34aa84d68a0a04d382d9f5b141884ecbf45b7566b9fbde9", size = 17770987, upload-time = "2026-04-27T22:00:47.985Z" }, + { url = "https://files.pythonhosted.org/packages/3f/87/a424d2867477c42ef8c60172709281120797f7b0f1fd33cc36b24329c825/onnxruntime-1.25.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a5f41779f044d1ff75593df5c10a4d311bc82563687796d5218e2685b8f9da25", size = 15871829, upload-time = "2026-04-27T21:59:39.088Z" }, + { url = "https://files.pythonhosted.org/packages/d4/55/7819e64c515f17c86005447ede8122b974ca851255a94125e2119376f0f8/onnxruntime-1.25.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:905409e9eb2ef87f8226e073f56e71faf731c3e480ebd34952cf953730e4a4ff", size = 18024586, upload-time = "2026-04-27T22:00:05.359Z" }, + { url = "https://files.pythonhosted.org/packages/89/36/b4f3eb5e95c66389aafd490950b5255e87c9333742cf90516eb50898e1dc/onnxruntime-1.25.1-cp312-cp312-win_amd64.whl", hash = "sha256:d4097b75b77486bb45835a8ed25b9a67976040ec6c258aeabae6aadfbdd1201c", size = 12905112, upload-time = "2026-04-27T22:00:36.478Z" }, + { url = "https://files.pythonhosted.org/packages/38/fa/e5c43397632a399f542663ed3e3e37763ee203ba845b10b266cd2ede8925/onnxruntime-1.25.1-cp312-cp312-win_arm64.whl", hash = "sha256:b6c7aa5cae606d5c90a392679fac074b60f80025a2e83e1e90fdf882bd2a97f0", size = 12634433, upload-time = "2026-04-27T22:00:25.918Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/ee/db3ac55ef770347a926ac0f1317df0ab42c8bc604350833b30c7356bf936/onnxruntime-1.25.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e9d9b3b1694196bc3c5bc66f760a237a5e27d7688aaa2e2c9c0f66abd0486699", size = 17770761, upload-time = "2026-04-27T21:59:54.853Z" }, + { url = "https://files.pythonhosted.org/packages/dc/9a/33225481a94a59906fce44e27ab12fc3bddd2aaecdc6160bd73341ca1aba/onnxruntime-1.25.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:311d29b943e46a55ca72ca1ea48d7815c993122bfc359f68215fddeb9583fff4", size = 15871542, upload-time = "2026-04-27T21:59:41.881Z" }, + { url = "https://files.pythonhosted.org/packages/8b/09/f20aac60f6fcf840543be54d4e9252cfeb7e8c2bb6d22477aaeb180e763e/onnxruntime-1.25.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:98016a038b31160db23208706139fa3b99cd60bc1c5ffdade77aafd6a37a92ad", size = 18036960, upload-time = "2026-04-27T22:00:10.739Z" }, + { url = "https://files.pythonhosted.org/packages/50/83/47964ac7e2f7e2f9e83c69ec466642c6835466252cc2ef0561eafeb56b66/onnxruntime-1.25.1-cp313-cp313-win_amd64.whl", hash = "sha256:08717d6eee2820807ba60b1b17032af207bd7aaca5b6c4abaee71f83feae877b", size = 12904886, upload-time = "2026-04-27T22:00:39.878Z" }, + { url = "https://files.pythonhosted.org/packages/d4/6c/a6c5aea47dc95fca7728f8a5af67c184ec9e7d4e7882125c7062e4bba8dd/onnxruntime-1.25.1-cp313-cp313-win_arm64.whl", hash = "sha256:84f8963d70e00167bae273ab7e80e9795bfc5eb94f6b23236a99c5c11af00844", size = 12634117, upload-time = "2026-04-27T22:00:29.15Z" }, + { url = "https://files.pythonhosted.org/packages/a8/8a/3b65e7911eec86c125e3d6f43d690a6f68671500543c0390ecd6eb59b771/onnxruntime-1.25.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:03e800b3a4b48d9f3a2d23aacc4fa95486a3b406b14e51d1a9b8b6981d9adf9c", size = 15882935, upload-time = "2026-04-27T21:59:44.912Z" }, + { url = 
"https://files.pythonhosted.org/packages/3c/bb/410a760694f8ae7bbfc5fa81ccbeb7da241e6d520ee02a333a439cf462a2/onnxruntime-1.25.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd83ef5c10cfc051a1cb465db692d57b996a1bc75a2a97b161398e29cdbc47ff", size = 18021727, upload-time = "2026-04-27T22:00:13.846Z" }, + { url = "https://files.pythonhosted.org/packages/fb/aa/04530bd38e31e26970fa1212346d76cf81705dc16a8ee5e6f4fb24634c11/onnxruntime-1.25.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:395eb662c437fa2407f44266e4778b75bff261b17c2a6fef042421f9069f871d", size = 17773721, upload-time = "2026-04-27T21:59:59.24Z" }, + { url = "https://files.pythonhosted.org/packages/ef/7f/ec79ab5cece6a688c944a7fa214a8511d548b9d5142a15d1a3d730b705f1/onnxruntime-1.25.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9ae85395f41b291ae3e61780ec5092640181d369ef6c268aa8141c478b509e69", size = 15875954, upload-time = "2026-04-27T21:59:49.394Z" }, + { url = "https://files.pythonhosted.org/packages/67/fe/20428215d822099ea2c1e3cf35c295cf1a58f467bf18b6c607597a39c18a/onnxruntime-1.25.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:828e1b12710fbedb6dfab5e7bae6f11563617cddf3c2e7e8d84c64de566a4a3a", size = 18038703, upload-time = "2026-04-27T22:00:16.199Z" }, + { url = "https://files.pythonhosted.org/packages/5a/b1/b15db965e6a68bc47ca7eb584de4e6b3d2d2f484d46cc57f715b596f6528/onnxruntime-1.25.1-cp314-cp314-win_amd64.whl", hash = "sha256:2affc9d2fd9ab013b9c9637464e649a0cca870d57ae18bfef74180eee65c3369", size = 13218513, upload-time = "2026-04-27T22:00:42.506Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f9/25cd2d1b29cdc8140eee4afbb6fb930b69125526632b1d579bc747975306/onnxruntime-1.25.1-cp314-cp314-win_arm64.whl", hash = "sha256:3387d75d1a815b4b2495b4e47a05ef1b3bcb64a817ddc68587e0bfcb9702bcf6", size = 12969835, upload-time = "2026-04-27T22:00:31.504Z" }, + { url = 
"https://files.pythonhosted.org/packages/8d/0e/6c507d1e65b2421fb44e241cbba577c7276792279485024fb1752b43f5c5/onnxruntime-1.25.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:06280b06604660595037f783c6d24bc70cbe5c6093975f194cd1482e77d450de", size = 15883298, upload-time = "2026-04-27T21:59:51.991Z" }, + { url = "https://files.pythonhosted.org/packages/df/4e/1c9df57496409dc86b320bd38f29ad7a34b7115e4f35b8fca44a827568a7/onnxruntime-1.25.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7e79fd5ce7db10ebcc24e020e2ed0159476e69e2326b9b7828e5aadcf6184212", size = 18021249, upload-time = "2026-04-27T22:00:18.954Z" }, ] [[package]] @@ -4324,20 +4272,20 @@ wheels = [ [[package]] name = "parso" -version = "0.8.6" +version = "0.8.7" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/81/76/a1e769043c0c0c9fe391b702539d594731a4362334cdf4dc25d0c09761e7/parso-0.8.6.tar.gz", hash = "sha256:2b9a0332696df97d454fa67b81618fd69c35a7b90327cbe6ba5c92d2c68a7bfd", size = 401621, upload-time = "2026-02-09T15:45:24.425Z" } +sdist = { url = "https://files.pythonhosted.org/packages/30/4b/90c937815137d43ce71ba043cd3566221e9df6b9c805f24b5d138c9d40a7/parso-0.8.7.tar.gz", hash = "sha256:eaaac4c9fdd5e9e8852dc778d2d7405897ec510f2a298071453e5e3a07914bb1", size = 401824, upload-time = "2026-05-01T23:13:02.138Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b6/61/fae042894f4296ec49e3f193aff5d7c18440da9e48102c3315e1bc4519a7/parso-0.8.6-py2.py3-none-any.whl", hash = "sha256:2c549f800b70a5c4952197248825584cb00f033b29c692671d3bf08bf380baff", size = 106894, upload-time = "2026-02-09T15:45:21.391Z" }, + { url = "https://files.pythonhosted.org/packages/99/5d/8268b644392ee874ee82a635cd0df1773de230bde356c38de28e298392cc/parso-0.8.7-py2.py3-none-any.whl", hash = "sha256:a8926eb2a1b915486941fdbd31e86a4baf88fe8c210f25f2f35ecec5b574ca1c", size = 107025, upload-time = 
"2026-05-01T23:12:58.867Z" }, ] [[package]] name = "pathspec" -version = "1.0.4" +version = "1.1.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fa/36/e27608899f9b8d4dff0617b2d9ab17ca5608956ca44461ac14ac48b44015/pathspec-1.0.4.tar.gz", hash = "sha256:0210e2ae8a21a9137c0d470578cb0e595af87edaa6ebf12ff176f14a02e0e645", size = 131200, upload-time = "2026-01-27T03:59:46.938Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/82/42f767fc1c1143d6fd36efb827202a2d997a375e160a71eb2888a925aac1/pathspec-1.1.1.tar.gz", hash = "sha256:17db5ecd524104a120e173814c90367a96a98d07c45b2e10c2f3919fff91bf5a", size = 135180, upload-time = "2026-04-27T01:46:08.907Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/3c/2c197d226f9ea224a9ab8d197933f9da0ae0aac5b6e0f884e2b8d9c8e9f7/pathspec-1.0.4-py3-none-any.whl", hash = "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723", size = 55206, upload-time = "2026-01-27T03:59:45.137Z" }, + { url = "https://files.pythonhosted.org/packages/f1/d9/7fb5aa316bc299258e68c73ba3bddbc499654a07f151cba08f6153988714/pathspec-1.1.1-py3-none-any.whl", hash = "sha256:a00ce642f577bf7f473932318056212bc4f8bfdf53128c78bbd5af0b9b20b189", size = 57328, upload-time = "2026-04-27T01:46:07.06Z" }, ] [[package]] @@ -4508,7 +4456,7 @@ wheels = [ [[package]] name = "pre-commit" -version = "4.5.1" +version = "4.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cfgv" }, @@ -4517,9 +4465,9 @@ dependencies = [ { name = "pyyaml" }, { name = "virtualenv" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/40/f1/6d86a29246dfd2e9b6237f0b5823717f60cad94d47ddc26afa916d21f525/pre_commit-4.5.1.tar.gz", hash = "sha256:eb545fcff725875197837263e977ea257a402056661f09dae08e4b149b030a61", size = 198232, upload-time = "2025-12-16T21:14:33.552Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/8e/22/2de9408ac81acbb8a7d05d4cc064a152ccf33b3d480ebe0cd292153db239/pre_commit-4.6.0.tar.gz", hash = "sha256:718d2208cef53fdc38206e40524a6d4d9576d103eb16f0fec11c875e7716e9d9", size = 198525, upload-time = "2026-04-21T20:31:41.613Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5d/19/fd3ef348460c80af7bb4669ea7926651d1f95c23ff2df18b9d24bab4f3fa/pre_commit-4.5.1-py2.py3-none-any.whl", hash = "sha256:3b3afd891e97337708c1674210f8eba659b52a38ea5f822ff142d10786221f77", size = 226437, upload-time = "2025-12-16T21:14:32.409Z" }, + { url = "https://files.pythonhosted.org/packages/80/6e/4b28b62ecb6aae56769c34a8ff1d661473ec1e9519e2d5f8b2c150086b26/pre_commit-4.6.0-py2.py3-none-any.whl", hash = "sha256:e2cf246f7299edcabcf15f9b0571fdce06058527f0a06535068a86d38089f29b", size = 226472, upload-time = "2026-04-21T20:31:40.092Z" }, ] [[package]] @@ -4689,45 +4637,45 @@ wheels = [ [[package]] name = "pyarrow" -version = "23.0.1" +version = "24.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/88/22/134986a4cc224d593c1afde5494d18ff629393d74cc2eddb176669f234a4/pyarrow-23.0.1.tar.gz", hash = "sha256:b8c5873e33440b2bc2f4a79d2b47017a89c5a24116c055625e6f2ee50523f019", size = 1167336, upload-time = "2026-02-16T10:14:12.39Z" } +sdist = { url = "https://files.pythonhosted.org/packages/91/13/13e1069b351bdc3881266e11147ffccf687505dbb0ea74036237f5d454a5/pyarrow-24.0.0.tar.gz", hash = "sha256:85fe721a14dd823aca09127acbb06c3ca723efbd436c004f16bca601b04dcc83", size = 1180261, upload-time = "2026-04-21T10:51:25.837Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9a/4b/4166bb5abbfe6f750fc60ad337c43ecf61340fa52ab386da6e8dbf9e63c4/pyarrow-23.0.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:f4b0dbfa124c0bb161f8b5ebb40f1a680b70279aa0c9901d44a2b5a20806039f", size = 34214575, upload-time = "2026-02-16T10:09:56.225Z" }, - { url = 
"https://files.pythonhosted.org/packages/e1/da/3f941e3734ac8088ea588b53e860baeddac8323ea40ce22e3d0baa865cc9/pyarrow-23.0.1-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:7707d2b6673f7de054e2e83d59f9e805939038eebe1763fe811ee8fa5c0cd1a7", size = 35832540, upload-time = "2026-02-16T10:10:03.428Z" }, - { url = "https://files.pythonhosted.org/packages/88/7c/3d841c366620e906d54430817531b877ba646310296df42ef697308c2705/pyarrow-23.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:86ff03fb9f1a320266e0de855dee4b17da6794c595d207f89bba40d16b5c78b9", size = 44470940, upload-time = "2026-02-16T10:10:10.704Z" }, - { url = "https://files.pythonhosted.org/packages/2c/a5/da83046273d990f256cb79796a190bbf7ec999269705ddc609403f8c6b06/pyarrow-23.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:813d99f31275919c383aab17f0f455a04f5a429c261cc411b1e9a8f5e4aaaa05", size = 47586063, upload-time = "2026-02-16T10:10:17.95Z" }, - { url = "https://files.pythonhosted.org/packages/5b/3c/b7d2ebcff47a514f47f9da1e74b7949138c58cfeb108cdd4ee62f43f0cf3/pyarrow-23.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bf5842f960cddd2ef757d486041d57c96483efc295a8c4a0e20e704cbbf39c67", size = 48173045, upload-time = "2026-02-16T10:10:25.363Z" }, - { url = "https://files.pythonhosted.org/packages/43/b2/b40961262213beaba6acfc88698eb773dfce32ecdf34d19291db94c2bd73/pyarrow-23.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564baf97c858ecc03ec01a41062e8f4698abc3e6e2acd79c01c2e97880a19730", size = 50621741, upload-time = "2026-02-16T10:10:33.477Z" }, - { url = "https://files.pythonhosted.org/packages/f6/70/1fdda42d65b28b078e93d75d371b2185a61da89dda4def8ba6ba41ebdeb4/pyarrow-23.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:07deae7783782ac7250989a7b2ecde9b3c343a643f82e8a4df03d93b633006f0", size = 27620678, upload-time = "2026-02-16T10:10:39.31Z" }, - { url = 
"https://files.pythonhosted.org/packages/47/10/2cbe4c6f0fb83d2de37249567373d64327a5e4d8db72f486db42875b08f6/pyarrow-23.0.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:6b8fda694640b00e8af3c824f99f789e836720aa8c9379fb435d4c4953a756b8", size = 34210066, upload-time = "2026-02-16T10:10:45.487Z" }, - { url = "https://files.pythonhosted.org/packages/cb/4f/679fa7e84dadbaca7a65f7cdba8d6c83febbd93ca12fa4adf40ba3b6362b/pyarrow-23.0.1-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:8ff51b1addc469b9444b7c6f3548e19dc931b172ab234e995a60aea9f6e6025f", size = 35825526, upload-time = "2026-02-16T10:10:52.266Z" }, - { url = "https://files.pythonhosted.org/packages/f9/63/d2747d930882c9d661e9398eefc54f15696547b8983aaaf11d4a2e8b5426/pyarrow-23.0.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:71c5be5cbf1e1cb6169d2a0980850bccb558ddc9b747b6206435313c47c37677", size = 44473279, upload-time = "2026-02-16T10:11:01.557Z" }, - { url = "https://files.pythonhosted.org/packages/b3/93/10a48b5e238de6d562a411af6467e71e7aedbc9b87f8d3a35f1560ae30fb/pyarrow-23.0.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:9b6f4f17b43bc39d56fec96e53fe89d94bac3eb134137964371b45352d40d0c2", size = 47585798, upload-time = "2026-02-16T10:11:09.401Z" }, - { url = "https://files.pythonhosted.org/packages/5c/20/476943001c54ef078dbf9542280e22741219a184a0632862bca4feccd666/pyarrow-23.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fc13fc6c403d1337acab46a2c4346ca6c9dec5780c3c697cf8abfd5e19b6b37", size = 48179446, upload-time = "2026-02-16T10:11:17.781Z" }, - { url = "https://files.pythonhosted.org/packages/4b/b6/5dd0c47b335fcd8edba9bfab78ad961bd0fd55ebe53468cc393f45e0be60/pyarrow-23.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5c16ed4f53247fa3ffb12a14d236de4213a4415d127fe9cebed33d51671113e2", size = 50623972, upload-time = "2026-02-16T10:11:26.185Z" }, - { url = 
"https://files.pythonhosted.org/packages/d5/09/a532297c9591a727d67760e2e756b83905dd89adb365a7f6e9c72578bcc1/pyarrow-23.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:cecfb12ef629cf6be0b1887f9f86463b0dd3dc3195ae6224e74006be4736035a", size = 27540749, upload-time = "2026-02-16T10:12:23.297Z" }, - { url = "https://files.pythonhosted.org/packages/a5/8e/38749c4b1303e6ae76b3c80618f84861ae0c55dd3c2273842ea6f8258233/pyarrow-23.0.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:29f7f7419a0e30264ea261fdc0e5fe63ce5a6095003db2945d7cd78df391a7e1", size = 34471544, upload-time = "2026-02-16T10:11:32.535Z" }, - { url = "https://files.pythonhosted.org/packages/a3/73/f237b2bc8c669212f842bcfd842b04fc8d936bfc9d471630569132dc920d/pyarrow-23.0.1-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:33d648dc25b51fd8055c19e4261e813dfc4d2427f068bcecc8b53d01b81b0500", size = 35949911, upload-time = "2026-02-16T10:11:39.813Z" }, - { url = "https://files.pythonhosted.org/packages/0c/86/b912195eee0903b5611bf596833def7d146ab2d301afeb4b722c57ffc966/pyarrow-23.0.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:cd395abf8f91c673dd3589cadc8cc1ee4e8674fa61b2e923c8dd215d9c7d1f41", size = 44520337, upload-time = "2026-02-16T10:11:47.764Z" }, - { url = "https://files.pythonhosted.org/packages/69/c2/f2a717fb824f62d0be952ea724b4f6f9372a17eed6f704b5c9526f12f2f1/pyarrow-23.0.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:00be9576d970c31defb5c32eb72ef585bf600ef6d0a82d5eccaae96639cf9d07", size = 47548944, upload-time = "2026-02-16T10:11:56.607Z" }, - { url = "https://files.pythonhosted.org/packages/84/a7/90007d476b9f0dc308e3bc57b832d004f848fd6c0da601375d20d92d1519/pyarrow-23.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c2139549494445609f35a5cda4eb94e2c9e4d704ce60a095b342f82460c73a83", size = 48236269, upload-time = "2026-02-16T10:12:04.47Z" }, - { url = 
"https://files.pythonhosted.org/packages/b0/3f/b16fab3e77709856eb6ac328ce35f57a6d4a18462c7ca5186ef31b45e0e0/pyarrow-23.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:7044b442f184d84e2351e5084600f0d7343d6117aabcbc1ac78eb1ae11eb4125", size = 50604794, upload-time = "2026-02-16T10:12:11.797Z" }, - { url = "https://files.pythonhosted.org/packages/e9/a1/22df0620a9fac31d68397a75465c344e83c3dfe521f7612aea33e27ab6c0/pyarrow-23.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a35581e856a2fafa12f3f54fce4331862b1cfb0bef5758347a858a4aa9d6bae8", size = 27660642, upload-time = "2026-02-16T10:12:17.746Z" }, - { url = "https://files.pythonhosted.org/packages/8d/1b/6da9a89583ce7b23ac611f183ae4843cd3a6cf54f079549b0e8c14031e73/pyarrow-23.0.1-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:5df1161da23636a70838099d4aaa65142777185cc0cdba4037a18cee7d8db9ca", size = 34238755, upload-time = "2026-02-16T10:12:32.819Z" }, - { url = "https://files.pythonhosted.org/packages/ae/b5/d58a241fbe324dbaeb8df07be6af8752c846192d78d2272e551098f74e88/pyarrow-23.0.1-cp314-cp314-macosx_12_0_x86_64.whl", hash = "sha256:fa8e51cb04b9f8c9c5ace6bab63af9a1f88d35c0d6cbf53e8c17c098552285e1", size = 35847826, upload-time = "2026-02-16T10:12:38.949Z" }, - { url = "https://files.pythonhosted.org/packages/54/a5/8cbc83f04aba433ca7b331b38f39e000efd9f0c7ce47128670e737542996/pyarrow-23.0.1-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:0b95a3994f015be13c63148fef8832e8a23938128c185ee951c98908a696e0eb", size = 44536859, upload-time = "2026-02-16T10:12:45.467Z" }, - { url = "https://files.pythonhosted.org/packages/36/2e/c0f017c405fcdc252dbccafbe05e36b0d0eb1ea9a958f081e01c6972927f/pyarrow-23.0.1-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:4982d71350b1a6e5cfe1af742c53dfb759b11ce14141870d05d9e540d13bc5d1", size = 47614443, upload-time = "2026-02-16T10:12:55.525Z" }, - { url = 
"https://files.pythonhosted.org/packages/af/6b/2314a78057912f5627afa13ba43809d9d653e6630859618b0fd81a4e0759/pyarrow-23.0.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c250248f1fe266db627921c89b47b7c06fee0489ad95b04d50353537d74d6886", size = 48232991, upload-time = "2026-02-16T10:13:04.729Z" }, - { url = "https://files.pythonhosted.org/packages/40/f2/1bcb1d3be3460832ef3370d621142216e15a2c7c62602a4ea19ec240dd64/pyarrow-23.0.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5f4763b83c11c16e5f4c15601ba6dfa849e20723b46aa2617cb4bffe8768479f", size = 50645077, upload-time = "2026-02-16T10:13:14.147Z" }, - { url = "https://files.pythonhosted.org/packages/eb/3f/b1da7b61cd66566a4d4c8383d376c606d1c34a906c3f1cb35c479f59d1aa/pyarrow-23.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:3a4c85ef66c134161987c17b147d6bffdca4566f9a4c1d81a0a01cdf08414ea5", size = 28234271, upload-time = "2026-02-16T10:14:09.397Z" }, - { url = "https://files.pythonhosted.org/packages/b5/78/07f67434e910a0f7323269be7bfbf58699bd0c1d080b18a1ab49ba943fe8/pyarrow-23.0.1-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:17cd28e906c18af486a499422740298c52d7c6795344ea5002a7720b4eadf16d", size = 34488692, upload-time = "2026-02-16T10:13:21.541Z" }, - { url = "https://files.pythonhosted.org/packages/50/76/34cf7ae93ece1f740a04910d9f7e80ba166b9b4ab9596a953e9e62b90fe1/pyarrow-23.0.1-cp314-cp314t-macosx_12_0_x86_64.whl", hash = "sha256:76e823d0e86b4fb5e1cf4a58d293036e678b5a4b03539be933d3b31f9406859f", size = 35964383, upload-time = "2026-02-16T10:13:28.63Z" }, - { url = "https://files.pythonhosted.org/packages/46/90/459b827238936d4244214be7c684e1b366a63f8c78c380807ae25ed92199/pyarrow-23.0.1-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:a62e1899e3078bf65943078b3ad2a6ddcacf2373bc06379aac61b1e548a75814", size = 44538119, upload-time = "2026-02-16T10:13:35.506Z" }, - { url = 
"https://files.pythonhosted.org/packages/28/a1/93a71ae5881e99d1f9de1d4554a87be37da11cd6b152239fb5bd924fdc64/pyarrow-23.0.1-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:df088e8f640c9fae3b1f495b3c64755c4e719091caf250f3a74d095ddf3c836d", size = 47571199, upload-time = "2026-02-16T10:13:42.504Z" }, - { url = "https://files.pythonhosted.org/packages/88/a3/d2c462d4ef313521eaf2eff04d204ac60775263f1fb08c374b543f79f610/pyarrow-23.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:46718a220d64677c93bc243af1d44b55998255427588e400677d7192671845c7", size = 48259435, upload-time = "2026-02-16T10:13:49.226Z" }, - { url = "https://files.pythonhosted.org/packages/cc/f1/11a544b8c3d38a759eb3fbb022039117fd633e9a7b19e4841cc3da091915/pyarrow-23.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a09f3876e87f48bc2f13583ab551f0379e5dfb83210391e68ace404181a20690", size = 50629149, upload-time = "2026-02-16T10:13:57.238Z" }, - { url = "https://files.pythonhosted.org/packages/50/f2/c0e76a0b451ffdf0cf788932e182758eb7558953f4f27f1aff8e2518b653/pyarrow-23.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:527e8d899f14bd15b740cd5a54ad56b7f98044955373a17179d5956ddb93d9ce", size = 28365807, upload-time = "2026-02-16T10:14:03.892Z" }, + { url = "https://files.pythonhosted.org/packages/b4/a9/9686d9f07837f91f775e8932659192e02c74f9d8920524b480b85212cc68/pyarrow-24.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:6233c9ed9ab9d1db47de57d9753256d9dcffbf42db341576099f0fd9f6bf4810", size = 34981559, upload-time = "2026-04-21T10:47:22.17Z" }, + { url = "https://files.pythonhosted.org/packages/80/b6/0ddf0e9b6ead3474ab087ae598c76b031fc45532bf6a63f3a553440fb258/pyarrow-24.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:f7616236ec1bc2b15bfdec22a71ab38851c86f8f05ff64f379e1278cf20c634a", size = 36663654, upload-time = "2026-04-21T10:47:28.315Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/3b/926382efe8ce27ba729071d3566ade6dfb86bdf112f366000196b2f5780a/pyarrow-24.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:1617043b99bd33e5318ae18eb2919af09c71322ef1ca46566cdafc6e6712fb66", size = 45679394, upload-time = "2026-04-21T10:47:34.821Z" }, + { url = "https://files.pythonhosted.org/packages/b3/7a/829f7d9dfd37c207206081d6dad474d81dde29952401f07f2ba507814818/pyarrow-24.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6165461f55ef6314f026de6638d661188e3455d3ec49834556a0ebbdbace18bb", size = 48863122, upload-time = "2026-04-21T10:47:42.056Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e8/f88ce625fe8babaae64e8db2d417c7653adb3019b08aae85c5ed787dc816/pyarrow-24.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3b13dedfe76a0ad2d1d859b0811b53827a4e9d93a0bcb05cf59333ab4980cc7e", size = 49376032, upload-time = "2026-04-21T10:47:48.967Z" }, + { url = "https://files.pythonhosted.org/packages/36/7a/82c363caa145fff88fb475da50d3bf52bb024f61917be5424c3392eaf878/pyarrow-24.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:25ea65d868eb04015cd18e6df2fbe98f07e5bda2abefabcb88fce39a947716f6", size = 51929490, upload-time = "2026-04-21T10:47:55.981Z" }, + { url = "https://files.pythonhosted.org/packages/66/1c/e3e72c8014ad2743ca64a701652c733cc5cbcee15c0463a32a8c55518d9e/pyarrow-24.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:295f0a7f2e242dabd513737cf076007dc5b2d59237e3eca37b05c0c6446f3826", size = 27355660, upload-time = "2026-04-21T10:48:01.718Z" }, + { url = "https://files.pythonhosted.org/packages/6f/d3/a1abf004482026ddc17f4503db227787fa3cfe41ec5091ff20e4fea55e57/pyarrow-24.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:02b001b3ed4723caa44f6cd1af2d5c86aa2cf9971dacc2ffa55b21237713dfba", size = 34976759, upload-time = "2026-04-21T10:48:07.258Z" }, + { url = 
"https://files.pythonhosted.org/packages/4f/4a/34f0a36d28a2dd32225301b79daad44e243dc1a2bb77d43b60749be255c4/pyarrow-24.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:04920d6a71aabd08a0417709efce97d45ea8e6fb733d9ca9ecffb13c67839f68", size = 36658471, upload-time = "2026-04-21T10:48:13.347Z" }, + { url = "https://files.pythonhosted.org/packages/1f/78/543b94712ae8bb1a6023bcc1acf1a740fbff8286747c289cd9468fced2a5/pyarrow-24.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:a964266397740257f16f7bb2e4f08a0c81454004beab8ff59dd531b73610e9f2", size = 45675981, upload-time = "2026-04-21T10:48:20.201Z" }, + { url = "https://files.pythonhosted.org/packages/84/9f/8fb7c222b100d314137fa40ec050de56cd8c6d957d1cfff685ce72f15b17/pyarrow-24.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:6f066b179d68c413374294bc1735f68475457c933258df594443bb9d88ddc2a0", size = 48859172, upload-time = "2026-04-21T10:48:27.541Z" }, + { url = "https://files.pythonhosted.org/packages/a7/d3/1ea72538e6c8b3b475ed78d1049a2c518e655761ea50fe1171fc855fcab7/pyarrow-24.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1183baeb14c5f587b1ec52831e665718ce632caab84b7cd6b85fd44f96114495", size = 49385733, upload-time = "2026-04-21T10:48:34.7Z" }, + { url = "https://files.pythonhosted.org/packages/c3/be/c3d8b06a1ba35f2260f8e1f771abbee7d5e345c0937aab90675706b1690a/pyarrow-24.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:806f24b4085453c197a5078218d1ee08783ebbba271badd153d1ae22a3ee804f", size = 51934335, upload-time = "2026-04-21T10:48:42.099Z" }, + { url = "https://files.pythonhosted.org/packages/9c/62/89e07a1e7329d2cde3e3c6994ba0839a24977a2beda8be6005ea3d860b99/pyarrow-24.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:e4505fc6583f7b05ab854934896bcac8253b04ac1171a77dfb73efef92076d91", size = 27271748, upload-time = "2026-04-21T10:49:42.532Z" }, + { url = 
"https://files.pythonhosted.org/packages/17/1a/cff3a59f80b5b1658549d46611b67163f65e0664431c076ad728bf9d5af4/pyarrow-24.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:1a4e45017efbf115032e4475ee876d525e0e36c742214fbe405332480ecd6275", size = 35238554, upload-time = "2026-04-21T10:48:48.526Z" }, + { url = "https://files.pythonhosted.org/packages/a8/99/cce0f42a327bfef2c420fb6078a3eb834826e5d6697bf3009fe11d2ad051/pyarrow-24.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:7986f1fa71cee060ad00758bcc79d3a93bab8559bf978fab9e53472a2e25a17b", size = 36782301, upload-time = "2026-04-21T10:48:55.181Z" }, + { url = "https://files.pythonhosted.org/packages/2a/66/8e560d5ff6793ca29aca213c53eec0dd482dd46cb93b2819e5aab52e4252/pyarrow-24.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:d3e0b61e8efb24ed38898e5cdc5fffa9124be480008d401a1f8071500494ae42", size = 45721929, upload-time = "2026-04-21T10:49:03.676Z" }, + { url = "https://files.pythonhosted.org/packages/27/0c/a26e25505d030716e078d9f16eb74973cbf0b33b672884e9f9da1c83b871/pyarrow-24.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:55a3bc1e3df3b5567b7d27ef551b2283f0c68a5e86f1cd56abc569da4f31335b", size = 48825365, upload-time = "2026-04-21T10:49:11.714Z" }, + { url = "https://files.pythonhosted.org/packages/5f/eb/771f9ecb0c65e73fe9dccdd1717901b9594f08c4515d000c7c62df573811/pyarrow-24.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:641f795b361874ac9da5294f8f443dfdbee355cf2bd9e3b8d97aaac2306b9b37", size = 49451819, upload-time = "2026-04-21T10:49:21.474Z" }, + { url = "https://files.pythonhosted.org/packages/48/da/61ae89a88732f5a785646f3ec6125dbb640fa98a540eb2b9889caa561403/pyarrow-24.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8adc8e6ce5fccf5dc707046ae4914fd537def529709cc0d285d37a7f9cd442ca", size = 51909252, upload-time = "2026-04-21T10:49:31.164Z" }, + { url = 
"https://files.pythonhosted.org/packages/cb/1a/8dd5cafab7b66573fa91c03d06d213356ad4edd71813aa75e08ce2b3a844/pyarrow-24.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:9b18371ad2f44044b81a8d23bc2d8a9b6a6226dca775e8e16cfee640473d6c5d", size = 27388127, upload-time = "2026-04-21T10:49:37.334Z" }, + { url = "https://files.pythonhosted.org/packages/ad/80/d022a34ff05d2cbedd8ccf841fc1f532ecfa9eb5ed1711b56d0e0ea71fc9/pyarrow-24.0.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:1cc9057f0319e26333b357e17f3c2c022f1a83739b48a88b25bfd5fa2dc18838", size = 35007997, upload-time = "2026-04-21T10:49:48.796Z" }, + { url = "https://files.pythonhosted.org/packages/1a/ff/f01485fda6f4e5d441afb8dd5e7681e4db18826c1e271852f5d3957d6a80/pyarrow-24.0.0-cp314-cp314-macosx_12_0_x86_64.whl", hash = "sha256:e6f1278ee4785b6db21229374a1c9e54ec7c549de5d1efc9630b6207de7e170b", size = 36678720, upload-time = "2026-04-21T10:49:55.858Z" }, + { url = "https://files.pythonhosted.org/packages/9e/c2/2d2d5fea814237923f71b36495211f20b43a1576f9a4d6da7e751a64ec6f/pyarrow-24.0.0-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:adbbedc55506cbdabb830890444fb856bfb0060c46c6f8026c6c2f2cf86ae795", size = 45741852, upload-time = "2026-04-21T10:50:04.624Z" }, + { url = "https://files.pythonhosted.org/packages/8e/3a/28ba9c1c1ebdbb5f1b94dfebb46f207e52e6a554b7fe4132540fde29a3a0/pyarrow-24.0.0-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:ae8a1145af31d903fa9bb166824d7abe9b4681a000b0159c9fb99c11bc11ad26", size = 48889852, upload-time = "2026-04-21T10:50:12.293Z" }, + { url = "https://files.pythonhosted.org/packages/df/51/4a389acfd31dca009f8fb82d7f510bb4130f2b3a8e18cf00194d0687d8ac/pyarrow-24.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d7027eba1df3b2069e2e8d80f644fa0918b68c46432af3d088ddd390d063ecde", size = 49445207, upload-time = "2026-04-21T10:50:20.677Z" }, + { url = 
"https://files.pythonhosted.org/packages/19/4b/0bab2b23d2ae901b1b9a03c0efd4b2d070256f8ce3fc43f6e58c167b2081/pyarrow-24.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e56a1ffe9bf7b727432b89104cc0849c21582949dd7bdcb34f17b2001a351a76", size = 51954117, upload-time = "2026-04-21T10:50:29.14Z" }, + { url = "https://files.pythonhosted.org/packages/29/88/f4e9145da0417b3d2c12035a8492b35ff4a3dbc653e614fcfb51d9dedb38/pyarrow-24.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:38be1808cdd068605b787e6ca9119b27eb275a0234e50212c3492331680c3b1e", size = 28001155, upload-time = "2026-04-21T10:51:22.337Z" }, + { url = "https://files.pythonhosted.org/packages/79/4f/46a49a63f43526da895b1a45bbb51d5baf8e4d77159f8528fc3e5490007f/pyarrow-24.0.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:418e48ce50a45a6a6c73c454677203a9c75c966cb1e92ca3370959185f197a05", size = 35250387, upload-time = "2026-04-21T10:50:35.552Z" }, + { url = "https://files.pythonhosted.org/packages/a0/da/d5e0cd5ef00796922404806d5f00325cdadc3441ce2c13fe7115f2df9a64/pyarrow-24.0.0-cp314-cp314t-macosx_12_0_x86_64.whl", hash = "sha256:2f16197705a230a78270cdd4ea8a1d57e86b2fdcbc34a1f6aebc72e65c986f9a", size = 36797102, upload-time = "2026-04-21T10:50:42.417Z" }, + { url = "https://files.pythonhosted.org/packages/34/c7/5904145b0a593a05236c882933d439b5720f0a145381179063722fbfc123/pyarrow-24.0.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:fb24ac194bfc5e86839d7dcd52092ee31e5fe6733fe11f5e3b06ef0812b20072", size = 45745118, upload-time = "2026-04-21T10:50:49.324Z" }, + { url = "https://files.pythonhosted.org/packages/13/d3/cca42fe166d1c6e4d5b80e530b7949104d10e17508a90ae202dac205ce2a/pyarrow-24.0.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:9700ebd9a51f5895ce75ff4ac4b3c47a7d4b42bc618be8e713e5d56bacf5f931", size = 48844765, upload-time = "2026-04-21T10:50:55.579Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/49/942c3b79878ba928324d1e17c274ed84581db8c0a749b24bcf4cbdf15bd3/pyarrow-24.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d8ddd2768da81d3ee08cfea9b597f4abb4e8e1dc8ae7e204b608d23a0d3ab699", size = 49471890, upload-time = "2026-04-21T10:51:02.439Z" }, + { url = "https://files.pythonhosted.org/packages/76/97/ff71431000a75d84135a1ace5ca4ba11726a231a8007bbb320a4c54075d5/pyarrow-24.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:61a3d7eaa97a14768b542f3d284dc6400dd2470d9f080708b13cd46b6ae18136", size = 51932250, upload-time = "2026-04-21T10:51:10.576Z" }, + { url = "https://files.pythonhosted.org/packages/51/be/6f79d55816d5c22557cf27533543d5d70dfe692adfbee4b99f2760674f38/pyarrow-24.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:c91d00057f23b8d353039520dc3a6c09d8608164c692e9f59a175a42b2ae0c19", size = 28131282, upload-time = "2026-04-21T10:51:16.815Z" }, ] [[package]] @@ -4741,7 +4689,7 @@ wheels = [ [[package]] name = "pydantic" -version = "2.13.2" +version = "2.13.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-types" }, @@ -4749,84 +4697,84 @@ dependencies = [ { name = "typing-extensions" }, { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/09/e5/06d23afac9973109d1e3c8ad38e1547a12e860610e327c05ee686827dc37/pydantic-2.13.2.tar.gz", hash = "sha256:b418196607e61081c3226dcd4f0672f2a194828abb9109e9cfb84026564df2d1", size = 843836, upload-time = "2026-04-17T09:31:59.636Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d9/e4/40d09941a2cebcb20609b86a559817d5b9291c49dd6f8c87e5feffbe703a/pydantic-2.13.3.tar.gz", hash = "sha256:af09e9d1d09f4e7fe37145c1f577e1d61ceb9a41924bf0094a36506285d0a84d", size = 844068, upload-time = "2026-04-20T14:46:43.632Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/77/ca/b45c378e6e8d0b90577288b533e04e95b7afd61bb1d51b6c263176435489/pydantic-2.13.2-py3-none-any.whl", hash = 
"sha256:a525087f4c03d7e7456a3de89b64cd693d2229933bb1068b9af6befd5563694e", size = 471947, upload-time = "2026-04-17T09:31:57.541Z" }, + { url = "https://files.pythonhosted.org/packages/f3/0a/fd7d723f8f8153418fb40cf9c940e82004fce7e987026b08a68a36dd3fe7/pydantic-2.13.3-py3-none-any.whl", hash = "sha256:6db14ac8dfc9a1e57f87ea2c0de670c251240f43cb0c30a5130e9720dc612927", size = 471981, upload-time = "2026-04-20T14:46:41.402Z" }, ] [[package]] name = "pydantic-core" -version = "2.46.2" +version = "2.46.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/43/bb/4742f05b739b2478459bb16fa8470549518c802e06ddcf3f106c5081315e/pydantic_core-2.46.2.tar.gz", hash = "sha256:37bb079f9ee3f1a519392b73fda2a96379b31f2013c6b467fe693e7f2987f596", size = 471269, upload-time = "2026-04-17T09:10:07.017Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/ef/f7abb56c49382a246fd2ce9c799691e3c3e7175ec74b14d99e798bcddb1a/pydantic_core-2.46.3.tar.gz", hash = "sha256:41c178f65b8c29807239d47e6050262eb6bf84eb695e41101e62e38df4a5bc2c", size = 471412, upload-time = "2026-04-20T14:40:56.672Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/97/ec/2fafa4c86f5d2a69372c7cddef30925fd0e370b1efaf556609c1a0196d8a/pydantic_core-2.46.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ea1ad8c89da31512fe2d249cf0638fb666925bda341901541bc5f3311c6fcc9e", size = 2101729, upload-time = "2026-04-17T09:12:30.042Z" }, - { url = "https://files.pythonhosted.org/packages/cf/55/be5386c2c4b49af346e8a26b748194ff25757bbb6cf544130854e997af7a/pydantic_core-2.46.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b308da17b92481e0587244631c5529e5d91d04cb2b08194825627b1eca28e21e", size = 1951546, upload-time = "2026-04-17T09:10:10.585Z" }, - { url = 
"https://files.pythonhosted.org/packages/29/92/89e273a055ce440e6636c756379af35ad86da9d336a560049c3ba5e41c80/pydantic_core-2.46.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d333a50bdd814a917d8d6a7ee35ba2395d53ddaa882613bc24e54a9d8b129095", size = 1976178, upload-time = "2026-04-17T09:11:49.619Z" }, - { url = "https://files.pythonhosted.org/packages/91/b3/e4664469cf70c0cb0f7b2f5719d64e5968bb6f38217042c2afa3d3c4ba17/pydantic_core-2.46.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d00b99590c5bd1fabbc5d28b170923e32c1b1071b1f1de1851a4d14d89eb192", size = 2051697, upload-time = "2026-04-17T09:12:04.917Z" }, - { url = "https://files.pythonhosted.org/packages/98/58/dbf68213ee06ce51cdd6d8c95f97980e646858c45bd96bd2dfb40433be73/pydantic_core-2.46.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9f0e686960ffe9e65066395af856ac2d52c159043144433602c50c221d81c1ba", size = 2233160, upload-time = "2026-04-17T09:12:00.956Z" }, - { url = "https://files.pythonhosted.org/packages/f5/d3/68092aa0ee6c60ff4de4740eb82db3d4ce338ec89b3cecb978c532472f12/pydantic_core-2.46.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d1128da41c9cb474e0a4701f9c363ec645c9d1a02229904c76bf4e0a194fde2", size = 2298398, upload-time = "2026-04-17T09:10:29.694Z" }, - { url = "https://files.pythonhosted.org/packages/e4/51/5d6155eb737db55b0ad354ca5f333ef009f75feb67df2d79a84bace45af6/pydantic_core-2.46.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48649cf2d8c358d79586e9fb2f8235902fcaa2d969ec1c5301f2d1873b2f8321", size = 2094058, upload-time = "2026-04-17T09:12:10.995Z" }, - { url = "https://files.pythonhosted.org/packages/6b/f3/eb4a986197d71319430464ff181226c95adc8f06d932189b158bae5a82f5/pydantic_core-2.46.2-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:b902f0fc7c2cf503865a05718b68147c6cd5d0a3867af38c527be574a9fa6e9d", size = 2130388, upload-time = 
"2026-04-17T09:12:41.159Z" }, - { url = "https://files.pythonhosted.org/packages/56/00/44a9c4fe6d0f64b5786d6a8c649d6f0e34ba6c89b3663add1066e54451a2/pydantic_core-2.46.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e80011f808b03d1d87a8f1e76ae3da19a18eb706c823e17981dcf1fae43744fc", size = 2184245, upload-time = "2026-04-17T09:12:36.532Z" }, - { url = "https://files.pythonhosted.org/packages/78/6b/685b98a834d5e3d1c34a1bde1627525559dd223b75075bc7490cdb24eb33/pydantic_core-2.46.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b839d5c802e31348b949b6473f8190cddbf7d47475856d8ac995a373ee16ec59", size = 2186842, upload-time = "2026-04-17T09:13:04.054Z" }, - { url = "https://files.pythonhosted.org/packages/22/64/caa2f5a2ac8b6113adaa410ccdf31ba7f54897a6e54cd0d726fc7e780c88/pydantic_core-2.46.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:c6b1064f3f9cf9072e1d59dd2936f9f3b668bec1c37039708c9222db703c0d5b", size = 2336066, upload-time = "2026-04-17T09:12:13.006Z" }, - { url = "https://files.pythonhosted.org/packages/ee/f9/7d2701bf82945b5b9e7df8347be97ef6a36da2846bfe5b4afec299ffe27b/pydantic_core-2.46.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:37a68e6f2ac95578ce3c0564802404b27b24988649616e556c07e77111ed3f1d", size = 2363691, upload-time = "2026-04-17T09:13:42.972Z" }, - { url = "https://files.pythonhosted.org/packages/3b/65/0dab11574101522941055109419db3cc09db871643dc3fc74e2413215e5b/pydantic_core-2.46.2-cp312-cp312-win32.whl", hash = "sha256:d9ffa75a7ef4b97d6e5e205fabd4304ef01fec09e6f1bdde04b9ad1b07d20289", size = 1958801, upload-time = "2026-04-17T09:11:31.981Z" }, - { url = "https://files.pythonhosted.org/packages/13/2b/df84baa609c676f6450b8ecad44ea59146c805e3371b7b52443c0899f989/pydantic_core-2.46.2-cp312-cp312-win_amd64.whl", hash = "sha256:0551f2d2ddb68af5a00e26497f8025c538f73ef3cb698f8e5a487042cd2792a8", size = 2072634, upload-time = "2026-04-17T09:11:02.407Z" }, - { url = 
"https://files.pythonhosted.org/packages/d1/4e/e1ce8029fc438086a946739bf9d596f70ff470aad4a8345555920618cabe/pydantic_core-2.46.2-cp312-cp312-win_arm64.whl", hash = "sha256:83aef30f106edcc21a6a4cc44b82d3169a1dbe255508db788e778f3c804d3583", size = 2026188, upload-time = "2026-04-17T09:13:11.083Z" }, - { url = "https://files.pythonhosted.org/packages/07/2b/662e48254479a2d3450ba24b1e25061108b64339794232f503990c519144/pydantic_core-2.46.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:d26e9eea3715008a09a74585fe9becd0c67fbb145dc4df9756d597d7230a652c", size = 2101762, upload-time = "2026-04-17T09:10:13.87Z" }, - { url = "https://files.pythonhosted.org/packages/73/ab/bafd7c7503757ccc8ec4d1911e106fe474c629443648c51a88f08b0fe91a/pydantic_core-2.46.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:48b36e3235140510dc7861f0cd58b714b1cdd3d48f75e10ce52e69866b746f10", size = 1951814, upload-time = "2026-04-17T09:12:25.934Z" }, - { url = "https://files.pythonhosted.org/packages/92/cc/7549c2d57ba2e9a42caa5861a2d398dbe31c02c6aca783253ace59ce84f8/pydantic_core-2.46.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36b1f99dc451f1a3981f236151465bcf995bbe712d0727c9f7b236fe228a8133", size = 1977329, upload-time = "2026-04-17T09:13:37.605Z" }, - { url = "https://files.pythonhosted.org/packages/18/50/7ed4a8a0d478a4dca8f0134a5efa7193f03cc8520dd4c9509339fb2e5002/pydantic_core-2.46.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8641c8d535c2d95b45c2e19b646ecd23ebba35d461e0ae48a3498277006250ab", size = 2051832, upload-time = "2026-04-17T09:12:49.771Z" }, - { url = "https://files.pythonhosted.org/packages/dc/16/bb35b193741c0298ddc5f5e4234269efdc0c65e2bcd198aa0de9b68845e4/pydantic_core-2.46.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:20fb194788a0a50993e87013e693494ba183a2af5b44e99cf060bbae10912b11", size = 2233127, upload-time = "2026-04-17T09:11:04.449Z" }, - { url = 
"https://files.pythonhosted.org/packages/91/a5/98f4b637149185addea19e1785ea20c373cca31b202f589111d8209d9873/pydantic_core-2.46.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9262d11d0cd11ee3303a95156939402bed6cedfe5ed0e331b95a283a4da6eb8b", size = 2297418, upload-time = "2026-04-17T09:11:25.929Z" }, - { url = "https://files.pythonhosted.org/packages/36/90/93a5d21990b152da7b7507b7fddb0b935f6a0984d57ac3ec45a6e17777a2/pydantic_core-2.46.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac204542736aa295fa25f713b7fad6fc50b46ab7764d16087575c85f085174f3", size = 2093735, upload-time = "2026-04-17T09:12:06.908Z" }, - { url = "https://files.pythonhosted.org/packages/14/22/b8b1ffdddf08b4e84380bcb67f41dbbf4c171377c1d36fc6290794bb2094/pydantic_core-2.46.2-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:9a7c43a0584742dface3ca0daf6f719d46c1ac2f87cf080050f9ae052c75e1b2", size = 2127570, upload-time = "2026-04-17T09:11:53.906Z" }, - { url = "https://files.pythonhosted.org/packages/c6/26/e60d72b4e2d0ce1fa811044a974412ac1c567fe067d97b3e6b290530786e/pydantic_core-2.46.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fd05e1edb6a90ad446fa268ab09e59202766b837597b714b2492db11ee87fab9", size = 2183524, upload-time = "2026-04-17T09:11:30.092Z" }, - { url = "https://files.pythonhosted.org/packages/35/32/36bec7584a1eefb17dec4dfa1c946d3fe4440f466c5705b8adfda69c9a9f/pydantic_core-2.46.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:91155b110788b5501abc7ea954f1d08606219e4e28e3c73a94124307c06efb80", size = 2185408, upload-time = "2026-04-17T09:10:57.228Z" }, - { url = "https://files.pythonhosted.org/packages/fc/d6/1a5689d873620efd67d6b163db0c444c056adb0849b5bc33e2b9f09665a6/pydantic_core-2.46.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:e4e2c72a529fa03ff228be1d2b76944013f428220b764e03cc50ada67e17a42c", size = 2335171, upload-time = "2026-04-17T09:11:43.369Z" }, - { url = 
"https://files.pythonhosted.org/packages/3e/8e/675104802abe8ef502b072050ee5f2e915251aa1a3af87e1015ce31ec42d/pydantic_core-2.46.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:56291ec1a11c3499890c99a8fd9053b47e60fe837a77ec72c0671b1b8b3dce24", size = 2362743, upload-time = "2026-04-17T09:10:18.333Z" }, - { url = "https://files.pythonhosted.org/packages/8d/bc/86c5dde4fa6e24467680eef5047da3c1a19be0a527d0d8e14aa76b39307c/pydantic_core-2.46.2-cp313-cp313-win32.whl", hash = "sha256:b50f9c5f826ddca1246f055148df939f5f3f2d0d96db73de28e2233f22210d4c", size = 1958074, upload-time = "2026-04-17T09:12:38.622Z" }, - { url = "https://files.pythonhosted.org/packages/2a/97/2537e8c1282b2c4eb062580c0d7a4339e10b072b803d1ee0b7f1f0a5c22c/pydantic_core-2.46.2-cp313-cp313-win_amd64.whl", hash = "sha256:251a57788823230ca8cbc99e6245d1a2ed6e180ec4864f251c94182c580c7f2e", size = 2071741, upload-time = "2026-04-17T09:13:32.405Z" }, - { url = "https://files.pythonhosted.org/packages/da/aa/2ee75798706f9dbc4e76dbe59e41a396c5c311e3d6223b9cf6a5fa7780be/pydantic_core-2.46.2-cp313-cp313-win_arm64.whl", hash = "sha256:315d32d1a71494d6b4e1e14a9fa7a4329597b4c4340088ad7e1a9dafbeed92a9", size = 2025955, upload-time = "2026-04-17T09:10:15.567Z" }, - { url = "https://files.pythonhosted.org/packages/d0/96/a50ccb6b539ae780f73cea74905468777680e30c6c3bdf714b9d4c116ea0/pydantic_core-2.46.2-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:4f59b45f3ef8650c0c736a57f59031d47ed9df4c0a64e83796849d7d14863a2d", size = 2097111, upload-time = "2026-04-17T09:10:49.617Z" }, - { url = "https://files.pythonhosted.org/packages/34/5f/fdead7b3afa822ab6e5a18ee0ecffd54937de1877c01ed13a342e0fb3f07/pydantic_core-2.46.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3a075a29ebef752784a91532a1a85be6b234ccffec0a9d7978a92696387c3da6", size = 1951904, upload-time = "2026-04-17T09:12:32.062Z" }, - { url = 
"https://files.pythonhosted.org/packages/95/e0/1c5d547e550cdab1bec737492aa08865337af6fe7fc9b96f7f45f17d9519/pydantic_core-2.46.2-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d12d786e30c04a9d307c5d7080bf720d9bac7f1668191d8e37633a9562749e2", size = 1978667, upload-time = "2026-04-17T09:11:35.589Z" }, - { url = "https://files.pythonhosted.org/packages/0e/cb/665ce629e218c8228302cb94beff4f6531082a2c87d3ecc3d5e63a26f392/pydantic_core-2.46.2-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0d5e6d6343b0b5dcacb3503b5de90022968da8ed0ab9ab39d3eda71c20cbf84e", size = 2046721, upload-time = "2026-04-17T09:11:47.725Z" }, - { url = "https://files.pythonhosted.org/packages/77/e9/6cb2cf60f54c1472bbdfce19d957553b43dbba79d1d7b2930a195c594785/pydantic_core-2.46.2-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:233eebac0999b6b9ba76eb56f3ec8fce13164aa16b6d2225a36a79e0f95b5973", size = 2228483, upload-time = "2026-04-17T09:12:08.837Z" }, - { url = "https://files.pythonhosted.org/packages/0d/2a/93e018dd5571f781ebaeda8c0cf65398489d5bee9b1f484df0b6149b43b9/pydantic_core-2.46.2-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9cc0eee720dd2f14f3b7c349469402b99ad81a174ab49d3533974529e9d93992", size = 2294663, upload-time = "2026-04-17T09:12:52.053Z" }, - { url = "https://files.pythonhosted.org/packages/5e/4f/49e57ca55c770c93d9bb046666a54949b42e3c9099a0c5fe94557873fe30/pydantic_core-2.46.2-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83ee76bf2c9910513dbc19e7d82367131fa7508dedd6186a462393071cc11059", size = 2098742, upload-time = "2026-04-17T09:13:45.472Z" }, - { url = "https://files.pythonhosted.org/packages/c6/b0/6e46b5cd3332af665f794b8cdeea206618a8630bd9e7bcc36864518fce81/pydantic_core-2.46.2-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:d61db38eb4ee5192f0c261b7f2d38e420b554df8912245e3546aee5c45e2fd78", size = 2125922, upload-time = 
"2026-04-17T09:12:54.304Z" }, - { url = "https://files.pythonhosted.org/packages/06/d1/40850c81585be443a2abfdf7f795f8fae831baf8e2f9b2133c8246ac671c/pydantic_core-2.46.2-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8f09a713d17bcd55da8ab02ebd9110c5246a49c44182af213b5212800af8bc83", size = 2183000, upload-time = "2026-04-17T09:10:59.027Z" }, - { url = "https://files.pythonhosted.org/packages/04/af/8493d7dfa03ebb7866909e577c6aa65ea0de7377b86023cc51d0c8e11db3/pydantic_core-2.46.2-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:30cacc5fb696e64b8ef6fd31d9549d394dd7d52760db072eecb98e37e3af1677", size = 2180335, upload-time = "2026-04-17T09:12:57.01Z" }, - { url = "https://files.pythonhosted.org/packages/72/5b/1f6a344c4ffdf284da41c6067b82d5ebcbd11ce1b515ae4b662d4adb6f61/pydantic_core-2.46.2-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:7ccfb105fcfe91a22bbb5563ad3dc124bc1aa75bfd2e53a780ab05f78cdf6108", size = 2330002, upload-time = "2026-04-17T09:12:02.958Z" }, - { url = "https://files.pythonhosted.org/packages/25/ff/9a694126c12d6d2f48a0cafa6f8eef88ef0d8825600e18d03ff2e896c3b2/pydantic_core-2.46.2-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:13ffef637dc8370c249e5b26bd18e9a80a4fca3d809618c44e18ec834a7ca7a8", size = 2359920, upload-time = "2026-04-17T09:10:27.764Z" }, - { url = "https://files.pythonhosted.org/packages/51/c8/3a35c763d68a9cb2675eb10ef242cf66c5d4701b28ae12e688d67d2c180e/pydantic_core-2.46.2-cp314-cp314-win32.whl", hash = "sha256:1b0ab6d756ca2704a938e6c31b53f290c2f9c10d3914235410302a149de1a83e", size = 1953701, upload-time = "2026-04-17T09:13:30.021Z" }, - { url = "https://files.pythonhosted.org/packages/1a/6a/f2726a780365f7dfd89d62036f984f7acb99978c60c5e1fa7c0cb898ed11/pydantic_core-2.46.2-cp314-cp314-win_amd64.whl", hash = "sha256:99ebade8c9ada4df975372d8dd25883daa0e379a05f1cd0c99aa0c04368d01a6", size = 2071867, upload-time = "2026-04-17T09:10:39.205Z" }, - { url = 
"https://files.pythonhosted.org/packages/e1/79/76baacb9feba3d7c399b245ca1a29c74ea0db04ea693811374827eec2290/pydantic_core-2.46.2-cp314-cp314-win_arm64.whl", hash = "sha256:de87422197cf7f83db91d89c86a21660d749b3cd76cd8a45d115b8e675670f02", size = 2017252, upload-time = "2026-04-17T09:10:26.175Z" }, - { url = "https://files.pythonhosted.org/packages/f1/3b/77c26938f817668d9ad9bab1a905cb23f11d9a3d4bf724d429b3e55a8eaf/pydantic_core-2.46.2-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:236f22b4a206b5b61db955396b7cf9e2e1ff77f372efe9570128ccfcd6a525eb", size = 2094545, upload-time = "2026-04-17T09:12:19.339Z" }, - { url = "https://files.pythonhosted.org/packages/fe/de/42c13f590e3c260966aa49bcdb1674774f975467c49abd51191e502bea28/pydantic_core-2.46.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c2012f64d2cd7cca50f49f22445aa5a88691ac2b4498ee0a9a977f8ca4f7289f", size = 1933953, upload-time = "2026-04-17T09:09:55.889Z" }, - { url = "https://files.pythonhosted.org/packages/4e/84/ebe3ebb3e2d8db656937cfa6f97f544cb7132f2307a4a7dfdcd0ea102a12/pydantic_core-2.46.2-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d07d6c63106d3a9c9a333e2636f9c82c703b1a9e3b079299e58747964e4fdb72", size = 1974435, upload-time = "2026-04-17T09:10:12.371Z" }, - { url = "https://files.pythonhosted.org/packages/b9/15/0bf51ca6709477cd4ef86148b6d7844f3308f029eac361dd0383f1e17b1a/pydantic_core-2.46.2-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c326a2b4b85e959d9a1fc3a11f32f84611b6ec07c053e1828a860edf8d068208", size = 2031113, upload-time = "2026-04-17T09:10:00.752Z" }, - { url = "https://files.pythonhosted.org/packages/02/ae/b7b5af9b79db036d9e61a44c481c17a213dc8fc4b8b71fe6875a72fc778b/pydantic_core-2.46.2-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac8a65e798f2462552c00d2e013d532c94d646729dda98458beaf51f9ec7b120", size = 2236325, upload-time = "2026-04-17T09:10:33.227Z" }, - { url = 
"https://files.pythonhosted.org/packages/a6/ae/ecef7477b5a03d4a499708f7e75d2836452ebb70b776c2d64612b334f57a/pydantic_core-2.46.2-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a3c2bc1cc8164bedbc160b7bb1e8cc1e8b9c27f69ae4f9ae2b976cdae02b2dd", size = 2278135, upload-time = "2026-04-17T09:10:23.287Z" }, - { url = "https://files.pythonhosted.org/packages/db/e4/2f9d82faa47af6c39fc3f120145fd915971e1e0cb6b55b494fad9fdf8275/pydantic_core-2.46.2-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e69aa5e10b7e8b1bb4a6888650fd12fcbf11d396ca11d4a44de1450875702830", size = 2109071, upload-time = "2026-04-17T09:11:06.149Z" }, - { url = "https://files.pythonhosted.org/packages/f1/9c/677cf10873fbd0b116575ab7b97c90482b21564f8a8040beb18edef7a577/pydantic_core-2.46.2-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:4e6df5c3301e65fb42bc5338bf9a1027a02b0a31dc7f54c33775229af474daf0", size = 2106028, upload-time = "2026-04-17T09:10:51.525Z" }, - { url = "https://files.pythonhosted.org/packages/d6/53/6a06183544daba51c059123a2064a99039df25f115a06bdb26f2ea177038/pydantic_core-2.46.2-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c2f6e32548ac8d559b47944effcf8ae4d81c161f6b6c885edc53bc08b8f192d", size = 2164816, upload-time = "2026-04-17T09:11:56.187Z" }, - { url = "https://files.pythonhosted.org/packages/57/6f/10fcdd9e3eca66fc828eef0f6f5850f2dd3bca2c59e6e041fb8bc3da39be/pydantic_core-2.46.2-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:b089a81c58e6ea0485562bbbbbca4f65c0549521606d5ef27fba217aac9b665a", size = 2166130, upload-time = "2026-04-17T09:10:03.804Z" }, - { url = "https://files.pythonhosted.org/packages/29/83/92d3fd0e0156cad2e3cb5c26de73794af78ac9fa0c22ab666e566dd67061/pydantic_core-2.46.2-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:7f700a6d6f64112ae9193709b84303bbab84424ad4b47d0253301aabce9dfc70", size = 2316605, upload-time = "2026-04-17T09:12:45.249Z" }, - { url = 
"https://files.pythonhosted.org/packages/97/f1/facffdb970981068219582e499b8d0871ed163ffcc6b347de5c412669e4c/pydantic_core-2.46.2-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:67db6814beaa5fefe91101ec7eb9efda613795767be96f7cf58b1ca8c9ca9972", size = 2358385, upload-time = "2026-04-17T09:09:54.657Z" }, - { url = "https://files.pythonhosted.org/packages/8b/a1/b8160b2f22b2199467bc68581a4ed380643c16b348a27d6165c6c242d694/pydantic_core-2.46.2-cp314-cp314t-win32.whl", hash = "sha256:32fbc7447be8e3be99bf7869f7066308f16be55b61f9882c2cefc7931f5c7664", size = 1942373, upload-time = "2026-04-17T09:12:59.594Z" }, - { url = "https://files.pythonhosted.org/packages/0d/90/db89acabe5b150e11d1b59fe3d947dda2ef6abbfef5c82f056ff63802f5d/pydantic_core-2.46.2-cp314-cp314t-win_amd64.whl", hash = "sha256:b317a2b97019c0b95ce99f4f901ae383f40132da6706cdf1731066a73394c25c", size = 2052078, upload-time = "2026-04-17T09:10:19.96Z" }, - { url = "https://files.pythonhosted.org/packages/97/32/e19b83ceb07a3f1bb21798407790bbc9a31740158fd132b94139cb84e16c/pydantic_core-2.46.2-cp314-cp314t-win_arm64.whl", hash = "sha256:7dcb9d40930dfad7ab6b20bcc6ca9d2b030b0f347a0cd9909b54bd53ead521b1", size = 2016941, upload-time = "2026-04-17T09:12:34.447Z" }, - { url = "https://files.pythonhosted.org/packages/f3/d2/66c146f421178641bda880b0267c0d57dd84f5fec9ecc8e46be17b480742/pydantic_core-2.46.2-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:e9fcabd1857492b5bf16f90258babde50f618f55d046b1309972da2396321ff9", size = 2091621, upload-time = "2026-04-17T09:12:47.501Z" }, - { url = "https://files.pythonhosted.org/packages/ee/b2/c28419aa9fc8055f4ac8e801d1d11c6357351bfa4321ed9bafab3eb98087/pydantic_core-2.46.2-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:fb3ec2c7f54c07b30d89983ce78dc32c37dd06a972448b8716d609493802d628", size = 1937059, upload-time = "2026-04-17T09:10:53.554Z" }, - { url = 
"https://files.pythonhosted.org/packages/30/ce/cd0824a2db213dc17113291b7a09b9b0ccd9fbf97daa4b81548703341baf/pydantic_core-2.46.2-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130a6c837d819ef33e8c2bf702ed2c3429237ea69807f1140943d6f4bdaf52fa", size = 1997278, upload-time = "2026-04-17T09:12:23.784Z" }, - { url = "https://files.pythonhosted.org/packages/c9/69/47283fe3c0c967d3e9e9cd6c42b70907610c8a6f8d6e8381f1bb55f8006c/pydantic_core-2.46.2-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2e25417cec5cd9bddb151e33cb08c50160f317479ecc02b22a95ec18f8fe004", size = 2147096, upload-time = "2026-04-17T09:12:43.124Z" }, + { url = "https://files.pythonhosted.org/packages/4b/cb/5b47425556ecc1f3fe18ed2a0083188aa46e1dd812b06e406475b3a5d536/pydantic_core-2.46.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b11b59b3eee90a80a36701ddb4576d9ae31f93f05cb9e277ceaa09e6bf074a67", size = 2101946, upload-time = "2026-04-20T14:40:52.581Z" }, + { url = "https://files.pythonhosted.org/packages/a1/4f/2fb62c2267cae99b815bbf4a7b9283812c88ca3153ef29f7707200f1d4e5/pydantic_core-2.46.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:af8653713055ea18a3abc1537fe2ebc42f5b0bbb768d1eb79fd74eb47c0ac089", size = 1951612, upload-time = "2026-04-20T14:42:42.996Z" }, + { url = "https://files.pythonhosted.org/packages/50/6e/b7348fd30d6556d132cddd5bd79f37f96f2601fe0608afac4f5fb01ec0b3/pydantic_core-2.46.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75a519dab6d63c514f3a81053e5266c549679e4aa88f6ec57f2b7b854aceb1b0", size = 1977027, upload-time = "2026-04-20T14:42:02.001Z" }, + { url = "https://files.pythonhosted.org/packages/82/11/31d60ee2b45540d3fb0b29302a393dbc01cd771c473f5b5147bcd353e593/pydantic_core-2.46.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a6cd87cb1575b1ad05ba98894c5b5c96411ef678fa2f6ed2576607095b8d9789", size = 2063008, 
upload-time = "2026-04-20T14:44:17.952Z" }, + { url = "https://files.pythonhosted.org/packages/8a/db/3a9d1957181b59258f44a2300ab0f0be9d1e12d662a4f57bb31250455c52/pydantic_core-2.46.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f80a55484b8d843c8ada81ebf70a682f3f00a3d40e378c06cf17ecb44d280d7d", size = 2233082, upload-time = "2026-04-20T14:40:57.934Z" }, + { url = "https://files.pythonhosted.org/packages/9c/e1/3277c38792aeb5cfb18c2f0c5785a221d9ff4e149abbe1184d53d5f72273/pydantic_core-2.46.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3861f1731b90c50a3266316b9044f5c9b405eecb8e299b0a7120596334e4fe9c", size = 2304615, upload-time = "2026-04-20T14:42:12.584Z" }, + { url = "https://files.pythonhosted.org/packages/5e/d5/e3d9717c9eba10855325650afd2a9cba8e607321697f18953af9d562da2f/pydantic_core-2.46.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb528e295ed31570ac3dcc9bfdd6e0150bc11ce6168ac87a8082055cf1a67395", size = 2094380, upload-time = "2026-04-20T14:43:05.522Z" }, + { url = "https://files.pythonhosted.org/packages/a1/20/abac35dedcbfd66c6f0b03e4e3564511771d6c9b7ede10a362d03e110d9b/pydantic_core-2.46.3-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:367508faa4973b992b271ba1494acaab36eb7e8739d1e47be5035fb1ea225396", size = 2135429, upload-time = "2026-04-20T14:41:55.549Z" }, + { url = "https://files.pythonhosted.org/packages/6c/a5/41bfd1df69afad71b5cf0535055bccc73022715ad362edbc124bc1e021d7/pydantic_core-2.46.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ad3c826fe523e4becf4fe39baa44286cff85ef137c729a2c5e269afbfd0905d", size = 2174582, upload-time = "2026-04-20T14:41:45.96Z" }, + { url = "https://files.pythonhosted.org/packages/79/65/38d86ea056b29b2b10734eb23329b7a7672ca604df4f2b6e9c02d4ee22fe/pydantic_core-2.46.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ec638c5d194ef8af27db69f16c954a09797c0dc25015ad6123eb2c73a4d271ca", size = 
2187533, upload-time = "2026-04-20T14:40:55.367Z" }, + { url = "https://files.pythonhosted.org/packages/b6/55/a1129141678a2026badc539ad1dee0a71d06f54c2f06a4bd68c030ac781b/pydantic_core-2.46.3-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:28ed528c45446062ee66edb1d33df5d88828ae167de76e773a3c7f64bd14e976", size = 2332985, upload-time = "2026-04-20T14:44:13.05Z" }, + { url = "https://files.pythonhosted.org/packages/d7/60/cb26f4077719f709e54819f4e8e1d43f4091f94e285eb6bd21e1190a7b7c/pydantic_core-2.46.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aed19d0c783886d5bd86d80ae5030006b45e28464218747dcf83dabfdd092c7b", size = 2373670, upload-time = "2026-04-20T14:41:53.421Z" }, + { url = "https://files.pythonhosted.org/packages/6b/7e/c3f21882bdf1d8d086876f81b5e296206c69c6082551d776895de7801fa0/pydantic_core-2.46.3-cp312-cp312-win32.whl", hash = "sha256:06d5d8820cbbdb4147578c1fe7ffcd5b83f34508cb9f9ab76e807be7db6ff0a4", size = 1966722, upload-time = "2026-04-20T14:44:30.588Z" }, + { url = "https://files.pythonhosted.org/packages/57/be/6b5e757b859013ebfbd7adba02f23b428f37c86dcbf78b5bb0b4ffd36e99/pydantic_core-2.46.3-cp312-cp312-win_amd64.whl", hash = "sha256:c3212fda0ee959c1dd04c60b601ec31097aaa893573a3a1abd0a47bcac2968c1", size = 2072970, upload-time = "2026-04-20T14:42:54.248Z" }, + { url = "https://files.pythonhosted.org/packages/bf/f8/a989b21cc75e9a32d24192ef700eea606521221a89faa40c919ce884f2b1/pydantic_core-2.46.3-cp312-cp312-win_arm64.whl", hash = "sha256:f1f8338dd7a7f31761f1f1a3c47503a9a3b34eea3c8b01fa6ee96408affb5e72", size = 2035963, upload-time = "2026-04-20T14:44:20.4Z" }, + { url = "https://files.pythonhosted.org/packages/9b/3c/9b5e8eb9821936d065439c3b0fb1490ffa64163bfe7e1595985a47896073/pydantic_core-2.46.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:12bc98de041458b80c86c56b24df1d23832f3e166cbaff011f25d187f5c62c37", size = 2102109, upload-time = "2026-04-20T14:41:24.219Z" }, + { url = 
"https://files.pythonhosted.org/packages/91/97/1c41d1f5a19f241d8069f1e249853bcce378cdb76eec8ab636d7bc426280/pydantic_core-2.46.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:85348b8f89d2c3508b65b16c3c33a4da22b8215138d8b996912bb1532868885f", size = 1951820, upload-time = "2026-04-20T14:42:14.236Z" }, + { url = "https://files.pythonhosted.org/packages/30/b4/d03a7ae14571bc2b6b3c7b122441154720619afe9a336fa3a95434df5e2f/pydantic_core-2.46.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1105677a6df914b1fb71a81b96c8cce7726857e1717d86001f29be06a25ee6f8", size = 1977785, upload-time = "2026-04-20T14:42:31.648Z" }, + { url = "https://files.pythonhosted.org/packages/ae/0c/4086f808834b59e3c8f1aa26df8f4b6d998cdcf354a143d18ef41529d1fe/pydantic_core-2.46.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:87082cd65669a33adeba5470769e9704c7cf026cc30afb9cc77fd865578ebaad", size = 2062761, upload-time = "2026-04-20T14:40:37.093Z" }, + { url = "https://files.pythonhosted.org/packages/fa/71/a649be5a5064c2df0db06e0a512c2281134ed2fcc981f52a657936a7527c/pydantic_core-2.46.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60e5f66e12c4f5212d08522963380eaaeac5ebd795826cfd19b2dfb0c7a52b9c", size = 2232989, upload-time = "2026-04-20T14:42:59.254Z" }, + { url = "https://files.pythonhosted.org/packages/a2/84/7756e75763e810b3a710f4724441d1ecc5883b94aacb07ca71c5fb5cfb69/pydantic_core-2.46.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b6cdf19bf84128d5e7c37e8a73a0c5c10d51103a650ac585d42dd6ae233f2b7f", size = 2303975, upload-time = "2026-04-20T14:41:32.287Z" }, + { url = "https://files.pythonhosted.org/packages/6c/35/68a762e0c1e31f35fa0dac733cbd9f5b118042853698de9509c8e5bf128b/pydantic_core-2.46.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:031bb17f4885a43773c8c763089499f242aee2ea85cf17154168775dccdecf35", size = 2095325, upload-time = 
"2026-04-20T14:42:47.685Z" }, + { url = "https://files.pythonhosted.org/packages/77/bf/1bf8c9a8e91836c926eae5e3e51dce009bf495a60ca56060689d3df3f340/pydantic_core-2.46.3-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:bcf2a8b2982a6673693eae7348ef3d8cf3979c1d63b54fca7c397a635cc68687", size = 2133368, upload-time = "2026-04-20T14:41:22.766Z" }, + { url = "https://files.pythonhosted.org/packages/e5/50/87d818d6bab915984995157ceb2380f5aac4e563dddbed6b56f0ed057aba/pydantic_core-2.46.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28e8cf2f52d72ced402a137145923a762cbb5081e48b34312f7a0c8f55928ec3", size = 2173908, upload-time = "2026-04-20T14:42:52.044Z" }, + { url = "https://files.pythonhosted.org/packages/91/88/a311fb306d0bd6185db41fa14ae888fb81d0baf648a761ae760d30819d33/pydantic_core-2.46.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:17eaface65d9fc5abb940003020309c1bf7a211f5f608d7870297c367e6f9022", size = 2186422, upload-time = "2026-04-20T14:43:29.55Z" }, + { url = "https://files.pythonhosted.org/packages/8f/79/28fd0d81508525ab2054fef7c77a638c8b5b0afcbbaeee493cf7c3fef7e1/pydantic_core-2.46.3-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:93fd339f23408a07e98950a89644f92c54d8729719a40b30c0a30bb9ebc55d23", size = 2332709, upload-time = "2026-04-20T14:42:16.134Z" }, + { url = "https://files.pythonhosted.org/packages/b3/21/795bf5fe5c0f379308b8ef19c50dedab2e7711dbc8d0c2acf08f1c7daa05/pydantic_core-2.46.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:23cbdb3aaa74dfe0837975dbf69b469753bbde8eacace524519ffdb6b6e89eb7", size = 2372428, upload-time = "2026-04-20T14:41:10.974Z" }, + { url = "https://files.pythonhosted.org/packages/45/b3/ed14c659cbe7605e3ef063077680a64680aec81eb1a04763a05190d49b7f/pydantic_core-2.46.3-cp313-cp313-win32.whl", hash = "sha256:610eda2e3838f401105e6326ca304f5da1e15393ae25dacae5c5c63f2c275b13", size = 1965601, upload-time = "2026-04-20T14:41:42.128Z" }, + { url = 
"https://files.pythonhosted.org/packages/ef/bb/adb70d9a762ddd002d723fbf1bd492244d37da41e3af7b74ad212609027e/pydantic_core-2.46.3-cp313-cp313-win_amd64.whl", hash = "sha256:68cc7866ed863db34351294187f9b729964c371ba33e31c26f478471c52e1ed0", size = 2071517, upload-time = "2026-04-20T14:43:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/52/eb/66faefabebfe68bd7788339c9c9127231e680b11906368c67ce112fdb47f/pydantic_core-2.46.3-cp313-cp313-win_arm64.whl", hash = "sha256:f64b5537ac62b231572879cd08ec05600308636a5d63bcbdb15063a466977bec", size = 2035802, upload-time = "2026-04-20T14:43:38.507Z" }, + { url = "https://files.pythonhosted.org/packages/7f/db/a7bcb4940183fda36022cd18ba8dd12f2dff40740ec7b58ce7457befa416/pydantic_core-2.46.3-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:afa3aa644f74e290cdede48a7b0bee37d1c35e71b05105f6b340d484af536d9b", size = 2097614, upload-time = "2026-04-20T14:44:38.374Z" }, + { url = "https://files.pythonhosted.org/packages/24/35/e4066358a22e3e99519db370494c7528f5a2aa1367370e80e27e20283543/pydantic_core-2.46.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ced3310e51aa425f7f77da8bbbb5212616655bedbe82c70944320bc1dbe5e018", size = 1951896, upload-time = "2026-04-20T14:40:53.996Z" }, + { url = "https://files.pythonhosted.org/packages/87/92/37cf4049d1636996e4b888c05a501f40a43ff218983a551d57f9d5e14f0d/pydantic_core-2.46.3-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e29908922ce9da1a30b4da490bd1d3d82c01dcfdf864d2a74aacee674d0bfa34", size = 1979314, upload-time = "2026-04-20T14:41:49.446Z" }, + { url = "https://files.pythonhosted.org/packages/d8/36/9ff4d676dfbdfb2d591cf43f3d90ded01e15b1404fd101180ed2d62a2fd3/pydantic_core-2.46.3-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c9ff69140423eea8ed2d5477df3ba037f671f5e897d206d921bc9fdc39613e7", size = 2056133, upload-time = "2026-04-20T14:42:23.574Z" }, + { url = 
"https://files.pythonhosted.org/packages/bc/f0/405b442a4d7ba855b06eec8b2bf9c617d43b8432d099dfdc7bf999293495/pydantic_core-2.46.3-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b675ab0a0d5b1c8fdb81195dc5bcefea3f3c240871cdd7ff9a2de8aa50772eb2", size = 2228726, upload-time = "2026-04-20T14:44:22.816Z" }, + { url = "https://files.pythonhosted.org/packages/e7/f8/65cd92dd5a0bd89ba277a98ecbfaf6fc36bbd3300973c7a4b826d6ab1391/pydantic_core-2.46.3-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0087084960f209a9a4af50ecd1fb063d9ad3658c07bb81a7a53f452dacbfb2ba", size = 2301214, upload-time = "2026-04-20T14:44:48.792Z" }, + { url = "https://files.pythonhosted.org/packages/fd/86/ef96a4c6e79e7a2d0410826a68fbc0eccc0fd44aa733be199d5fcac3bb87/pydantic_core-2.46.3-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed42e6cc8e1b0e2b9b96e2276bad70ae625d10d6d524aed0c93de974ae029f9f", size = 2099927, upload-time = "2026-04-20T14:41:40.196Z" }, + { url = "https://files.pythonhosted.org/packages/6d/53/269caf30e0096e0a8a8f929d1982a27b3879872cca2d917d17c2f9fdf4fe/pydantic_core-2.46.3-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:f1771ce258afb3e4201e67d154edbbae712a76a6081079fe247c2f53c6322c22", size = 2128789, upload-time = "2026-04-20T14:41:15.868Z" }, + { url = "https://files.pythonhosted.org/packages/00/b0/1a6d9b6a587e118482910c244a1c5acf4d192604174132efd12bf0ac486f/pydantic_core-2.46.3-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a7610b6a5242a6c736d8ad47fd5fff87fcfe8f833b281b1c409c3d6835d9227f", size = 2173815, upload-time = "2026-04-20T14:44:25.152Z" }, + { url = "https://files.pythonhosted.org/packages/87/56/e7e00d4041a7e62b5a40815590114db3b535bf3ca0bf4dca9f16cef25246/pydantic_core-2.46.3-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:ff5e7783bcc5476e1db448bf268f11cb257b1c276d3e89f00b5727be86dd0127", size = 2181608, upload-time = "2026-04-20T14:41:28.933Z" }, + { 
url = "https://files.pythonhosted.org/packages/e8/22/4bd23c3d41f7c185d60808a1de83c76cf5aeabf792f6c636a55c3b1ec7f9/pydantic_core-2.46.3-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:9d2e32edcc143bc01e95300671915d9ca052d4f745aa0a49c48d4803f8a85f2c", size = 2326968, upload-time = "2026-04-20T14:42:03.962Z" }, + { url = "https://files.pythonhosted.org/packages/24/ac/66cd45129e3915e5ade3b292cb3bc7fd537f58f8f8dbdaba6170f7cabb74/pydantic_core-2.46.3-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6e42d83d1c6b87fa56b521479cff237e626a292f3b31b6345c15a99121b454c1", size = 2369842, upload-time = "2026-04-20T14:41:35.52Z" }, + { url = "https://files.pythonhosted.org/packages/a2/51/dd4248abb84113615473aa20d5545b7c4cd73c8644003b5259686f93996c/pydantic_core-2.46.3-cp314-cp314-win32.whl", hash = "sha256:07bc6d2a28c3adb4f7c6ae46aa4f2d2929af127f587ed44057af50bf1ce0f505", size = 1959661, upload-time = "2026-04-20T14:41:00.042Z" }, + { url = "https://files.pythonhosted.org/packages/20/eb/59980e5f1ae54a3b86372bd9f0fa373ea2d402e8cdcd3459334430f91e91/pydantic_core-2.46.3-cp314-cp314-win_amd64.whl", hash = "sha256:8940562319bc621da30714617e6a7eaa6b98c84e8c685bcdc02d7ed5e7c7c44e", size = 2071686, upload-time = "2026-04-20T14:43:16.471Z" }, + { url = "https://files.pythonhosted.org/packages/8c/db/1cf77e5247047dfee34bc01fa9bca134854f528c8eb053e144298893d370/pydantic_core-2.46.3-cp314-cp314-win_arm64.whl", hash = "sha256:5dcbbcf4d22210ced8f837c96db941bdb078f419543472aca5d9a0bb7cddc7df", size = 2026907, upload-time = "2026-04-20T14:43:31.732Z" }, + { url = "https://files.pythonhosted.org/packages/57/c0/b3df9f6a543276eadba0a48487b082ca1f201745329d97dbfa287034a230/pydantic_core-2.46.3-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:d0fe3dce1e836e418f912c1ad91c73357d03e556a4d286f441bf34fed2dbeecf", size = 2095047, upload-time = "2026-04-20T14:42:37.982Z" }, + { url = 
"https://files.pythonhosted.org/packages/66/57/886a938073b97556c168fd99e1a7305bb363cd30a6d2c76086bf0587b32a/pydantic_core-2.46.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9ce92e58abc722dac1bf835a6798a60b294e48eb0e625ec9fd994b932ac5feee", size = 1934329, upload-time = "2026-04-20T14:43:49.655Z" }, + { url = "https://files.pythonhosted.org/packages/0b/7c/b42eaa5c34b13b07ecb51da21761297a9b8eb43044c864a035999998f328/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a03e6467f0f5ab796a486146d1b887b2dc5e5f9b3288898c1b1c3ad974e53e4a", size = 1974847, upload-time = "2026-04-20T14:42:10.737Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9b/92b42db6543e7de4f99ae977101a2967b63122d4b6cf7773812da2d7d5b5/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2798b6ba041b9d70acfb9071a2ea13c8456dd1e6a5555798e41ba7b0790e329c", size = 2041742, upload-time = "2026-04-20T14:40:44.262Z" }, + { url = "https://files.pythonhosted.org/packages/0f/19/46fbe1efabb5aa2834b43b9454e70f9a83ad9c338c1291e48bdc4fecf167/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9be3e221bdc6d69abf294dcf7aff6af19c31a5cdcc8f0aa3b14be29df4bd03b1", size = 2236235, upload-time = "2026-04-20T14:41:27.307Z" }, + { url = "https://files.pythonhosted.org/packages/77/da/b3f95bc009ad60ec53120f5d16c6faa8cabdbe8a20d83849a1f2b8728148/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f13936129ce841f2a5ddf6f126fea3c43cd128807b5a59588c37cf10178c2e64", size = 2282633, upload-time = "2026-04-20T14:44:33.271Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6e/401336117722e28f32fb8220df676769d28ebdf08f2f4469646d404c43a3/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28b5f2ef03416facccb1c6ef744c69793175fd27e44ef15669201601cf423acb", size = 2109679, upload-time = 
"2026-04-20T14:44:41.065Z" }, + { url = "https://files.pythonhosted.org/packages/fc/53/b289f9bc8756a32fe718c46f55afaeaf8d489ee18d1a1e7be1db73f42cc4/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:830d1247d77ad23852314f069e9d7ddafeec5f684baf9d7e7065ed46a049c4e6", size = 2108342, upload-time = "2026-04-20T14:42:50.144Z" }, + { url = "https://files.pythonhosted.org/packages/10/5b/8292fc7c1f9111f1b2b7c1b0dcf1179edcd014fc3ea4517499f50b829d71/pydantic_core-2.46.3-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0793c90c1a3c74966e7975eaef3ed30ebdff3260a0f815a62a22adc17e4c01c", size = 2157208, upload-time = "2026-04-20T14:42:08.133Z" }, + { url = "https://files.pythonhosted.org/packages/2b/9e/f80044e9ec07580f057a89fc131f78dda7a58751ddf52bbe05eaf31db50f/pydantic_core-2.46.3-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:d2d0aead851b66f5245ec0c4fb2612ef457f8bbafefdf65a2bf9d6bac6140f47", size = 2167237, upload-time = "2026-04-20T14:42:25.412Z" }, + { url = "https://files.pythonhosted.org/packages/f8/84/6781a1b037f3b96be9227edbd1101f6d3946746056231bf4ac48cdff1a8d/pydantic_core-2.46.3-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:2f40e4246676beb31c5ce77c38a55ca4e465c6b38d11ea1bd935420568e0b1ab", size = 2312540, upload-time = "2026-04-20T14:40:40.313Z" }, + { url = "https://files.pythonhosted.org/packages/3e/db/19c0839feeb728e7df03255581f198dfdf1c2aeb1e174a8420b63c5252e5/pydantic_core-2.46.3-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:cf489cf8986c543939aeee17a09c04d6ffb43bfef8ca16fcbcc5cfdcbed24dba", size = 2369556, upload-time = "2026-04-20T14:41:09.427Z" }, + { url = "https://files.pythonhosted.org/packages/e0/15/3228774cb7cd45f5f721ddf1b2242747f4eb834d0c491f0c02d606f09fed/pydantic_core-2.46.3-cp314-cp314t-win32.whl", hash = "sha256:ffe0883b56cfc05798bf994164d2b2ff03efe2d22022a2bb080f3b626176dd56", size = 1949756, upload-time = "2026-04-20T14:41:25.717Z" }, + { url = 
"https://files.pythonhosted.org/packages/b8/2a/c79cf53fd91e5a87e30d481809f52f9a60dd221e39de66455cf04deaad37/pydantic_core-2.46.3-cp314-cp314t-win_amd64.whl", hash = "sha256:706d9d0ce9cf4593d07270d8e9f53b161f90c57d315aeec4fb4fd7a8b10240d8", size = 2051305, upload-time = "2026-04-20T14:43:18.627Z" }, + { url = "https://files.pythonhosted.org/packages/0b/db/d8182a7f1d9343a032265aae186eb063fe26ca4c40f256b21e8da4498e89/pydantic_core-2.46.3-cp314-cp314t-win_arm64.whl", hash = "sha256:77706aeb41df6a76568434701e0917da10692da28cb69d5fb6919ce5fdb07374", size = 2026310, upload-time = "2026-04-20T14:41:01.778Z" }, + { url = "https://files.pythonhosted.org/packages/34/42/f426db557e8ab2791bc7562052299944a118655496fbff99914e564c0a94/pydantic_core-2.46.3-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:b12dd51f1187c2eb489af8e20f880362db98e954b54ab792fa5d92e8bcc6b803", size = 2091877, upload-time = "2026-04-20T14:43:27.091Z" }, + { url = "https://files.pythonhosted.org/packages/5c/4f/86a832a9d14df58e663bfdf4627dc00d3317c2bd583c4fb23390b0f04b8e/pydantic_core-2.46.3-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:f00a0961b125f1a47af7bcc17f00782e12f4cd056f83416006b30111d941dfa3", size = 1932428, upload-time = "2026-04-20T14:40:45.781Z" }, + { url = "https://files.pythonhosted.org/packages/11/1a/fe857968954d93fb78e0d4b6df5c988c74c4aaa67181c60be7cfe327c0ca/pydantic_core-2.46.3-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57697d7c056aca4bbb680200f96563e841a6386ac1129370a0102592f4dddff5", size = 1997550, upload-time = "2026-04-20T14:44:02.425Z" }, + { url = "https://files.pythonhosted.org/packages/17/eb/9d89ad2d9b0ba8cd65393d434471621b98912abb10fbe1df08e480ba57b5/pydantic_core-2.46.3-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd35aa21299def8db7ef4fe5c4ff862941a9a158ca7b63d61e66fe67d30416b4", size = 2137657, upload-time = 
"2026-04-20T14:42:45.149Z" }, ] [[package]] @@ -4889,14 +4837,14 @@ wheels = [ [[package]] name = "pyngrok" -version = "8.0.0" +version = "8.1.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pyyaml" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ec/7a/6f9b08044be356d228533f6a8454377e2d31dd1309adf13209906019762d/pyngrok-8.0.0.tar.gz", hash = "sha256:6e7aaf90b43086ad25508a1122423608003f712d9988319ddf7be50431028101", size = 42082, upload-time = "2026-04-01T15:43:43.333Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8d/ae/6664934258773db4666e65730c43b4b06730f78d49861a9a04ebcf4742ff/pyngrok-8.1.2.tar.gz", hash = "sha256:3b5383ec7dc4646ac0d046435eb58c6cd1cbc9acad70e6dee012b05dc25b070a", size = 45078, upload-time = "2026-04-29T15:16:53.969Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/53/ae/d8cb883f717627bd8e2fe2a65ab6544cd23789a63d61411c037a65246982/pyngrok-8.0.0-py3-none-any.whl", hash = "sha256:bdc34c7a0a7e9bad60e44b9dcc7661c055c0a79dc029c7100ed15669c626d5c1", size = 24467, upload-time = "2026-04-01T15:43:42.3Z" }, + { url = "https://files.pythonhosted.org/packages/07/5c/7733776a6a9704bffee19d203f9be80f25f78a5011a9863971be60fe2763/pyngrok-8.1.2-py3-none-any.whl", hash = "sha256:849e9f55706288b00eb28f4ae8ea16b05e52c609f80ef4a88ca23b385f2f9178", size = 25935, upload-time = "2026-04-29T15:16:52.088Z" }, ] [[package]] @@ -5173,11 +5121,11 @@ wheels = [ [[package]] name = "pytz" -version = "2026.1.post1" +version = "2026.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/56/db/b8721d71d945e6a8ac63c0fc900b2067181dbb50805958d4d4661cf7d277/pytz-2026.1.post1.tar.gz", hash = "sha256:3378dde6a0c3d26719182142c56e60c7f9af7e968076f31aae569d72a0358ee1", size = 321088, upload-time = "2026-03-03T07:47:50.683Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/ff/46/dd499ec9038423421951e4fad73051febaa13d2df82b4064f87af8b8c0c3/pytz-2026.2.tar.gz", hash = "sha256:0e60b47b29f21574376f218fe21abc009894a2321ea16c6754f3cad6eb7cdd6a", size = 320861, upload-time = "2026-05-04T01:35:29.667Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/10/99/781fe0c827be2742bcc775efefccb3b048a3a9c6ce9aec0cbf4a101677e5/pytz-2026.1.post1-py2.py3-none-any.whl", hash = "sha256:f2fd16142fda348286a75e1a524be810bb05d444e5a081f37f7affc635035f7a", size = 510489, upload-time = "2026-03-03T07:47:49.167Z" }, + { url = "https://files.pythonhosted.org/packages/ec/dd/96da98f892250475bdf2328112d7468abdd4acc7b902b6af23f4ed958ea0/pytz-2026.2-py2.py3-none-any.whl", hash = "sha256:04156e608bee23d3792fd45c94ae47fae1036688e75032eea2e3bf0323d1f126", size = 510141, upload-time = "2026-05-04T01:35:27.408Z" }, ] [[package]] @@ -5675,27 +5623,27 @@ wheels = [ [[package]] name = "ruff" -version = "0.15.11" +version = "0.15.12" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e4/8d/192f3d7103816158dfd5ea50d098ef2aec19194e6cbccd4b3485bdb2eb2d/ruff-0.15.11.tar.gz", hash = "sha256:f092b21708bf0e7437ce9ada249dfe688ff9a0954fc94abab05dcea7dcd29c33", size = 4637264, upload-time = "2026-04-16T18:46:26.58Z" } +sdist = { url = "https://files.pythonhosted.org/packages/99/43/3291f1cc9106f4c63bdce7a8d0df5047fe8422a75b091c16b5e9355e0b11/ruff-0.15.12.tar.gz", hash = "sha256:ecea26adb26b4232c0c2ca19ccbc0083a68344180bba2a600605538ce51a40a6", size = 4643852, upload-time = "2026-04-24T18:17:14.305Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/02/1e/6aca3427f751295ab011828e15e9bf452200ac74484f1db4be0197b8170b/ruff-0.15.11-py3-none-linux_armv6l.whl", hash = "sha256:e927cfff503135c558eb581a0c9792264aae9507904eb27809cdcff2f2c847b7", size = 10607943, upload-time = "2026-04-16T18:46:05.967Z" }, - { url = 
"https://files.pythonhosted.org/packages/e7/26/1341c262e74f36d4e84f3d6f4df0ac68cd53331a66bfc5080daa17c84c0b/ruff-0.15.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7a1b5b2938d8f890b76084d4fa843604d787a912541eae85fd7e233398bbb73e", size = 10988592, upload-time = "2026-04-16T18:46:00.742Z" }, - { url = "https://files.pythonhosted.org/packages/03/71/850b1d6ffa9564fbb6740429bad53df1094082fe515c8c1e74b6d8d05f18/ruff-0.15.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d4176f3d194afbdaee6e41b9ccb1a2c287dba8700047df474abfbe773825d1cb", size = 10338501, upload-time = "2026-04-16T18:46:03.723Z" }, - { url = "https://files.pythonhosted.org/packages/f2/11/cc1284d3e298c45a817a6aadb6c3e1d70b45c9b36d8d9cce3387b495a03a/ruff-0.15.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b17c886fb88203ced3afe7f14e8d5ae96e9d2f4ccc0ee66aa19f2c2675a27e4", size = 10670693, upload-time = "2026-04-16T18:46:41.941Z" }, - { url = "https://files.pythonhosted.org/packages/ce/9e/f8288b034ab72b371513c13f9a41d9ba3effac54e24bfb467b007daee2ca/ruff-0.15.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:49fafa220220afe7758a487b048de4c8f9f767f37dfefad46b9dd06759d003eb", size = 10416177, upload-time = "2026-04-16T18:46:21.717Z" }, - { url = "https://files.pythonhosted.org/packages/85/71/504d79abfd3d92532ba6bbe3d1c19fada03e494332a59e37c7c2dabae427/ruff-0.15.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2ab8427e74a00d93b8bda1307b1e60970d40f304af38bccb218e056c220120d", size = 11221886, upload-time = "2026-04-16T18:46:15.086Z" }, - { url = "https://files.pythonhosted.org/packages/43/5a/947e6ab7a5ad603d65b474be15a4cbc6d29832db5d762cd142e4e3a74164/ruff-0.15.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:195072c0c8e1fc8f940652073df082e37a5d9cb43b4ab1e4d0566ab8977a13b7", size = 12075183, upload-time = "2026-04-16T18:46:07.944Z" }, - { url = 
"https://files.pythonhosted.org/packages/9f/a1/0b7bb6268775fdd3a0818aee8efd8f5b4e231d24dd4d528ced2534023182/ruff-0.15.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a3a0996d486af3920dec930a2e7daed4847dfc12649b537a9335585ada163e9e", size = 11516575, upload-time = "2026-04-16T18:46:31.687Z" }, - { url = "https://files.pythonhosted.org/packages/30/c3/bb5168fc4d233cc06e95f482770d0f3c87945a0cd9f614b90ea8dc2f2833/ruff-0.15.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bef2cb556d509259f1fe440bb9cd33c756222cf0a7afe90d15edf0866702431", size = 11306537, upload-time = "2026-04-16T18:46:36.988Z" }, - { url = "https://files.pythonhosted.org/packages/e4/92/4cfae6441f3967317946f3b788136eecf093729b94d6561f963ed810c82e/ruff-0.15.11-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:030d921a836d7d4a12cf6e8d984a88b66094ccb0e0f17ddd55067c331191bf19", size = 11296813, upload-time = "2026-04-16T18:46:24.182Z" }, - { url = "https://files.pythonhosted.org/packages/43/26/972784c5dde8313acde8ac71ba8ac65475b85db4a2352a76c9934361f9bc/ruff-0.15.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0e783b599b4577788dbbb66b9addcef87e9a8832f4ce0c19e34bf55543a2f890", size = 10633136, upload-time = "2026-04-16T18:46:39.802Z" }, - { url = "https://files.pythonhosted.org/packages/5b/53/3985a4f185020c2f367f2e08a103032e12564829742a1b417980ce1514a0/ruff-0.15.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ae90592246625ba4a34349d68ec28d4400d75182b71baa196ddb9f82db025ef5", size = 10424701, upload-time = "2026-04-16T18:46:10.381Z" }, - { url = "https://files.pythonhosted.org/packages/d3/57/bf0dfb32241b56c83bb663a826133da4bf17f682ba8c096973065f6e6a68/ruff-0.15.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:1f111d62e3c983ed20e0ca2e800f8d77433a5b1161947df99a5c2a3fb60514f0", size = 10873887, upload-time = "2026-04-16T18:46:29.157Z" }, - { url = 
"https://files.pythonhosted.org/packages/02/05/e48076b2a57dc33ee8c7a957296f97c744ca891a8ffb4ffb1aaa3b3f517d/ruff-0.15.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:06f483d6646f59eaffba9ae30956370d3a886625f511a3108994000480621d1c", size = 11404316, upload-time = "2026-04-16T18:46:19.462Z" }, - { url = "https://files.pythonhosted.org/packages/88/27/0195d15fe7a897cbcba0904792c4b7c9fdd958456c3a17d2ea6093716a9a/ruff-0.15.11-py3-none-win32.whl", hash = "sha256:476a2aa56b7da0b73a3ee80b6b2f0e19cce544245479adde7baa65466664d5f3", size = 10655535, upload-time = "2026-04-16T18:46:12.47Z" }, - { url = "https://files.pythonhosted.org/packages/3a/5e/c927b325bd4c1d3620211a4b96f47864633199feed60fa936025ab27e090/ruff-0.15.11-py3-none-win_amd64.whl", hash = "sha256:8b6756d88d7e234fb0c98c91511aae3cd519d5e3ed271cae31b20f39cb2a12a3", size = 11779692, upload-time = "2026-04-16T18:46:17.268Z" }, - { url = "https://files.pythonhosted.org/packages/63/b6/aeadee5443e49baa2facd51131159fd6301cc4ccfc1541e4df7b021c37dd/ruff-0.15.11-py3-none-win_arm64.whl", hash = "sha256:063fed18cc1bbe0ee7393957284a6fe8b588c6a406a285af3ee3f46da2391ee4", size = 11032614, upload-time = "2026-04-16T18:46:34.487Z" }, + { url = "https://files.pythonhosted.org/packages/c3/6e/e78ffb61d4686f3d96ba3df2c801161843746dcbcbb17a1e927d4829312b/ruff-0.15.12-py3-none-linux_armv6l.whl", hash = "sha256:f86f176e188e94d6bdbc09f09bfd9dc729059ad93d0e7390b5a73efe19f8861c", size = 10640713, upload-time = "2026-04-24T18:17:22.841Z" }, + { url = "https://files.pythonhosted.org/packages/ae/08/a317bc231fb9e7b93e4ef3089501e51922ff88d6936ce5cf870c4fe55419/ruff-0.15.12-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:e3bcd123364c3770b8e1b7baaf343cc99a35f197c5c6e8af79015c666c423a6c", size = 11069267, upload-time = "2026-04-24T18:17:30.105Z" }, + { url = "https://files.pythonhosted.org/packages/aa/a4/f828e9718d3dce1f5f11c39c4f65afd32783c8b2aebb2e3d259e492c47bd/ruff-0.15.12-py3-none-macosx_11_0_arm64.whl", hash = 
"sha256:fe87510d000220aa1ed530d4448a7c696a0cae1213e5ec30e5874287b66557b5", size = 10397182, upload-time = "2026-04-24T18:17:07.177Z" }, + { url = "https://files.pythonhosted.org/packages/71/e0/3310fc6d1b5e1fdea22bf3b1b807c7e187b581021b0d7d4514cccdb5fb71/ruff-0.15.12-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84a1630093121375a3e2a95b4a6dc7b59e2b4ee76216e32d81aae550a832d002", size = 10758012, upload-time = "2026-04-24T18:16:55.759Z" }, + { url = "https://files.pythonhosted.org/packages/11/c1/a606911aee04c324ddaa883ae418f3569792fd3c4a10c50e0dd0a2311e1e/ruff-0.15.12-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fb129f40f114f089ebe0ca56c0d251cf2061b17651d464bb6478dc01e69f11f5", size = 10447479, upload-time = "2026-04-24T18:16:51.677Z" }, + { url = "https://files.pythonhosted.org/packages/9d/68/4201e8444f0894f21ab4aeeaee68aa4f10b51613514a20d80bd628d57e88/ruff-0.15.12-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0c862b172d695db7598426b8af465e7e9ac00a3ea2a3630ee67eb82e366aaa6", size = 11234040, upload-time = "2026-04-24T18:17:16.529Z" }, + { url = "https://files.pythonhosted.org/packages/34/ff/8a6d6cf4ccc23fd67060874e832c18919d1557a0611ebef03fdb01fff11e/ruff-0.15.12-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2849ea9f3484c3aca43a82f484210370319e7170df4dfe4843395ddf6c57bc33", size = 12087377, upload-time = "2026-04-24T18:17:04.944Z" }, + { url = "https://files.pythonhosted.org/packages/85/f6/c669cf73f5152f623d34e69866a46d5e6185816b19fcd5b6dd8a2d299922/ruff-0.15.12-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9e77c7e51c07fe396826d5969a5b846d9cd4c402535835fb6e21ce8b28fef847", size = 11367784, upload-time = "2026-04-24T18:17:25.409Z" }, + { url = "https://files.pythonhosted.org/packages/e8/39/c61d193b8a1daaa8977f7dea9e8d8ba866e02ea7b65d32f6861693aa4c12/ruff-0.15.12-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:83b2f4f2f3b1026b5fb449b467d9264bf22067b600f7b6f41fc5958909f449d0", size = 11344088, upload-time = "2026-04-24T18:17:12.258Z" }, + { url = "https://files.pythonhosted.org/packages/c2/8d/49afab3645e31e12c590acb6d3b5b69d7aab5b81926dbaf7461f9441f37a/ruff-0.15.12-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:9ba3b8f1afd7e2e43d8943e55f249e13f9682fde09711644a6e7290eb4f3e339", size = 11271770, upload-time = "2026-04-24T18:17:02.457Z" }, + { url = "https://files.pythonhosted.org/packages/46/06/33f41fe94403e2b755481cdfb9b7ef3e4e0ed031c4581124658d935d52b4/ruff-0.15.12-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e852ba9fdc890655e1d78f2df1499efbe0e54126bd405362154a75e2bde159c5", size = 10719355, upload-time = "2026-04-24T18:17:27.648Z" }, + { url = "https://files.pythonhosted.org/packages/0d/59/18aa4e014debbf559670e4048e39260a85c7fcee84acfd761ac01e7b8d35/ruff-0.15.12-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:dd8aed930da53780d22fc70bdf84452c843cf64f8cb4eb38984319c24c5cd5fd", size = 10462758, upload-time = "2026-04-24T18:17:32.347Z" }, + { url = "https://files.pythonhosted.org/packages/25/e7/cc9f16fd0f3b5fddcbd7ec3d6ae30c8f3fde1047f32a4093a98d633c6570/ruff-0.15.12-py3-none-musllinux_1_2_i686.whl", hash = "sha256:01da3988d225628b709493d7dc67c3b9b12c0210016b08690ef9bd27970b262b", size = 10953498, upload-time = "2026-04-24T18:17:20.674Z" }, + { url = "https://files.pythonhosted.org/packages/72/7a/a9ba7f98c7a575978698f4230c5e8cc54bbc761af34f560818f933dafa0c/ruff-0.15.12-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:9cae0f92bd5700d1213188b31cd3bdd2b315361296d10b96b8e2337d3d11f53e", size = 11447765, upload-time = "2026-04-24T18:17:09.755Z" }, + { url = "https://files.pythonhosted.org/packages/ea/f9/0ae446942c846b8266059ad8a30702a35afae55f5cdc54c5adf8d7afdc27/ruff-0.15.12-py3-none-win32.whl", hash = "sha256:d0185894e038d7043ba8fd6aee7499ece6462dc0ea9f1e260c7451807c714c20", size = 10657277, upload-time = "2026-04-24T18:17:18.591Z" }, + { url = 
"https://files.pythonhosted.org/packages/33/f1/9614e03e1cdcbf9437570b5400ced8a720b5db22b28d8e0f1bda429f660d/ruff-0.15.12-py3-none-win_amd64.whl", hash = "sha256:c87a162d61ab3adca47c03f7f717c68672edec7d1b5499e652331780fe74950d", size = 11837758, upload-time = "2026-04-24T18:17:00.113Z" }, + { url = "https://files.pythonhosted.org/packages/c0/98/6beb4b351e472e5f4c4613f7c35a5290b8be2497e183825310c4c3a3984b/ruff-0.15.12-py3-none-win_arm64.whl", hash = "sha256:a538f7a82d061cee7be55542aca1d86d1393d55d81d4fcc314370f4340930d4f", size = 11120821, upload-time = "2026-04-24T18:16:57.979Z" }, ] [[package]] @@ -5821,15 +5769,15 @@ wheels = [ [[package]] name = "sentry-sdk" -version = "2.58.0" +version = "2.59.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/26/b3/fb8291170d0e844173164709fc0fa0c221ed75a5da740c8746f2a83b4eb1/sentry_sdk-2.58.0.tar.gz", hash = "sha256:c1144d947352d54e5b7daa63596d9f848adf684989c06c4f5a659f0c85a18f6f", size = 438764, upload-time = "2026-04-13T17:23:26.265Z" } +sdist = { url = "https://files.pythonhosted.org/packages/65/e0/9bf5e5fc7442b10880f3ec0eff0ef4208b84a099606f343ec4f5445227fb/sentry_sdk-2.59.0.tar.gz", hash = "sha256:cd265808ef8bf3f3edf69b527c0a0b2b6b1322762679e55b8987db2e9584aec1", size = 447331, upload-time = "2026-05-04T12:19:06.538Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fa/eb/d875669993b762556ae8b2efd86219943b4c0864d22204d622a9aee3052b/sentry_sdk-2.58.0-py2.py3-none-any.whl", hash = "sha256:688d1c704ddecf382ea3326f21a67453d4caa95592d722b7c780a36a9d23109e", size = 460919, upload-time = "2026-04-13T17:23:24.675Z" }, + { url = "https://files.pythonhosted.org/packages/bf/00/b8cc413748fb6383d1582e7cda51314f99743351c462a92dc690d5b5853b/sentry_sdk-2.59.0-py2.py3-none-any.whl", hash = "sha256:abcf65ee9a9d9cdebf9ad369782408ecca9c1c792686ef06ba34f5ab233527fe", size = 468432, upload-time = 
"2026-05-04T12:19:04.741Z" }, ] [[package]] @@ -5969,7 +5917,7 @@ wheels = [ [[package]] name = "teleop" -version = "0.1.4" +version = "0.1.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "fastapi" }, @@ -5980,9 +5928,9 @@ dependencies = [ { name = "uvicorn", extra = ["standard"] }, { name = "websocket-client" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ec/8c/902ef4c0fa148325e6b19a5af63c3aac5927c67551efabcd5732fc446c6d/teleop-0.1.4.tar.gz", hash = "sha256:b5cedcff336c612a3f7e6f93e379e24979ed42070903b722f5fefe07c8fca3ce", size = 44051, upload-time = "2025-12-08T10:49:45.823Z" } +sdist = { url = "https://files.pythonhosted.org/packages/87/dc/312c19122c8e64fcff16dc8a74659b84ba8a7bcd3ef7b3c330cfc65a2a29/teleop-0.1.5.tar.gz", hash = "sha256:9f5367b167e0f67abe818f346c467671bd2c1ad653df604bdfb2fa69b2937da9", size = 44173, upload-time = "2026-04-19T21:17:42.795Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b0/9c/217176617df23f634b0388111adbeb17ccb0409072639a97512e6c1c818d/teleop-0.1.4-py3-none-any.whl", hash = "sha256:6b8013947b27b89dbce50f9231a57d29f2e59ea864807b1ce6611ea3ad1694f4", size = 42332, upload-time = "2025-12-08T10:49:44.531Z" }, + { url = "https://files.pythonhosted.org/packages/f2/d1/45c79fcbf2551f2035c375e81d560c4ac46a5bbdb1622583b559eedcfc4e/teleop-0.1.5-py3-none-any.whl", hash = "sha256:75c3e63bb9eed1ea8ca32b48086cea45fa5ae3eb022dd0dcf0d615cf0b0d58dc", size = 42380, upload-time = "2026-04-19T21:17:41.386Z" }, ] [[package]] @@ -6064,14 +6012,14 @@ wheels = [ [[package]] name = "tifffile" -version = "2026.4.11" +version = "2026.5.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d7/4a/e687f5957fead200faad58dbf9c9431a2bbb118040e96f5fb8a55f7ebc50/tifffile-2026.4.11.tar.gz", hash = "sha256:17758ff0c0d4db385792a083ad3ca51fcb0f4d942642f4d8f8bc1287fdcf17bc", size = 394956, upload-time = 
"2026-04-12T01:57:28.793Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/3e/695c7ab56be57814e369c1f38bc3f64b9dea0a83e867d00c0c9d613a9929/tifffile-2026.5.2.tar.gz", hash = "sha256:21b10227ede8493814a34676774797f721f487e36cb0530e7c3bd882caa87f5a", size = 429140, upload-time = "2026-05-02T20:19:31.497Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3f/9f/74f110b4271ded519c7add4341cbabc824de26817ff1c345b3109df9e99c/tifffile-2026.4.11-py3-none-any.whl", hash = "sha256:9b94ffeddb39e97601af646345e8808f885773de01b299e480ed6d3a41509ec9", size = 248227, upload-time = "2026-04-12T01:57:26.969Z" }, + { url = "https://files.pythonhosted.org/packages/b4/af/ce4df3ca29122d219c45d3e86e5ff9a9df03b8cf31afd76817b662c803a3/tifffile-2026.5.2-py3-none-any.whl", hash = "sha256:5129b53b826e768a5b1af26b765eeea75c2d0a227d2d12849617e0737588e105", size = 266420, upload-time = "2026-05-02T20:19:29.814Z" }, ] [[package]] @@ -6295,7 +6243,7 @@ wheels = [ [[package]] name = "transformers" -version = "5.3.0" +version = "5.5.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, @@ -6308,9 +6256,9 @@ dependencies = [ { name = "tqdm" }, { name = "typer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fc/1a/70e830d53ecc96ce69cfa8de38f163712d2b43ac52fbd743f39f56025c31/transformers-5.3.0.tar.gz", hash = "sha256:009555b364029da9e2946d41f1c5de9f15e6b1df46b189b7293f33a161b9c557", size = 8830831, upload-time = "2026-03-04T17:41:46.119Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a5/1e/1e244ab2ab50a863e6b52cc55761910567fa532b69a6740f6e99c5fdbd98/transformers-5.5.4.tar.gz", hash = "sha256:2e67cadba81fc7608cc07c4dd54f524820bc3d95b1cabd0ef3db7733c4f8b82e", size = 8227649, upload-time = "2026-04-13T16:55:55.181Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b8/88/ae8320064e32679a5429a2c9ebbc05c2bf32cefb6e076f9b07f6d685a9b4/transformers-5.3.0-py3-none-any.whl", hash = 
"sha256:50ac8c89c3c7033444fb3f9f53138096b997ebb70d4b5e50a2e810bf12d3d29a", size = 10661827, upload-time = "2026-03-04T17:41:42.722Z" }, + { url = "https://files.pythonhosted.org/packages/29/fb/162a66789c65e5afa3b051309240c26bf37fbc8fea285b4546ae747995a2/transformers-5.5.4-py3-none-any.whl", hash = "sha256:0bd6281b82966fe5a7a16f553ea517a9db1dee6284d7cb224dfd88fc0dd1c167", size = 10236696, upload-time = "2026-04-13T16:55:51.497Z" }, ] [[package]] @@ -6339,7 +6287,7 @@ wheels = [ [[package]] name = "typer" -version = "0.24.1" +version = "0.25.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-doc" }, @@ -6347,9 +6295,9 @@ dependencies = [ { name = "rich" }, { name = "shellingham" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f5/24/cb09efec5cc954f7f9b930bf8279447d24618bb6758d4f6adf2574c41780/typer-0.24.1.tar.gz", hash = "sha256:e39b4732d65fbdcde189ae76cf7cd48aeae72919dea1fdfc16593be016256b45", size = 118613, upload-time = "2026-02-21T16:54:40.609Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/51/9aed62104cea109b820bbd6c14245af756112017d309da813ef107d42e7e/typer-0.25.1.tar.gz", hash = "sha256:9616eb8853a09ffeabab1698952f33c6f29ffdbceb4eaeecf571880e8d7664cc", size = 122276, upload-time = "2026-04-30T19:32:16.964Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4a/91/48db081e7a63bb37284f9fbcefda7c44c277b18b0e13fbc36ea2335b71e6/typer-0.24.1-py3-none-any.whl", hash = "sha256:112c1f0ce578bfb4cab9ffdabc68f031416ebcc216536611ba21f04e9aa84c9e", size = 56085, upload-time = "2026-02-21T16:54:41.616Z" }, + { url = "https://files.pythonhosted.org/packages/3f/f9/2b3ff4e56e5fa7debfaf9eb135d0da96f3e9a1d5b27222223c7296336e5f/typer-0.25.1-py3-none-any.whl", hash = "sha256:75caa44ed46a03fb2dab8808753ffacdbfea88495e74c85a28c5eefcf5f39c89", size = 58409, upload-time = "2026-04-30T19:32:18.271Z" }, ] [[package]] @@ -6388,11 +6336,11 @@ wheels = [ [[package]] name = "tzdata" -version = "2026.1" 
+version = "2026.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/19/f5/cd531b2d15a671a40c0f66cf06bc3570a12cd56eef98960068ebbad1bf5a/tzdata-2026.1.tar.gz", hash = "sha256:67658a1903c75917309e753fdc349ac0efd8c27db7a0cb406a25be4840f87f98", size = 197639, upload-time = "2026-04-03T11:25:22.002Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/19/1b9b0e29f30c6d35cb345486df41110984ea67ae69dddbc0e8a100999493/tzdata-2026.2.tar.gz", hash = "sha256:9173fde7d80d9018e02a662e168e5a2d04f87c41ea174b139fbef642eda62d10", size = 198254, upload-time = "2026-04-24T15:22:08.651Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b0/70/d460bd685a170790ec89317e9bd33047988e4bce507b831f5db771e142de/tzdata-2026.1-py2.py3-none-any.whl", hash = "sha256:4b1d2be7ac37ceafd7327b961aa3a54e467efbdb563a23655fbfe0d39cfc42a9", size = 348952, upload-time = "2026-04-03T11:25:20.313Z" }, + { url = "https://files.pythonhosted.org/packages/ce/e4/dccd7f47c4b64213ac01ef921a1337ee6e30e8c6466046018326977efd95/tzdata-2026.2-py2.py3-none-any.whl", hash = "sha256:bbe9af844f658da81a5f95019480da3a89415801f6cc966806612cc7169bffe7", size = 349321, upload-time = "2026-04-24T15:22:05.876Z" }, ] [[package]] @@ -6424,15 +6372,15 @@ wheels = [ [[package]] name = "uvicorn" -version = "0.44.0" +version = "0.46.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "h11" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5e/da/6eee1ff8b6cbeed47eeb5229749168e81eb4b7b999a1a15a7176e51410c9/uvicorn-0.44.0.tar.gz", hash = "sha256:6c942071b68f07e178264b9152f1f16dfac5da85880c4ce06366a96d70d4f31e", size = 86947, upload-time = "2026-04-06T09:23:22.826Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1f/93/041fca8274050e40e6791f267d82e0e2e27dd165627bd640d3e0e378d877/uvicorn-0.46.0.tar.gz", hash = "sha256:fb9da0926999cc6cb22dc7cd71a94a632f078e6ae47ff683c5c420750fb7413d", size = 
88758, upload-time = "2026-04-23T07:16:00.151Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/23/a5bbd9600dd607411fa644c06ff4951bec3a4d82c4b852374024359c19c0/uvicorn-0.44.0-py3-none-any.whl", hash = "sha256:ce937c99a2cc70279556967274414c087888e8cec9f9c94644dfca11bd3ced89", size = 69425, upload-time = "2026-04-06T09:23:21.524Z" }, + { url = "https://files.pythonhosted.org/packages/31/a3/5b1562db76a5a488274b2332a97199b32d0442aca0ed193697fd47786316/uvicorn-0.46.0-py3-none-any.whl", hash = "sha256:bbebbcbed972d162afca128605223022bedd345b7bc7855ce66deb31487a9048", size = 70926, upload-time = "2026-04-23T07:15:58.355Z" }, ] [package.optional-dependencies] @@ -6480,7 +6428,7 @@ wheels = [ [[package]] name = "virtualenv" -version = "21.2.4" +version = "21.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "distlib" }, @@ -6488,9 +6436,9 @@ dependencies = [ { name = "platformdirs" }, { name = "python-discovery" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0c/98/3a7e644e19cb26133488caff231be390579860bbbb3da35913c49a1d0a46/virtualenv-21.2.4.tar.gz", hash = "sha256:b294ef68192638004d72524ce7ef303e9d0cf5a44c95ce2e54a7500a6381cada", size = 5850742, upload-time = "2026-04-14T22:15:31.438Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/0d/915c02c94d207b85580eb09bffab54438a709e7288524094fe781da526c2/virtualenv-21.3.1.tar.gz", hash = "sha256:c2305bc1fddeec40699b8370d13f8d431b0701f00ce895061ce493aeded4426b", size = 7613791, upload-time = "2026-05-05T01:34:31.402Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/27/8d/edd0bd910ff803c308ee9a6b7778621af0d10252219ad9f19ef4d4982a61/virtualenv-21.2.4-py3-none-any.whl", hash = "sha256:29d21e941795206138d0f22f4e45ff7050e5da6c6472299fb7103318763861ac", size = 5831232, upload-time = "2026-04-14T22:15:29.342Z" }, + { url = 
"https://files.pythonhosted.org/packages/b1/4f/f71e641e504111a5a74e3a20bc52d01bd86788b22699dd3fee1c63253cf6/virtualenv-21.3.1-py3-none-any.whl", hash = "sha256:d1a71cf58f2f9228fff23a1f6ec15d39785c6b32e03658d104974247145edd35", size = 7594539, upload-time = "2026-05-05T01:34:28.98Z" }, ] [[package]] @@ -6594,11 +6542,11 @@ wheels = [ [[package]] name = "wcwidth" -version = "0.6.0" +version = "0.7.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/35/a2/8e3becb46433538a38726c948d3399905a4c7cabd0df578ede5dc51f0ec2/wcwidth-0.6.0.tar.gz", hash = "sha256:cdc4e4262d6ef9a1a57e018384cbeb1208d8abbc64176027e2c2455c81313159", size = 159684, upload-time = "2026-02-06T19:19:40.919Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2c/ee/afaf0f85a9a18fe47a67f1e4422ed6cf1fe642f0ae0a2f81166231303c52/wcwidth-0.7.0.tar.gz", hash = "sha256:90e3a7ea092341c44b99562e75d09e4d5160fe7a3974c6fb842a101a95e7eed0", size = 182132, upload-time = "2026-05-02T16:04:12.653Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/68/5a/199c59e0a824a3db2b89c5d2dade7ab5f9624dbf6448dc291b46d5ec94d3/wcwidth-0.6.0-py3-none-any.whl", hash = "sha256:1a3a1e510b553315f8e146c54764f4fb6264ffad731b3d78088cdb1478ffbdad", size = 94189, upload-time = "2026-02-06T19:19:39.646Z" }, + { url = "https://files.pythonhosted.org/packages/41/52/e465037f5375f43533d1a80b6923955201596a99142ed524d77b571a1418/wcwidth-0.7.0-py3-none-any.whl", hash = "sha256:5d69154c429a82910e241c738cd0e2976fac8a2dd47a1a805f4afed1c0f136f2", size = 110825, upload-time = "2026-05-02T16:04:11.033Z" }, ] [[package]] @@ -6745,85 +6693,115 @@ wheels = [ [[package]] name = "xxhash" -version = "3.6.0" +version = "3.7.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/02/84/30869e01909fb37a6cc7e18688ee8bf1e42d57e7e0777636bd47524c43c7/xxhash-3.6.0.tar.gz", hash = 
"sha256:f0162a78b13a0d7617b2845b90c763339d1f1d82bb04a4b07f4ab535cc5e05d6", size = 85160, upload-time = "2025-10-02T14:37:08.097Z" } +sdist = { url = "https://files.pythonhosted.org/packages/24/2f/e183a1b407002f5af81822bee18b61cdb94b8670208ef34734d8d2b8ebe9/xxhash-3.7.0.tar.gz", hash = "sha256:6cc4eefbb542a5d6ffd6d70ea9c502957c925e800f998c5630ecc809d6702bae", size = 82022, upload-time = "2026-04-25T11:10:32.553Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9a/07/d9412f3d7d462347e4511181dea65e47e0d0e16e26fbee2ea86a2aefb657/xxhash-3.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:01362c4331775398e7bb34e3ab403bc9ee9f7c497bc7dee6272114055277dd3c", size = 32744, upload-time = "2025-10-02T14:34:34.622Z" }, - { url = "https://files.pythonhosted.org/packages/79/35/0429ee11d035fc33abe32dca1b2b69e8c18d236547b9a9b72c1929189b9a/xxhash-3.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b7b2df81a23f8cb99656378e72501b2cb41b1827c0f5a86f87d6b06b69f9f204", size = 30816, upload-time = "2025-10-02T14:34:36.043Z" }, - { url = "https://files.pythonhosted.org/packages/b7/f2/57eb99aa0f7d98624c0932c5b9a170e1806406cdbcdb510546634a1359e0/xxhash-3.6.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dc94790144e66b14f67b10ac8ed75b39ca47536bf8800eb7c24b50271ea0c490", size = 194035, upload-time = "2025-10-02T14:34:37.354Z" }, - { url = "https://files.pythonhosted.org/packages/4c/ed/6224ba353690d73af7a3f1c7cdb1fc1b002e38f783cb991ae338e1eb3d79/xxhash-3.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93f107c673bccf0d592cdba077dedaf52fe7f42dcd7676eba1f6d6f0c3efffd2", size = 212914, upload-time = "2025-10-02T14:34:38.6Z" }, - { url = "https://files.pythonhosted.org/packages/38/86/fb6b6130d8dd6b8942cc17ab4d90e223653a89aa32ad2776f8af7064ed13/xxhash-3.6.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:2aa5ee3444c25b69813663c9f8067dcfaa2e126dc55e8dddf40f4d1c25d7effa", size = 212163, upload-time = "2025-10-02T14:34:39.872Z" }, - { url = "https://files.pythonhosted.org/packages/ee/dc/e84875682b0593e884ad73b2d40767b5790d417bde603cceb6878901d647/xxhash-3.6.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7f99123f0e1194fa59cc69ad46dbae2e07becec5df50a0509a808f90a0f03f0", size = 445411, upload-time = "2025-10-02T14:34:41.569Z" }, - { url = "https://files.pythonhosted.org/packages/11/4f/426f91b96701ec2f37bb2b8cec664eff4f658a11f3fa9d94f0a887ea6d2b/xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49e03e6fe2cac4a1bc64952dd250cf0dbc5ef4ebb7b8d96bce82e2de163c82a2", size = 193883, upload-time = "2025-10-02T14:34:43.249Z" }, - { url = "https://files.pythonhosted.org/packages/53/5a/ddbb83eee8e28b778eacfc5a85c969673e4023cdeedcfcef61f36731610b/xxhash-3.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bd17fede52a17a4f9a7bc4472a5867cb0b160deeb431795c0e4abe158bc784e9", size = 210392, upload-time = "2025-10-02T14:34:45.042Z" }, - { url = "https://files.pythonhosted.org/packages/1e/c2/ff69efd07c8c074ccdf0a4f36fcdd3d27363665bcdf4ba399abebe643465/xxhash-3.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6fb5f5476bef678f69db04f2bd1efbed3030d2aba305b0fc1773645f187d6a4e", size = 197898, upload-time = "2025-10-02T14:34:46.302Z" }, - { url = "https://files.pythonhosted.org/packages/58/ca/faa05ac19b3b622c7c9317ac3e23954187516298a091eb02c976d0d3dd45/xxhash-3.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:843b52f6d88071f87eba1631b684fcb4b2068cd2180a0224122fe4ef011a9374", size = 210655, upload-time = "2025-10-02T14:34:47.571Z" }, - { url = "https://files.pythonhosted.org/packages/d4/7a/06aa7482345480cc0cb597f5c875b11a82c3953f534394f620b0be2f700c/xxhash-3.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:7d14a6cfaf03b1b6f5f9790f76880601ccc7896aff7ab9cd8978a939c1eb7e0d", size = 414001, upload-time = "2025-10-02T14:34:49.273Z" }, - { url = "https://files.pythonhosted.org/packages/23/07/63ffb386cd47029aa2916b3d2f454e6cc5b9f5c5ada3790377d5430084e7/xxhash-3.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:418daf3db71e1413cfe211c2f9a528456936645c17f46b5204705581a45390ae", size = 191431, upload-time = "2025-10-02T14:34:50.798Z" }, - { url = "https://files.pythonhosted.org/packages/0f/93/14fde614cadb4ddf5e7cebf8918b7e8fac5ae7861c1875964f17e678205c/xxhash-3.6.0-cp312-cp312-win32.whl", hash = "sha256:50fc255f39428a27299c20e280d6193d8b63b8ef8028995323bf834a026b4fbb", size = 30617, upload-time = "2025-10-02T14:34:51.954Z" }, - { url = "https://files.pythonhosted.org/packages/13/5d/0d125536cbe7565a83d06e43783389ecae0c0f2ed037b48ede185de477c0/xxhash-3.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:c0f2ab8c715630565ab8991b536ecded9416d615538be8ecddce43ccf26cbc7c", size = 31534, upload-time = "2025-10-02T14:34:53.276Z" }, - { url = "https://files.pythonhosted.org/packages/54/85/6ec269b0952ec7e36ba019125982cf11d91256a778c7c3f98a4c5043d283/xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829", size = 27876, upload-time = "2025-10-02T14:34:54.371Z" }, - { url = "https://files.pythonhosted.org/packages/33/76/35d05267ac82f53ae9b0e554da7c5e281ee61f3cad44c743f0fcd354f211/xxhash-3.6.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:599e64ba7f67472481ceb6ee80fa3bd828fd61ba59fb11475572cc5ee52b89ec", size = 32738, upload-time = "2025-10-02T14:34:55.839Z" }, - { url = "https://files.pythonhosted.org/packages/31/a8/3fbce1cd96534a95e35d5120637bf29b0d7f5d8fa2f6374e31b4156dd419/xxhash-3.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7d8b8aaa30fca4f16f0c84a5c8d7ddee0e25250ec2796c973775373257dde8f1", size = 30821, upload-time = "2025-10-02T14:34:57.219Z" }, - { url = 
"https://files.pythonhosted.org/packages/0c/ea/d387530ca7ecfa183cb358027f1833297c6ac6098223fd14f9782cd0015c/xxhash-3.6.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d597acf8506d6e7101a4a44a5e428977a51c0fadbbfd3c39650cca9253f6e5a6", size = 194127, upload-time = "2025-10-02T14:34:59.21Z" }, - { url = "https://files.pythonhosted.org/packages/ba/0c/71435dcb99874b09a43b8d7c54071e600a7481e42b3e3ce1eb5226a5711a/xxhash-3.6.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:858dc935963a33bc33490128edc1c12b0c14d9c7ebaa4e387a7869ecc4f3e263", size = 212975, upload-time = "2025-10-02T14:35:00.816Z" }, - { url = "https://files.pythonhosted.org/packages/84/7a/c2b3d071e4bb4a90b7057228a99b10d51744878f4a8a6dd643c8bd897620/xxhash-3.6.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba284920194615cb8edf73bf52236ce2e1664ccd4a38fdb543506413529cc546", size = 212241, upload-time = "2025-10-02T14:35:02.207Z" }, - { url = "https://files.pythonhosted.org/packages/81/5f/640b6eac0128e215f177df99eadcd0f1b7c42c274ab6a394a05059694c5a/xxhash-3.6.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b54219177f6c6674d5378bd862c6aedf64725f70dd29c472eaae154df1a2e89", size = 445471, upload-time = "2025-10-02T14:35:03.61Z" }, - { url = "https://files.pythonhosted.org/packages/5e/1e/3c3d3ef071b051cc3abbe3721ffb8365033a172613c04af2da89d5548a87/xxhash-3.6.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:42c36dd7dbad2f5238950c377fcbf6811b1cdb1c444fab447960030cea60504d", size = 193936, upload-time = "2025-10-02T14:35:05.013Z" }, - { url = "https://files.pythonhosted.org/packages/2c/bd/4a5f68381939219abfe1c22a9e3a5854a4f6f6f3c4983a87d255f21f2e5d/xxhash-3.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:f22927652cba98c44639ffdc7aaf35828dccf679b10b31c4ad72a5b530a18eb7", size = 210440, upload-time = "2025-10-02T14:35:06.239Z" }, - { url = "https://files.pythonhosted.org/packages/eb/37/b80fe3d5cfb9faff01a02121a0f4d565eb7237e9e5fc66e73017e74dcd36/xxhash-3.6.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b45fad44d9c5c119e9c6fbf2e1c656a46dc68e280275007bbfd3d572b21426db", size = 197990, upload-time = "2025-10-02T14:35:07.735Z" }, - { url = "https://files.pythonhosted.org/packages/d7/fd/2c0a00c97b9e18f72e1f240ad4e8f8a90fd9d408289ba9c7c495ed7dc05c/xxhash-3.6.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6f2580ffab1a8b68ef2b901cde7e55fa8da5e4be0977c68f78fc80f3c143de42", size = 210689, upload-time = "2025-10-02T14:35:09.438Z" }, - { url = "https://files.pythonhosted.org/packages/93/86/5dd8076a926b9a95db3206aba20d89a7fc14dd5aac16e5c4de4b56033140/xxhash-3.6.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:40c391dd3cd041ebc3ffe6f2c862f402e306eb571422e0aa918d8070ba31da11", size = 414068, upload-time = "2025-10-02T14:35:11.162Z" }, - { url = "https://files.pythonhosted.org/packages/af/3c/0bb129170ee8f3650f08e993baee550a09593462a5cddd8e44d0011102b1/xxhash-3.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f205badabde7aafd1a31e8ca2a3e5a763107a71c397c4481d6a804eb5063d8bd", size = 191495, upload-time = "2025-10-02T14:35:12.971Z" }, - { url = "https://files.pythonhosted.org/packages/e9/3a/6797e0114c21d1725e2577508e24006fd7ff1d8c0c502d3b52e45c1771d8/xxhash-3.6.0-cp313-cp313-win32.whl", hash = "sha256:2577b276e060b73b73a53042ea5bd5203d3e6347ce0d09f98500f418a9fcf799", size = 30620, upload-time = "2025-10-02T14:35:14.129Z" }, - { url = "https://files.pythonhosted.org/packages/86/15/9bc32671e9a38b413a76d24722a2bf8784a132c043063a8f5152d390b0f9/xxhash-3.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:757320d45d2fbcce8f30c42a6b2f47862967aea7bf458b9625b4bbe7ee390392", size = 31542, upload-time = "2025-10-02T14:35:15.21Z" }, - { url = 
"https://files.pythonhosted.org/packages/39/c5/cc01e4f6188656e56112d6a8e0dfe298a16934b8c47a247236549a3f7695/xxhash-3.6.0-cp313-cp313-win_arm64.whl", hash = "sha256:457b8f85dec5825eed7b69c11ae86834a018b8e3df5e77783c999663da2f96d6", size = 27880, upload-time = "2025-10-02T14:35:16.315Z" }, - { url = "https://files.pythonhosted.org/packages/f3/30/25e5321c8732759e930c555176d37e24ab84365482d257c3b16362235212/xxhash-3.6.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a42e633d75cdad6d625434e3468126c73f13f7584545a9cf34e883aa1710e702", size = 32956, upload-time = "2025-10-02T14:35:17.413Z" }, - { url = "https://files.pythonhosted.org/packages/9f/3c/0573299560d7d9f8ab1838f1efc021a280b5ae5ae2e849034ef3dee18810/xxhash-3.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:568a6d743219e717b07b4e03b0a828ce593833e498c3b64752e0f5df6bfe84db", size = 31072, upload-time = "2025-10-02T14:35:18.844Z" }, - { url = "https://files.pythonhosted.org/packages/7a/1c/52d83a06e417cd9d4137722693424885cc9878249beb3a7c829e74bf7ce9/xxhash-3.6.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bec91b562d8012dae276af8025a55811b875baace6af510412a5e58e3121bc54", size = 196409, upload-time = "2025-10-02T14:35:20.31Z" }, - { url = "https://files.pythonhosted.org/packages/e3/8e/c6d158d12a79bbd0b878f8355432075fc82759e356ab5a111463422a239b/xxhash-3.6.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78e7f2f4c521c30ad5e786fdd6bae89d47a32672a80195467b5de0480aa97b1f", size = 215736, upload-time = "2025-10-02T14:35:21.616Z" }, - { url = "https://files.pythonhosted.org/packages/bc/68/c4c80614716345d55071a396cf03d06e34b5f4917a467faf43083c995155/xxhash-3.6.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3ed0df1b11a79856df5ffcab572cbd6b9627034c1c748c5566fa79df9048a7c5", size = 214833, upload-time = "2025-10-02T14:35:23.32Z" }, - { url = 
"https://files.pythonhosted.org/packages/7e/e9/ae27c8ffec8b953efa84c7c4a6c6802c263d587b9fc0d6e7cea64e08c3af/xxhash-3.6.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0e4edbfc7d420925b0dd5e792478ed393d6e75ff8fc219a6546fb446b6a417b1", size = 448348, upload-time = "2025-10-02T14:35:25.111Z" }, - { url = "https://files.pythonhosted.org/packages/d7/6b/33e21afb1b5b3f46b74b6bd1913639066af218d704cc0941404ca717fc57/xxhash-3.6.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fba27a198363a7ef87f8c0f6b171ec36b674fe9053742c58dd7e3201c1ab30ee", size = 196070, upload-time = "2025-10-02T14:35:26.586Z" }, - { url = "https://files.pythonhosted.org/packages/96/b6/fcabd337bc5fa624e7203aa0fa7d0c49eed22f72e93229431752bddc83d9/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:794fe9145fe60191c6532fa95063765529770edcdd67b3d537793e8004cabbfd", size = 212907, upload-time = "2025-10-02T14:35:28.087Z" }, - { url = "https://files.pythonhosted.org/packages/4b/d3/9ee6160e644d660fcf176c5825e61411c7f62648728f69c79ba237250143/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6105ef7e62b5ac73a837778efc331a591d8442f8ef5c7e102376506cb4ae2729", size = 200839, upload-time = "2025-10-02T14:35:29.857Z" }, - { url = "https://files.pythonhosted.org/packages/0d/98/e8de5baa5109394baf5118f5e72ab21a86387c4f89b0e77ef3e2f6b0327b/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:f01375c0e55395b814a679b3eea205db7919ac2af213f4a6682e01220e5fe292", size = 213304, upload-time = "2025-10-02T14:35:31.222Z" }, - { url = "https://files.pythonhosted.org/packages/7b/1d/71056535dec5c3177eeb53e38e3d367dd1d16e024e63b1cee208d572a033/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d706dca2d24d834a4661619dcacf51a75c16d65985718d6a7d73c1eeeb903ddf", size = 416930, upload-time = "2025-10-02T14:35:32.517Z" }, - { url = 
"https://files.pythonhosted.org/packages/dc/6c/5cbde9de2cd967c322e651c65c543700b19e7ae3e0aae8ece3469bf9683d/xxhash-3.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f059d9faeacd49c0215d66f4056e1326c80503f51a1532ca336a385edadd033", size = 193787, upload-time = "2025-10-02T14:35:33.827Z" }, - { url = "https://files.pythonhosted.org/packages/19/fa/0172e350361d61febcea941b0cc541d6e6c8d65d153e85f850a7b256ff8a/xxhash-3.6.0-cp313-cp313t-win32.whl", hash = "sha256:1244460adc3a9be84731d72b8e80625788e5815b68da3da8b83f78115a40a7ec", size = 30916, upload-time = "2025-10-02T14:35:35.107Z" }, - { url = "https://files.pythonhosted.org/packages/ad/e6/e8cf858a2b19d6d45820f072eff1bea413910592ff17157cabc5f1227a16/xxhash-3.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b1e420ef35c503869c4064f4a2f2b08ad6431ab7b229a05cce39d74268bca6b8", size = 31799, upload-time = "2025-10-02T14:35:36.165Z" }, - { url = "https://files.pythonhosted.org/packages/56/15/064b197e855bfb7b343210e82490ae672f8bc7cdf3ddb02e92f64304ee8a/xxhash-3.6.0-cp313-cp313t-win_arm64.whl", hash = "sha256:ec44b73a4220623235f67a996c862049f375df3b1052d9899f40a6382c32d746", size = 28044, upload-time = "2025-10-02T14:35:37.195Z" }, - { url = "https://files.pythonhosted.org/packages/7e/5e/0138bc4484ea9b897864d59fce9be9086030825bc778b76cb5a33a906d37/xxhash-3.6.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a40a3d35b204b7cc7643cbcf8c9976d818cb47befcfac8bbefec8038ac363f3e", size = 32754, upload-time = "2025-10-02T14:35:38.245Z" }, - { url = "https://files.pythonhosted.org/packages/18/d7/5dac2eb2ec75fd771957a13e5dda560efb2176d5203f39502a5fc571f899/xxhash-3.6.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a54844be970d3fc22630b32d515e79a90d0a3ddb2644d8d7402e3c4c8da61405", size = 30846, upload-time = "2025-10-02T14:35:39.6Z" }, - { url = 
"https://files.pythonhosted.org/packages/fe/71/8bc5be2bb00deb5682e92e8da955ebe5fa982da13a69da5a40a4c8db12fb/xxhash-3.6.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:016e9190af8f0a4e3741343777710e3d5717427f175adfdc3e72508f59e2a7f3", size = 194343, upload-time = "2025-10-02T14:35:40.69Z" }, - { url = "https://files.pythonhosted.org/packages/e7/3b/52badfb2aecec2c377ddf1ae75f55db3ba2d321c5e164f14461c90837ef3/xxhash-3.6.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f6f72232f849eb9d0141e2ebe2677ece15adfd0fa599bc058aad83c714bb2c6", size = 213074, upload-time = "2025-10-02T14:35:42.29Z" }, - { url = "https://files.pythonhosted.org/packages/a2/2b/ae46b4e9b92e537fa30d03dbc19cdae57ed407e9c26d163895e968e3de85/xxhash-3.6.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:63275a8aba7865e44b1813d2177e0f5ea7eadad3dd063a21f7cf9afdc7054063", size = 212388, upload-time = "2025-10-02T14:35:43.929Z" }, - { url = "https://files.pythonhosted.org/packages/f5/80/49f88d3afc724b4ac7fbd664c8452d6db51b49915be48c6982659e0e7942/xxhash-3.6.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cd01fa2aa00d8b017c97eb46b9a794fbdca53fc14f845f5a328c71254b0abb7", size = 445614, upload-time = "2025-10-02T14:35:45.216Z" }, - { url = "https://files.pythonhosted.org/packages/ed/ba/603ce3961e339413543d8cd44f21f2c80e2a7c5cfe692a7b1f2cccf58f3c/xxhash-3.6.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0226aa89035b62b6a86d3c68df4d7c1f47a342b8683da2b60cedcddb46c4d95b", size = 194024, upload-time = "2025-10-02T14:35:46.959Z" }, - { url = "https://files.pythonhosted.org/packages/78/d1/8e225ff7113bf81545cfdcd79eef124a7b7064a0bba53605ff39590b95c2/xxhash-3.6.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = 
"sha256:c6e193e9f56e4ca4923c61238cdaced324f0feac782544eb4c6d55ad5cc99ddd", size = 210541, upload-time = "2025-10-02T14:35:48.301Z" }, - { url = "https://files.pythonhosted.org/packages/6f/58/0f89d149f0bad89def1a8dd38feb50ccdeb643d9797ec84707091d4cb494/xxhash-3.6.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9176dcaddf4ca963d4deb93866d739a343c01c969231dbe21680e13a5d1a5bf0", size = 198305, upload-time = "2025-10-02T14:35:49.584Z" }, - { url = "https://files.pythonhosted.org/packages/11/38/5eab81580703c4df93feb5f32ff8fa7fe1e2c51c1f183ee4e48d4bb9d3d7/xxhash-3.6.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:c1ce4009c97a752e682b897aa99aef84191077a9433eb237774689f14f8ec152", size = 210848, upload-time = "2025-10-02T14:35:50.877Z" }, - { url = "https://files.pythonhosted.org/packages/5e/6b/953dc4b05c3ce678abca756416e4c130d2382f877a9c30a20d08ee6a77c0/xxhash-3.6.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:8cb2f4f679b01513b7adbb9b1b2f0f9cdc31b70007eaf9d59d0878809f385b11", size = 414142, upload-time = "2025-10-02T14:35:52.15Z" }, - { url = "https://files.pythonhosted.org/packages/08/a9/238ec0d4e81a10eb5026d4a6972677cbc898ba6c8b9dbaec12ae001b1b35/xxhash-3.6.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:653a91d7c2ab54a92c19ccf43508b6a555440b9be1bc8be553376778be7f20b5", size = 191547, upload-time = "2025-10-02T14:35:53.547Z" }, - { url = "https://files.pythonhosted.org/packages/f1/ee/3cf8589e06c2164ac77c3bf0aa127012801128f1feebf2a079272da5737c/xxhash-3.6.0-cp314-cp314-win32.whl", hash = "sha256:a756fe893389483ee8c394d06b5ab765d96e68fbbfe6fde7aa17e11f5720559f", size = 31214, upload-time = "2025-10-02T14:35:54.746Z" }, - { url = "https://files.pythonhosted.org/packages/02/5d/a19552fbc6ad4cb54ff953c3908bbc095f4a921bc569433d791f755186f1/xxhash-3.6.0-cp314-cp314-win_amd64.whl", hash = "sha256:39be8e4e142550ef69629c9cd71b88c90e9a5db703fecbcf265546d9536ca4ad", size = 32290, upload-time = "2025-10-02T14:35:55.791Z" }, - { url = 
"https://files.pythonhosted.org/packages/b1/11/dafa0643bc30442c887b55baf8e73353a344ee89c1901b5a5c54a6c17d39/xxhash-3.6.0-cp314-cp314-win_arm64.whl", hash = "sha256:25915e6000338999236f1eb68a02a32c3275ac338628a7eaa5a269c401995679", size = 28795, upload-time = "2025-10-02T14:35:57.162Z" }, - { url = "https://files.pythonhosted.org/packages/2c/db/0e99732ed7f64182aef4a6fb145e1a295558deec2a746265dcdec12d191e/xxhash-3.6.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c5294f596a9017ca5a3e3f8884c00b91ab2ad2933cf288f4923c3fd4346cf3d4", size = 32955, upload-time = "2025-10-02T14:35:58.267Z" }, - { url = "https://files.pythonhosted.org/packages/55/f4/2a7c3c68e564a099becfa44bb3d398810cc0ff6749b0d3cb8ccb93f23c14/xxhash-3.6.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1cf9dcc4ab9cff01dfbba78544297a3a01dafd60f3bde4e2bfd016cf7e4ddc67", size = 31072, upload-time = "2025-10-02T14:35:59.382Z" }, - { url = "https://files.pythonhosted.org/packages/c6/d9/72a29cddc7250e8a5819dad5d466facb5dc4c802ce120645630149127e73/xxhash-3.6.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01262da8798422d0685f7cef03b2bd3f4f46511b02830861df548d7def4402ad", size = 196579, upload-time = "2025-10-02T14:36:00.838Z" }, - { url = "https://files.pythonhosted.org/packages/63/93/b21590e1e381040e2ca305a884d89e1c345b347404f7780f07f2cdd47ef4/xxhash-3.6.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51a73fb7cb3a3ead9f7a8b583ffd9b8038e277cdb8cb87cf890e88b3456afa0b", size = 215854, upload-time = "2025-10-02T14:36:02.207Z" }, - { url = "https://files.pythonhosted.org/packages/ce/b8/edab8a7d4fa14e924b29be877d54155dcbd8b80be85ea00d2be3413a9ed4/xxhash-3.6.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b9c6df83594f7df8f7f708ce5ebeacfc69f72c9fbaaababf6cf4758eaada0c9b", size = 214965, upload-time = "2025-10-02T14:36:03.507Z" }, - { url = 
"https://files.pythonhosted.org/packages/27/67/dfa980ac7f0d509d54ea0d5a486d2bb4b80c3f1bb22b66e6a05d3efaf6c0/xxhash-3.6.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:627f0af069b0ea56f312fd5189001c24578868643203bca1abbc2c52d3a6f3ca", size = 448484, upload-time = "2025-10-02T14:36:04.828Z" }, - { url = "https://files.pythonhosted.org/packages/8c/63/8ffc2cc97e811c0ca5d00ab36604b3ea6f4254f20b7bc658ca825ce6c954/xxhash-3.6.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa912c62f842dfd013c5f21a642c9c10cd9f4c4e943e0af83618b4a404d9091a", size = 196162, upload-time = "2025-10-02T14:36:06.182Z" }, - { url = "https://files.pythonhosted.org/packages/4b/77/07f0e7a3edd11a6097e990f6e5b815b6592459cb16dae990d967693e6ea9/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b465afd7909db30168ab62afe40b2fcf79eedc0b89a6c0ab3123515dc0df8b99", size = 213007, upload-time = "2025-10-02T14:36:07.733Z" }, - { url = "https://files.pythonhosted.org/packages/ae/d8/bc5fa0d152837117eb0bef6f83f956c509332ce133c91c63ce07ee7c4873/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a881851cf38b0a70e7c4d3ce81fc7afd86fbc2a024f4cfb2a97cf49ce04b75d3", size = 200956, upload-time = "2025-10-02T14:36:09.106Z" }, - { url = "https://files.pythonhosted.org/packages/26/a5/d749334130de9411783873e9b98ecc46688dad5db64ca6e04b02acc8b473/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9b3222c686a919a0f3253cfc12bb118b8b103506612253b5baeaac10d8027cf6", size = 213401, upload-time = "2025-10-02T14:36:10.585Z" }, - { url = "https://files.pythonhosted.org/packages/89/72/abed959c956a4bfc72b58c0384bb7940663c678127538634d896b1195c10/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:c5aa639bc113e9286137cec8fadc20e9cd732b2cc385c0b7fa673b84fc1f2a93", size = 417083, upload-time = "2025-10-02T14:36:12.276Z" }, - { url = 
"https://files.pythonhosted.org/packages/0c/b3/62fd2b586283b7d7d665fb98e266decadf31f058f1cf6c478741f68af0cb/xxhash-3.6.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5c1343d49ac102799905e115aee590183c3921d475356cb24b4de29a4bc56518", size = 193913, upload-time = "2025-10-02T14:36:14.025Z" }, - { url = "https://files.pythonhosted.org/packages/9a/9a/c19c42c5b3f5a4aad748a6d5b4f23df3bed7ee5445accc65a0fb3ff03953/xxhash-3.6.0-cp314-cp314t-win32.whl", hash = "sha256:5851f033c3030dd95c086b4a36a2683c2ff4a799b23af60977188b057e467119", size = 31586, upload-time = "2025-10-02T14:36:15.603Z" }, - { url = "https://files.pythonhosted.org/packages/03/d6/4cc450345be9924fd5dc8c590ceda1db5b43a0a889587b0ae81a95511360/xxhash-3.6.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0444e7967dac37569052d2409b00a8860c2135cff05502df4da80267d384849f", size = 32526, upload-time = "2025-10-02T14:36:16.708Z" }, - { url = "https://files.pythonhosted.org/packages/0f/c9/7243eb3f9eaabd1a88a5a5acadf06df2d83b100c62684b7425c6a11bcaa8/xxhash-3.6.0-cp314-cp314t-win_arm64.whl", hash = "sha256:bb79b1e63f6fd84ec778a4b1916dfe0a7c3fdb986c06addd5db3a0d413819d95", size = 28898, upload-time = "2025-10-02T14:36:17.843Z" }, + { url = "https://files.pythonhosted.org/packages/f2/8a/51a14cdef4728c6c2337db8a7d8704422cc65676d9199d77215464c880af/xxhash-3.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:082c87bfdd2b9f457606c7a4a53457f4c4b48b0cdc48de0277f4349d79bb3d7a", size = 33357, upload-time = "2026-04-25T11:06:20.44Z" }, + { url = "https://files.pythonhosted.org/packages/b9/1b/0c2c933809421ffd9bf42b59315552c143c755db5d9a816b2f1ae273e884/xxhash-3.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5e7ce913b61f35b0c1c839a49ac9c8e75dd8d860150688aed353b0ce1bf409d8", size = 30869, upload-time = "2026-04-25T11:06:21.989Z" }, + { url = 
"https://files.pythonhosted.org/packages/03/a8/89d5fdd6ee12d70ba99451de46dd0e8010167468dcd913ec855653f4dd50/xxhash-3.7.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3beb1de3b1e9694fcdd853e570ee64c631c7062435d2f8c69c1adf809bc086f0", size = 194100, upload-time = "2026-04-25T11:06:23.586Z" }, + { url = "https://files.pythonhosted.org/packages/87/ee/2f9f2ed993e77206d1e66991290a1ebe22e843351ca3ebec8e49e01ba186/xxhash-3.7.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f3e7b689c3bce16699efcf736066f5c6cc4472c3840fe4b22bd8279daf4abdac", size = 212977, upload-time = "2026-04-25T11:06:25.019Z" }, + { url = "https://files.pythonhosted.org/packages/de/60/5a91644615a9e9d4e42c2e9925f1908e3a24e4e691d9de7340d565bea024/xxhash-3.7.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a6545e6b409e3d5cbafc850fb84c55a1ca26ed15a6b11e3bf07a0e0cd84517c8", size = 236373, upload-time = "2026-04-25T11:06:26.482Z" }, + { url = "https://files.pythonhosted.org/packages/22/c0/f3a9384eaaed9d14d4d062a5d953aa0da489bfe9747877aa994caa87cd0b/xxhash-3.7.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:31ab1461c77a11461d703c88eb949e132a1c6515933cf675d97ec680f4bd18de", size = 212229, upload-time = "2026-04-25T11:06:28.065Z" }, + { url = "https://files.pythonhosted.org/packages/2e/67/02f07a9fd79726804190f2172c4894c3ed9a4ebccaca05653c84beb58025/xxhash-3.7.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7c4d596b7676f811172687ec567cbafb9e4dea2f9be1bbb4f622410cb7f40f40", size = 445462, upload-time = "2026-04-25T11:06:30.048Z" }, + { url = "https://files.pythonhosted.org/packages/40/37/558f5a90c0672fc9b4402dc25d87ac5b7406616e8969430c9ca4e52ee74d/xxhash-3.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:13805f0461cba0a857924e70ff91ae6d52d2598f79a884e788db80532614a4a1", size = 193932, upload-time = "2026-04-25T11:06:31.857Z" }, + { url = "https://files.pythonhosted.org/packages/d5/90/aaa09cd58661d32044dbbad7df55bbe22a623032b810e7ed3b8c569a2a6f/xxhash-3.7.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1d398f372496152f1c6933a33566373f8d1b37b98b8c9d608fa6edc0976f23b2", size = 284807, upload-time = "2026-04-25T11:06:33.697Z" }, + { url = "https://files.pythonhosted.org/packages/d6/f3/53df3719ab127a02c174f0c1c74924fcd110866e89c966bc7909cfa8fa84/xxhash-3.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d610aa62cdb7d4d497740741772a24a794903bf3e79eaa51d2e800082abe11e5", size = 210445, upload-time = "2026-04-25T11:06:35.488Z" }, + { url = "https://files.pythonhosted.org/packages/72/33/d219975c0e8b6fa2eb9ccd486fe47e21bf1847985b878dd2fbc3126e0d5c/xxhash-3.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:073c23900a9fbf3d26616c17c830db28af9803677cd5b33aea3224d824111514", size = 241273, upload-time = "2026-04-25T11:06:37.24Z" }, + { url = "https://files.pythonhosted.org/packages/3e/50/49b1afe610eb3964cedcb90a4d4c3d46a261ee8669cbd4f060652619ae3c/xxhash-3.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:418a463c3e6a590c0cdc890f8be19adb44a8c8acd175ca5b2a6de77e61d0b386", size = 197950, upload-time = "2026-04-25T11:06:39.148Z" }, + { url = "https://files.pythonhosted.org/packages/c6/75/5f42a1a4c78717d906a4b6a140c6dbf837ab1f547a54d23c4e2903310936/xxhash-3.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:03f8ff4474ee61c845758ce00711d7087a770d77efb36f7e74a6e867301000b8", size = 210709, upload-time = "2026-04-25T11:06:40.958Z" }, + { url = "https://files.pythonhosted.org/packages/8a/85/237e446c25abced71e9c53d269f2cef5bab8a82b3f88a12e00c5368e7368/xxhash-3.7.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:44fba4a5f1d179b7ddc7b3dc40f56f9209046421679b57025d4d8821b376fd8d", size = 275345, upload-time = 
"2026-04-25T11:06:42.525Z" }, + { url = "https://files.pythonhosted.org/packages/62/34/c2c26c0a6a9cc739bc2a5f0ae03ba8b87deb12b8bce35f7ac495e790dc6d/xxhash-3.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:31e3516a0f829d06ded4a2c0f3c7c5561993256bfa1c493975fb9dc7bfa828a1", size = 414056, upload-time = "2026-04-25T11:06:44.343Z" }, + { url = "https://files.pythonhosted.org/packages/a0/aa/5c58e9bc8071b8afd8dcf297ff362f723c4892168faba149f19904132bf4/xxhash-3.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b59ee2ac81de57771a09ecad09191e840a1d2fae1ef684208320591055768f83", size = 191485, upload-time = "2026-04-25T11:06:46.262Z" }, + { url = "https://files.pythonhosted.org/packages/d4/69/a929cf9d1e2e65a48b818cdce72cb6b69eab2e6877f21436d0a1942aff43/xxhash-3.7.0-cp312-cp312-win32.whl", hash = "sha256:74bbd92f8c7fcc397ba0a11bfdc106bc72ad7f11e3a60277753f87e7532b4d81", size = 30671, upload-time = "2026-04-25T11:06:48.039Z" }, + { url = "https://files.pythonhosted.org/packages/b9/1b/104b41a8947f4e1d4a66ce1e628eea752f37d1890bfd7453559ca7a3d950/xxhash-3.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:7bd7bc82dd4f185f28f35193c2e968ef46131628e3cac62f639dadf321cba4d1", size = 31514, upload-time = "2026-04-25T11:06:49.279Z" }, + { url = "https://files.pythonhosted.org/packages/98/a0/1fd0ea1f1b886d9e7c73f0397571e22333a7d79e31da6d7127c2a4a71d75/xxhash-3.7.0-cp312-cp312-win_arm64.whl", hash = "sha256:7d7148180ec99ba36585b42c8c5de25e9b40191613bc4be68909b4d25a77a852", size = 27761, upload-time = "2026-04-25T11:06:50.448Z" }, + { url = "https://files.pythonhosted.org/packages/c1/ca/d5174b4c36d10f64d4ca7050563138c5a599efb01a765858ddefc9c1202a/xxhash-3.7.0-cp313-cp313-android_21_arm64_v8a.whl", hash = "sha256:4b6d6b33f141158692bd4eafbb96edbc5aa0dabdb593a962db01a91983d4f8fa", size = 36813, upload-time = "2026-04-25T11:06:51.73Z" }, + { url = 
"https://files.pythonhosted.org/packages/41/d0/abc6c9d347ba1f1e1e1d98125d0881a0452c7f9a76a9dd03a7b5d2197f23/xxhash-3.7.0-cp313-cp313-android_21_x86_64.whl", hash = "sha256:845d347df254d6c619f616afa921331bada8614b8d373d58725c663ba97c3605", size = 35121, upload-time = "2026-04-25T11:06:53.048Z" }, + { url = "https://files.pythonhosted.org/packages/bf/11/4cc834eb3d79f2f2b3a6ef7324195208bcdfbdcf7534d2b17267aa5f3a8f/xxhash-3.7.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:fddbbb69a6fff4f421e7a0d1fa28f894b20112e9e3fab306af451e2dfd0e459b", size = 29624, upload-time = "2026-04-25T11:06:54.311Z" }, + { url = "https://files.pythonhosted.org/packages/23/83/e97d3e7b635fe73a1dfb1e91f805324dd6d930bb42041cbf18f183bc0b6d/xxhash-3.7.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:54876a4e45101cec2bf8f31a973cda073a23e2e108538dad224ba07f85f22487", size = 30638, upload-time = "2026-04-25T11:06:55.864Z" }, + { url = "https://files.pythonhosted.org/packages/f4/40/d84951d80c35db1f4c40a29a64a8520eea5d56e764c603906b4fe763580f/xxhash-3.7.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:0c72fe9c7e3d6dfd7f1e21e224a877917fa09c465694ba4e06464b9511b65544", size = 33323, upload-time = "2026-04-25T11:06:57.336Z" }, + { url = "https://files.pythonhosted.org/packages/89/cc/c7dc6558d97e9ab023f663d69ab28b340ed9bf4d2d94f2c259cf896bb354/xxhash-3.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a6d73a830b17ef49bc04e00182bd839164c1b3c59c127cd7c54fcb10c7ed8ee8", size = 33362, upload-time = "2026-04-25T11:06:58.656Z" }, + { url = "https://files.pythonhosted.org/packages/2a/6e/46b84017b1301d54091430353d4ad5901654a3e0871649877a416f7f1644/xxhash-3.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:91c3b07cf3362086d8f126c6aecd8e5e9396ad8b2f2219ea7e49a8250c318acd", size = 30874, upload-time = "2026-04-25T11:06:59.834Z" }, + { url = 
"https://files.pythonhosted.org/packages/df/5e/8f9158e3ab906ad3fec51e09b5ea0093e769f12207bfa42a368ca204e7ab/xxhash-3.7.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:50e879ebbac351c81565ca108db766d7832f5b8b6a5b14b8c0151f7190028e3d", size = 194185, upload-time = "2026-04-25T11:07:01.658Z" }, + { url = "https://files.pythonhosted.org/packages/f3/29/a804ded9f5d3d3758292678d23e7528b08fda7b7e750688d08b052322475/xxhash-3.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:921c14e93817842dd0dd9f372890a0f0c72e534650b6ab13c5be5cd0db11d47e", size = 213033, upload-time = "2026-04-25T11:07:03.606Z" }, + { url = "https://files.pythonhosted.org/packages/8b/91/1ce5a7d2fdc975267320e2c78fc1cecfe7ab735ccbcf6993ec5dd541cb2c/xxhash-3.7.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e64a7c9d7dfca3e0fafcbc5e455519090706a3e36e95d655cec3e04e79f95aaa", size = 236140, upload-time = "2026-04-25T11:07:05.396Z" }, + { url = "https://files.pythonhosted.org/packages/34/04/fd595a4fd8617b05fa27bd9b684ecb4985bfed27917848eea85d54036d06/xxhash-3.7.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2220af08163baf5fa36c2b8af079dc2cbe6e66ae061385267f9472362dfd53c6", size = 212291, upload-time = "2026-04-25T11:07:06.966Z" }, + { url = "https://files.pythonhosted.org/packages/03/fb/f1a379cbc372ae5b9f4ab36154c48a849ca6ebe3ac477067a57865bf3bc6/xxhash-3.7.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f14bb8b22a4a91325813e3d553b8963c10cf8c756cff65ee50c194431296c655", size = 445532, upload-time = "2026-04-25T11:07:08.525Z" }, + { url = "https://files.pythonhosted.org/packages/65/59/172424b79f8cfd4b6d8a122b2193e6b8ad4b11f7159bb3b6f9b3191329bb/xxhash-3.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:496736f86a9bedaf64b0dc70e3539d0766df01c71ea22032698e88f3f04a1ce9", size = 193990, upload-time = "2026-04-25T11:07:10.315Z" }, + { url = "https://files.pythonhosted.org/packages/b9/19/aeac22161d953f139f07ba5586cb4a17c5b7b6dff985122803bb12933500/xxhash-3.7.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0ff71596bd79816975b3de7130ab1ff4541410285a3c084584eeb1c8239996fd", size = 284876, upload-time = "2026-04-25T11:07:12.15Z" }, + { url = "https://files.pythonhosted.org/packages/77/d5/4fd0b59e7a02242953da05ff679fbb961b0a4368eac97a217e11dae110c1/xxhash-3.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1ad86695c19b1d46fe106925db3c7a37f16be37669dcf58dcc70a9dd6e324676", size = 210495, upload-time = "2026-04-25T11:07:13.952Z" }, + { url = "https://files.pythonhosted.org/packages/aa/fb/976a3165c728c7faf74aa1b5ab3cf6a85e6d731612894741840524c7d28c/xxhash-3.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:970f9f8c50961d639cbd0d988c96f80ddf66006de93641719282c4fe7a87c5e6", size = 241331, upload-time = "2026-04-25T11:07:15.557Z" }, + { url = "https://files.pythonhosted.org/packages/4a/2c/6763d5901d53ac9e6ba296e5717ae599025c9d268396e8faa8b4b0a8e0ac/xxhash-3.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5886ad85e9e347911783760a1d16cb6b393e8f9e3b52c982568226cb56927bdc", size = 198037, upload-time = "2026-04-25T11:07:17.563Z" }, + { url = "https://files.pythonhosted.org/packages/61/2b/876e722d533833f5f9a83473e6ba993e48745701096944e77bbecf29b2c3/xxhash-3.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6e934bbae1e0ec74e27d5f0d7f37ef547ce5ff9f0a7e63fb39e559fc99526734", size = 210744, upload-time = "2026-04-25T11:07:19.055Z" }, + { url = "https://files.pythonhosted.org/packages/21/e6/d7e7baef7ce24166b4668d3c48557bb35a23b92ecadcac7e7718d099ab69/xxhash-3.7.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:3b6b3d28228af044ebcded71c4a3dd86e1dbd7e2f4645bf40f7b5da65bb5fb5a", size = 275406, upload-time = 
"2026-04-25T11:07:20.908Z" }, + { url = "https://files.pythonhosted.org/packages/92/fe/198b3763b2e01ca908f2154969a2352ec99bda892b574a11a9a151c5ede4/xxhash-3.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:6be4d70d9ab76c9f324ead9c01af6ff52c324745ea0c3731682a0cf99720f1fe", size = 414125, upload-time = "2026-04-25T11:07:23.037Z" }, + { url = "https://files.pythonhosted.org/packages/3a/6d/019a11affd5a5499137cacca53808659964785439855b5aa40dfd3412916/xxhash-3.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:151d7520838d4465461a0b7f4ae488b3b00de16183dd3214c1a6b14bf89d7fb6", size = 191555, upload-time = "2026-04-25T11:07:24.991Z" }, + { url = "https://files.pythonhosted.org/packages/76/21/b96d58568df2d01533244c3e0e5cbdd0c8b2b25c4bec4d72f19259a292d7/xxhash-3.7.0-cp313-cp313-win32.whl", hash = "sha256:d798c1e291bffb8e37b5bbe0dda77fc767cd19e89cadaf66e6ed5d0ff88c9fe6", size = 30668, upload-time = "2026-04-25T11:07:26.665Z" }, + { url = "https://files.pythonhosted.org/packages/99/57/d849a8d3afa1f8f4bc6a831cd89f49f9706fbbad94d2975d6140a171988c/xxhash-3.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:875811ba23c543b1a1c3143c926e43996eb27ebb8f52d3500744aa608c275aed", size = 31524, upload-time = "2026-04-25T11:07:27.92Z" }, + { url = "https://files.pythonhosted.org/packages/81/52/bacc753e92dee78b058af8dcef0a50815f5f860986c664a92d75f965b6a5/xxhash-3.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:54a675cb300dda83d71daae2a599389d22db8021a0f8db0dd659e14626eb3ecc", size = 27768, upload-time = "2026-04-25T11:07:29.113Z" }, + { url = "https://files.pythonhosted.org/packages/1c/47/ddbd683b7fc7e592c1a8d9d65f73ce9ab513f082b3967eee2baf549b8fc6/xxhash-3.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a3b19a42111c4057c1547a4a1396a53961dca576a0f6b82bfa88a2d1561764b2", size = 33576, upload-time = "2026-04-25T11:07:30.469Z" }, + { url = 
"https://files.pythonhosted.org/packages/07/f2/36d3310161db7f72efb4562aadde0ed429f1d0531782dd6345b12d2da527/xxhash-3.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8f4608a06e4d61b7a3425665a46d00e0579122e1a2fae97a0c52953a3aad9aa3", size = 31123, upload-time = "2026-04-25T11:07:31.989Z" }, + { url = "https://files.pythonhosted.org/packages/0d/3f/75937a5c69556ed213021e43cbedd84c8e0279d0d74e7d41a255d84ba4b1/xxhash-3.7.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ad37c7792479e49cf96c1ab25517d7003fe0d93687a772ba19a097d235bbe41e", size = 196491, upload-time = "2026-04-25T11:07:33.358Z" }, + { url = "https://files.pythonhosted.org/packages/22/29/f10d7ff8c7a733d4403a43b9de18c8fabc005f98cec054644f04418659ee/xxhash-3.7.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc026e3b89d98e30a8288c95cb696e77d150b3f0fb7a51f73dcd49ee6b5577fa", size = 215793, upload-time = "2026-04-25T11:07:34.919Z" }, + { url = "https://files.pythonhosted.org/packages/8b/fd/778f60aa295f58907938f030a8b514611f391405614a525cccd2ffc00eb5/xxhash-3.7.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c9b31ab1f28b078a6a1ac1a54eb35e7d5390deddd56870d0be3a0a733d1c321c", size = 237993, upload-time = "2026-04-25T11:07:36.638Z" }, + { url = "https://files.pythonhosted.org/packages/70/f5/736db5de387b4a540e37a05b84b40dc58a1ce974bfd2b4e5754ce29b68c3/xxhash-3.7.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3bb5fd680c038fd5229e44e9c493782f90df9bef632fd0499d442374688ff70b", size = 214887, upload-time = "2026-04-25T11:07:38.564Z" }, + { url = "https://files.pythonhosted.org/packages/4d/aa/09a095f22fdb9a27fbb716841fbff52119721f9ca4261952d07a912f7839/xxhash-3.7.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:030c0fd688fce3569fbb49a2feefd4110cbb0b650186fb4610759ecfac677548", size = 448407, upload-time = "2026-04-25T11:07:40.552Z" }, + { url = "https://files.pythonhosted.org/packages/74/8a/b745efeeca9e34a91c26fdc97ad8514c43d5a81ac78565cba80a1353870a/xxhash-3.7.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5b1bde10324f4c31812ae0d0502e92d916ae8917cad7209353f122b8b8f610c3", size = 196119, upload-time = "2026-04-25T11:07:42.101Z" }, + { url = "https://files.pythonhosted.org/packages/8a/5c/0cfceb024af90c191f665c7933b1f318ee234f4797858383bebd1881d52f/xxhash-3.7.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:503722d52a615f2604f5e7611de7d43878df010dc0053094ef91cb9a9ac3d987", size = 286751, upload-time = "2026-04-25T11:07:43.568Z" }, + { url = "https://files.pythonhosted.org/packages/0b/0a/0793e405dc3cf8f4ebe2c1acec1e4e4608cd9e7e50ea691dabbc2a95ccbb/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c72500a3b6d6c30ebfc135035bcace9eb5884f2dc220804efcaaba43e9f611dd", size = 212961, upload-time = "2026-04-25T11:07:45.388Z" }, + { url = "https://files.pythonhosted.org/packages/0c/7e/721118ffc63bfff94aa565bcf2555a820f9f4bdb0f001e0d609bdfad70de/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:43475925a766d01ca8cd9a857fd87f3d50406983c8506a4c07c4df12adcc867f", size = 243703, upload-time = "2026-04-25T11:07:47.053Z" }, + { url = "https://files.pythonhosted.org/packages/6e/18/16f6267160488b8276fd3d449d425712512add292ba545c1b6946bfdb7dd/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8d09dfd2ab135b985daf868b594315ebe11ad86cd9fea46e6c69f19b28f7d25a", size = 200894, upload-time = "2026-04-25T11:07:48.657Z" }, + { url = "https://files.pythonhosted.org/packages/2d/94/80ba841287fd97e3e9cac1d228788c8ef623746f570404961eec748ecb5c/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = 
"sha256:c50269d0055ac1faecfd559886d2cbe4b730de236585aba0e873f9d9dadbe585", size = 213357, upload-time = "2026-04-25T11:07:50.257Z" }, + { url = "https://files.pythonhosted.org/packages/a1/7e/106d4067130c59f1e18a55ffadcd876d8c68534883a1e02685b29d3d8153/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:1910df4756a5ab58cfad8744fc2d0f23926e3efcc346ee76e87b974abab922f4", size = 277600, upload-time = "2026-04-25T11:07:51.745Z" }, + { url = "https://files.pythonhosted.org/packages/c5/86/a081dd30da71d720b2612a792bfd55e45fa9a07ac76a0507f60487473c25/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d006faf3b491957efcb433489be3c149efe4787b7063d5cddb8ddaefdc60e0c1", size = 416980, upload-time = "2026-04-25T11:07:53.504Z" }, + { url = "https://files.pythonhosted.org/packages/35/29/1a95221a029a3c1293773869e1ab47b07cbbdd82444a42809e8c60156626/xxhash-3.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:abb65b4e947e958f7b3b0d71db3ce447d1bc5f37f5eab871ce7223bda8768a04", size = 193840, upload-time = "2026-04-25T11:07:55.103Z" }, + { url = "https://files.pythonhosted.org/packages/c5/e0/db909dd0823285de2286f67e10ee4d81e96ad35d7d8e964ecb07fccd8af9/xxhash-3.7.0-cp313-cp313t-win32.whl", hash = "sha256:178959906cb1716a1ce08e0d69c82886c70a15a6f2790fc084fdd146ca30cd49", size = 30966, upload-time = "2026-04-25T11:07:56.524Z" }, + { url = "https://files.pythonhosted.org/packages/7b/ff/d705b15b22f21ee106adce239cb65d35067a158c630b240270f09b17c2e6/xxhash-3.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2524a1e20d4c231d13b50f7cf39e44265b055669a64a7a4b9a2a44faa03f19b6", size = 31784, upload-time = "2026-04-25T11:07:57.758Z" }, + { url = "https://files.pythonhosted.org/packages/a2/1f/b2cf83c3638fd0588e0b17f22e5a9400bdfb1a3e3755324ac0aee2250b88/xxhash-3.7.0-cp313-cp313t-win_arm64.whl", hash = "sha256:37d994d0ffe81ef087bb330d392caa809bb5853c77e22ea3f71db024a0543dba", size = 27932, upload-time = "2026-04-25T11:07:59.109Z" }, + { url = 
"https://files.pythonhosted.org/packages/0e/cc/431db584f6fbb9312e40a173af027644e5580d39df1f73603cbb9dca4d6b/xxhash-3.7.0-cp314-cp314-android_24_arm64_v8a.whl", hash = "sha256:8c5fcfd806c335bfa2adf1cd0b3110a44fc7b6995c3a648c27489bae85801465", size = 36644, upload-time = "2026-04-25T11:08:00.658Z" }, + { url = "https://files.pythonhosted.org/packages/bc/01/255ec513e0a705d1f9a61413e78dfce4e3235203f0ed525a24c2b4b56345/xxhash-3.7.0-cp314-cp314-android_24_x86_64.whl", hash = "sha256:506a0b488f190f0a06769575e30caf71615c898ed93ab18b0dbcb6dec5c3713c", size = 35003, upload-time = "2026-04-25T11:08:02.338Z" }, + { url = "https://files.pythonhosted.org/packages/68/70/c55fc33c93445b44d8fc5a17b41ed99e3cebe92bcf8396809e63fc9a1165/xxhash-3.7.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:ec68dbba21532c0173a9872298e65c89749f7c9d21538c3a78b5bb6105871568", size = 29655, upload-time = "2026-04-25T11:08:03.701Z" }, + { url = "https://files.pythonhosted.org/packages/c2/72/ff8de73df000d74467d12a59ce6d6e2b2a368b978d41ab7b1fba5ed442be/xxhash-3.7.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:fa77e7ec1450d415d20129961814787c9abd9a07f98872f070b1fe96c5084611", size = 30664, upload-time = "2026-04-25T11:08:05.011Z" }, + { url = "https://files.pythonhosted.org/packages/b6/91/08416d9bd9bc3bf39d831abe8a5631ac2db5141dfd6fe81c3fe59a1f9264/xxhash-3.7.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:fe32736295ea38e43e7d9424053c8c47c9f64fecfc7c895fb3da9b30b131c9ee", size = 33317, upload-time = "2026-04-25T11:08:06.413Z" }, + { url = "https://files.pythonhosted.org/packages/0e/3b/86b1caa4dee10a99f4bf9521e623359341c5e50d05158fa10c275b2bd079/xxhash-3.7.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:ab9dd2c83c4bbd63e422181a76f13502d049d3ddcac9a1bdc29196263d692bb8", size = 33457, upload-time = "2026-04-25T11:08:08.099Z" }, + { url = 
"https://files.pythonhosted.org/packages/ed/38/98ea14ad1517e1461292a65906951458d520689782bfbae111050145bdba/xxhash-3.7.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3afec3a336a2286601a437cb07562ab0227685e6fbb9ec17e8c18457ff348ecf", size = 30894, upload-time = "2026-04-25T11:08:09.429Z" }, + { url = "https://files.pythonhosted.org/packages/61/a2/074654d0b893606541199993c7db70067d9fc63b748e0d60020a52a1bd36/xxhash-3.7.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:565df64437a9390f84465dcca33e7377114c7ede8d05cd2cf20081f831ea788e", size = 194409, upload-time = "2026-04-25T11:08:10.91Z" }, + { url = "https://files.pythonhosted.org/packages/e2/26/6d2a1afc468189f77ca28c32e1c83e1b9da1178231e05641dbc1b350e332/xxhash-3.7.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:12eca820a5d558633d423bf8bb78ce72a55394823f64089247f788a7e0ae691e", size = 213135, upload-time = "2026-04-25T11:08:12.575Z" }, + { url = "https://files.pythonhosted.org/packages/8e/0e/d8aecf95e09c42547453137be74d2f7b8b14e08f5177fa2fab6144a19061/xxhash-3.7.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f262b8f7599516567e070abf607b9af649052b2c4bd6f9be02b0cb41b7024805", size = 236379, upload-time = "2026-04-25T11:08:14.206Z" }, + { url = "https://files.pythonhosted.org/packages/f2/74/8140e8210536b3dd0cc816c4faaeb5ba6e63e8125ab25af4bcddd6a037b3/xxhash-3.7.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1598916cb197681e03e601901e4ab96a9a963de398c59d0964f8a6f44a2b361", size = 212447, upload-time = "2026-04-25T11:08:15.79Z" }, + { url = "https://files.pythonhosted.org/packages/a0/d2/462001d2903b4bee5a5689598a0a55e5e7cd1ac7f4247a5545cff10d3ebb/xxhash-3.7.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:322b2f0622230f526aeb1738149948a7ae357a9e2ceb1383c6fd1fdaecdafa16", size = 445660, upload-time = "2026-04-25T11:08:17.441Z" }, + { url = "https://files.pythonhosted.org/packages/23/09/2bd1ed7f8689b20e51727952cac8329d50c694dc32b2eba06ba5bc742b37/xxhash-3.7.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:24cc22070880cc57b830a65cde4e65fa884c6d9b28ae4803b5ee05911e7bafba", size = 194076, upload-time = "2026-04-25T11:08:19.134Z" }, + { url = "https://files.pythonhosted.org/packages/c9/6e/692302cd0a5f4ac4e6289f37fa888dc2e1e07750b68fe3e4bfe939b8cea3/xxhash-3.7.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb5a888a968b2434abf9ecda357b5d43f10d7b5a6da6fdbbe036208473aff0e2", size = 284990, upload-time = "2026-04-25T11:08:20.618Z" }, + { url = "https://files.pythonhosted.org/packages/05/d9/e54b159b3d9df7999d2a7c676ce7b323d1b5588a64f8f51ed8172567bd87/xxhash-3.7.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a999771ff97bec27d18341be4f3a36b163bb1ac41ec17bef6d2dabd84acd33c7", size = 210590, upload-time = "2026-04-25T11:08:22.24Z" }, + { url = "https://files.pythonhosted.org/packages/50/93/0e0df1a3a196ced4ca71de76d65ead25d8e87bbfb87b64306ea47a40c00d/xxhash-3.7.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:ed4a6efe2dee1655adb73e7ad40c6aa955a6892422b1e3b95de6a34de56e3cbb", size = 241442, upload-time = "2026-04-25T11:08:23.844Z" }, + { url = "https://files.pythonhosted.org/packages/9a/a9/d917a7a814e90b218f8a0d37967105eea91bf752c3303683c99a1f7bfc1f/xxhash-3.7.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9fd17f14ac0faa12126c2f9ca774a8cf342957265ec3c8669c144e5e6cdb478c", size = 198356, upload-time = "2026-04-25T11:08:25.99Z" }, + { url = "https://files.pythonhosted.org/packages/89/5e/f2ba1877c39469abbefc72991d6ebdcbd4c0880db01ae8cb1f553b0c537d/xxhash-3.7.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = 
"sha256:05fd1254268c59b5cb2a029dfc204275e9fc52de2913f1e53aa8d01442c96b4d", size = 210898, upload-time = "2026-04-25T11:08:27.608Z" }, + { url = "https://files.pythonhosted.org/packages/90/c6/be56b58e73de531f39a10de1355bb77ceb663900dc4bf2d6d3002a9c3f9e/xxhash-3.7.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:a2eae53197c6276d5b317f75a1be226bbf440c20b58bf525f36b5d0e1f657ca6", size = 275519, upload-time = "2026-04-25T11:08:29.301Z" }, + { url = "https://files.pythonhosted.org/packages/92/e2/17ddc85d5765b9c709f192009ed8f5a1fc876f4eb35bba7c307b5b1169f9/xxhash-3.7.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:bfe6f92e3522dcbe8c4281efd74fa7542a336cb00b0e3272c4ec0edabeaeaf67", size = 414191, upload-time = "2026-04-25T11:08:31.16Z" }, + { url = "https://files.pythonhosted.org/packages/9c/42/85f5b79f4bf1ec7ba052491164adfd4f4e9519f5dc7246de4fbd64a1bd56/xxhash-3.7.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7ab9a49c410d8c6c786ab99e79c529938d894c01433130353dd0fe999111077a", size = 191604, upload-time = "2026-04-25T11:08:32.862Z" }, + { url = "https://files.pythonhosted.org/packages/b8/d0/6127b623aa4cca18d8b7743592b048d689fd6c6e37ff26a22cddf6cd9d7f/xxhash-3.7.0-cp314-cp314-win32.whl", hash = "sha256:040ea63668f9185b92bc74942df09c7e65703deed71431333678fc6e739a9955", size = 31271, upload-time = "2026-04-25T11:08:34.651Z" }, + { url = "https://files.pythonhosted.org/packages/64/4f/44fc4788568004c43921701cbc127f48218a1eede2c9aea231115323564d/xxhash-3.7.0-cp314-cp314-win_amd64.whl", hash = "sha256:2a61e2a3fb23c892496d587b470dee7fa1b58b248a187719c65ea8e94ec13257", size = 32284, upload-time = "2026-04-25T11:08:35.987Z" }, + { url = "https://files.pythonhosted.org/packages/6d/77/18bb895eb60a49453d16e17d67990e5caff557c78eafc90ad4e2eabf4570/xxhash-3.7.0-cp314-cp314-win_arm64.whl", hash = "sha256:c7741c7524961d8c0cb4d4c21b28957ff731a3fd5b5cd8b856dc80a40e9e5acc", size = 28701, upload-time = "2026-04-25T11:08:37.767Z" }, + { url = 
"https://files.pythonhosted.org/packages/45/a0/46f72244570c550fbbb7db1ef554183dd5ebe9136385f30e032b781ae8f6/xxhash-3.7.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:fc84bf7aa7592f31ec63a3e7b11d624f468a3f19f5238cec7282a42e838ab1d7", size = 33646, upload-time = "2026-04-25T11:08:39.109Z" }, + { url = "https://files.pythonhosted.org/packages/4a/3a/453846a7eceea11e75def361eed01ec6a0205b9822c19927ed364ccae7cc/xxhash-3.7.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9f1563fdc8abfc389748e6932c7e4e99c89a53e4ec37d4563c24fc06f5e5644b", size = 31125, upload-time = "2026-04-25T11:08:40.467Z" }, + { url = "https://files.pythonhosted.org/packages/bd/3e/49434aba738885d512f9e486db1bdd19db28dfa40372b56da26ef7a4e738/xxhash-3.7.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2d415f18becf6f153046ab6adc97da77e3643a0ee205dae61c4012604113a020", size = 196633, upload-time = "2026-04-25T11:08:41.943Z" }, + { url = "https://files.pythonhosted.org/packages/a4/e9/006cb6127baeb9f8abe6d15e62faa01349f09b34e2bfd65175b2422d026b/xxhash-3.7.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bb16aa13ed175bc9be5c2491ba031b85a9b51c4ed90e0b3d4ebe63cf3fb54f8e", size = 215899, upload-time = "2026-04-25T11:08:43.645Z" }, + { url = "https://files.pythonhosted.org/packages/27/e4/cc57d72e66df0ae29b914335f1c6dcf61e8f3746ddf0ae3c471aa4f15e00/xxhash-3.7.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f9fd595f1e5941b3d7863e4774e4b30caa6731fc34b9277da032295aa5656ee5", size = 238116, upload-time = "2026-04-25T11:08:45.698Z" }, + { url = "https://files.pythonhosted.org/packages/af/78/3531d4a3fd8a0038cc6be1f265a69c1b3587f557a10b677dd736de2202c1/xxhash-3.7.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1295325c5a98d552333fa53dc2b026b0ef0ec9c8e73ca3a952990b4c7d65d459", size = 215012, upload-time = 
"2026-04-25T11:08:47.355Z" }, + { url = "https://files.pythonhosted.org/packages/b4/f6/259fb1eaaec921f59b17203b0daee69829761226d3b980d5191d7723dd83/xxhash-3.7.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3573a651d146912da9daa9e29e5fbc45994420daaa9ef1e2fa5823e1dc485513", size = 448534, upload-time = "2026-04-25T11:08:49.149Z" }, + { url = "https://files.pythonhosted.org/packages/7b/16/a66d0eaf6a7e68532c07714361ddc904c663ec940f3b028c1ae4a21a7b9d/xxhash-3.7.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ec1e080a3d02d94ea9335bfab0e3374b877e25411422c18f51a943fa4b46381", size = 196217, upload-time = "2026-04-25T11:08:50.805Z" }, + { url = "https://files.pythonhosted.org/packages/8d/ef/d2efc7fc51756dc52509109d1a25cefc859d74bc4b19a167b12dbd8c2786/xxhash-3.7.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:84415265192072d8638a3afc3c1bc5995e310570cd9acb54dc46d3939e364fe0", size = 286906, upload-time = "2026-04-25T11:08:52.418Z" }, + { url = "https://files.pythonhosted.org/packages/fc/67/25decd1d4a4018582ec4db2a868a2b7e40640f4adb20dfeb19ac923aa825/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8d4dea659b57443989ef32f4295104fd6912c73d0bf26d1d148bb88a9f159b02", size = 213057, upload-time = "2026-04-25T11:08:54.105Z" }, + { url = "https://files.pythonhosted.org/packages/0d/5d/17651eb29d06786cdc40c60ae3d27d645aa5d61d2eca6237a7ba0b94789b/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:05ece0fe4d9c9c2728912d1981ae1566cfc83a011571b24732cbf76e1fb70dca", size = 243886, upload-time = "2026-04-25T11:08:56.109Z" }, + { url = "https://files.pythonhosted.org/packages/8a/d4/174d9cf7502243d586e6a9ae842b1ae23026620995114f85f1380e588bc9/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:fd880353cf1ffaf321bc18dd663e111976dbd0d3bbd8a66d58d2b470dfa7f396", size = 201015, upload-time = 
"2026-04-25T11:08:57.777Z" }, + { url = "https://files.pythonhosted.org/packages/91/8c/2254e2d06c3ac5e6fe22eaf3da791b87ea823ae9f2c17b4af66755c5752d/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:4e15cc9e2817f6481160f930c62842b3ff419e20e13072bcbab12230943092bc", size = 213457, upload-time = "2026-04-25T11:08:59.826Z" }, + { url = "https://files.pythonhosted.org/packages/79/a2/e3daa762545921173e3360f3b4ff7fc63c2d27359f7230ec1a7a74e117f6/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:90b9d1a8bd37d768ffc92a1f651ec69afc532a96fa1ac2ea7abbed5d630b3237", size = 277738, upload-time = "2026-04-25T11:09:01.423Z" }, + { url = "https://files.pythonhosted.org/packages/e1/4c/e186da2c46b87f5204640e008d42730bf3c1ee9f0efb71ae1ebcdfeac681/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:157c49475b34ecea8809e51123d9769a534e139d1247942f7a4bc67710bb2533", size = 417127, upload-time = "2026-04-25T11:09:03.592Z" }, + { url = "https://files.pythonhosted.org/packages/17/28/3798e15007a3712d0da3d3fe70f8e11916569858b5cc371053bc26270832/xxhash-3.7.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5a6ddec83325685e729ca119d1f5c518ec39294212ecd770e60693cdc5f7eb79", size = 193962, upload-time = "2026-04-25T11:09:06.228Z" }, + { url = "https://files.pythonhosted.org/packages/ad/95/a26baa93b5241fd7630998816a4ec47a5a0bad193b3f8fc8f3593e1a4a67/xxhash-3.7.0-cp314-cp314t-win32.whl", hash = "sha256:a04a6cab47e2166435aaf5b9e5ee41d1532cc8300efdef87f2a4d0acb7db19ed", size = 31643, upload-time = "2026-04-25T11:09:08.153Z" }, + { url = "https://files.pythonhosted.org/packages/44/36/5454f13c447e395f9b06a3e91274c59f503d31fad84e1836efe3bdb71f6a/xxhash-3.7.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8653dd7c2eda020545bb2c71c7f7039b53fe7434d0fc1a0a9deb79ab3f1a4fc1", size = 32522, upload-time = "2026-04-25T11:09:09.534Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/35/698e7e3ff38e22992ea24870a511d8762474fb6783627a2910ff22a185c2/xxhash-3.7.0-cp314-cp314t-win_arm64.whl", hash = "sha256:468f0fc114faaa4b36699f8e328bbc3bb11dc418ba94ac52c26dd736d4b6c637", size = 28807, upload-time = "2026-04-25T11:09:11.234Z" }, ] [[package]]