From b1d162f333d4a7e15151e499234e9c3a8e5136ab Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 17 Dec 2025 12:12:03 +0100 Subject: [PATCH 01/12] fix(policies): add device back to smolvlm expert (#2662) --- src/lerobot/policies/smolvla/modeling_smolvla.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lerobot/policies/smolvla/modeling_smolvla.py b/src/lerobot/policies/smolvla/modeling_smolvla.py index cce41def8..485d3e4e5 100644 --- a/src/lerobot/policies/smolvla/modeling_smolvla.py +++ b/src/lerobot/policies/smolvla/modeling_smolvla.py @@ -527,6 +527,7 @@ class VLAFlowMatching(nn.Module): num_vlm_layers=self.config.num_vlm_layers, self_attn_every_n_layers=self.config.self_attn_every_n_layers, expert_width_multiplier=self.config.expert_width_multiplier, + device=self.config.device if self.config.device is not None else "auto", ) self.state_proj = nn.Linear( self.config.max_state_dim, self.vlm_with_expert.config.text_config.hidden_size From b303d1ab38f81fdc4606b53dbfb2f9c07989c51b Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 17 Dec 2025 14:14:23 +0100 Subject: [PATCH 02/12] feat(scripts): add more info to lerobot-info (#2663) --- src/lerobot/scripts/lerobot_info.py | 44 ++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/src/lerobot/scripts/lerobot_info.py b/src/lerobot/scripts/lerobot_info.py index 9b49cad18..879d392be 100644 --- a/src/lerobot/scripts/lerobot_info.py +++ b/src/lerobot/scripts/lerobot_info.py @@ -27,6 +27,25 @@ lerobot-info import importlib import platform +import shutil +import subprocess +from importlib.metadata import PackageNotFoundError, distribution + +PACKAGE_NAME = "lerobot" + + +def get_ffmpeg_version() -> str: + """Get the ffmpeg version if installed, otherwise return 'N/A'.""" + command_path = shutil.which("ffmpeg") + if command_path is None: + return "N/A" + try: + result = subprocess.run([command_path, "-version"], capture_output=True, text=True, check=True) + first_line = result.stdout.splitlines()[0] + version_info = first_line.split(" ")[2] + return version_info + except (subprocess.SubprocessError, IndexError): + return "Installed (version parsing failed)" def get_package_version(package_name: str) -> str: @@ -38,16 +57,17 @@ def get_package_version(package_name: str) -> str: return "N/A" -def get_sys_info() -> dict: +def get_sys_info() -> dict[str, str]: """Run this to get basic system info to help for tracking issues & bugs.""" # General package versions info = { - "lerobot version": get_package_version("lerobot"), + "LeRobot version": get_package_version(PACKAGE_NAME), "Platform": platform.platform(), "Python version": platform.python_version(), "Huggingface Hub version": get_package_version("huggingface_hub"), "Datasets version": get_package_version("datasets"), "Numpy version": get_package_version("numpy"), + "FFmpeg version": get_ffmpeg_version(), } # PyTorch and GPU specific information @@ -58,10 +78,10 @@ def get_sys_info() -> dict: try: import torch - torch_version = torch.__version__ + torch_version = str(torch.__version__) torch_cuda_available = torch.cuda.is_available() if torch_cuda_available: - cuda_version = torch.version.cuda + cuda_version = str(torch.version.cuda) # Gets the name of the first available GPU gpu_model = torch.cuda.get_device_name(0) except ImportError: @@ -71,24 +91,34 @@ def get_sys_info() -> dict: info.update( { "PyTorch version": torch_version, - "Is PyTorch built with CUDA support?": torch_cuda_available, + "Is PyTorch built with CUDA support?": 
str(torch_cuda_available), "Cuda version": cuda_version, "GPU model": gpu_model, "Using GPU in script?": "", } ) + scripts = "N/A" + try: + dist = distribution(PACKAGE_NAME) + scripts = [ep.name for ep in dist.entry_points if ep.group == "console_scripts"] + except PackageNotFoundError: + pass + + info.update({f"{PACKAGE_NAME} scripts": str(scripts)}) return info -def format_dict_for_markdown(d: dict) -> str: +def format_dict_for_markdown(d: dict[str, str]) -> str: """Formats a dictionary into a markdown-friendly bulleted list.""" return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) def main(): + """ + Main function to print system info in markdown format. + """ system_info = get_sys_info() - print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the last point.\n") print(format_dict_for_markdown(system_info)) From 92fdbe9bbf1efbaf27cfe54651e00e14cc49c6c1 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 17 Dec 2025 14:14:31 +0100 Subject: [PATCH 03/12] docs(dataset): add visualization section (#2664) --- docs/source/using_dataset_tools.mdx | 38 +++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/docs/source/using_dataset_tools.mdx b/docs/source/using_dataset_tools.mdx index 22c28828c..29e16ea0a 100644 --- a/docs/source/using_dataset_tools.mdx +++ b/docs/source/using_dataset_tools.mdx @@ -163,3 +163,41 @@ lerobot-edit-dataset \ ``` There is also a tool for adding features to a dataset that is not yet covered in `lerobot-edit-dataset`. + +# Dataset Visualization + +## Online Visualization + +When you record a dataset using `lerobot`, it automatically uploads to the Hugging Face Hub unless you specify otherwise. To view the dataset online, use our **LeRobot Dataset Visualizer**, available at: +https://huggingface.co/spaces/lerobot/visualize_dataset + +## Local Visualization + +You can also visualize episodes from a dataset locally using our command-line tool. + +**From the Hugging Face Hub:** + +```bash +lerobot-dataset-viz \ + --repo-id lerobot/pusht \ + --episode-index 0 +``` + +**From a local folder:** +Add the `--root` option and set `--mode local`. For example, to search in `./my_local_data_dir/lerobot/pusht`: + +```bash +lerobot-dataset-viz \ + --repo-id lerobot/pusht \ + --root ./my_local_data_dir \ + --mode local \ + --episode-index 0 +``` + +Once executed, the tool opens `rerun.io` and displays the camera streams, robot states, and actions for the selected episode. + +For advanced usageβ€”including visualizing datasets stored on a remote serverβ€”run: + +```bash +lerobot-dataset-viz --help +``` From 7621af5acdad69d173a10c0a6513f0a538b05e56 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 17 Dec 2025 17:10:04 +0100 Subject: [PATCH 04/12] chore(ci): update PR template (#2665) * chore: update code of conduct to transformers one * chore: update PR template --- .github/PULL_REQUEST_TEMPLATE.md | 67 +++++++++++++++++++------------- CODE_OF_CONDUCT.md | 4 +- 2 files changed, 42 insertions(+), 29 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d37b1a92f..ec5ac4372 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,41 +1,54 @@ -## What this does +## Title -Explain what this PR does. Feel free to tag your PR with the appropriate label(s). +Short, imperative summary (e.g., "fix(robots): handle None in sensor parser"). See [CONTRIBUTING.md](../CONTRIBUTING.md) for PR conventions. 
-Examples: -| Title | Label | -|----------------------|-----------------| -| Fixes #[issue] | (πŸ› Bug) | -| Adds new dataset | (πŸ—ƒοΈ Dataset) | -| Optimizes something | (⚑️ Performance) | +## Type / Scope -## How it was tested +- **Type**: (Bug | Feature | Docs | Performance | Test | CI | Chore) +- **Scope**: (optional β€” name of module or package affected) -Explain/show how you tested your changes. +## Summary / Motivation -Examples: +- One-paragraph description of what changes and why. +- Why this change is needed and any trade-offs or design notes. -- Added `test_something` in `tests/test_stuff.py`. -- Added `new_feature` and checked that training converges with policy X on dataset/environment Y. -- Optimized `some_function`, it now runs X times faster than previously. +## Related issues -## How to checkout & try? (for the reviewer) +- Fixes / Closes: # (if any) +- Related: # (if any) -Provide a simple way for the reviewer to try out your changes. +## What changed -Examples: +- Short, concrete bullets of the modifications (files/behaviour). +- Short note if this introduces breaking changes and migration steps. -```bash -pytest -sx tests/test_stuff.py::test_something -``` +## How was this tested -```bash -lerobot-train --some.option=true -``` +- Tests added: list new tests or test files. +- Manual checks / dataset runs performed. -## SECTION TO REMOVE BEFORE SUBMITTING YOUR PR +## How to run locally (reviewer) -**Note**: Anyone in the community is free to review the PR once the tests have passed. Feel free to tag -members/contributors who may be interested in your PR. Try to avoid tagging more than 3 people. +- Run the relevant tests: -**Note**: Before submitting this PR, please read the [contributor guideline](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md#submitting-a-pull-request-pr). + ```bash + pytest -q tests/ -k + ``` + +- Run a quick example or CLI (if applicable): + + ```bash + lerobot-train --some.option=true + ``` + +## Checklist (required before merge) + +- [ ] Linting/formatting run (`pre-commit run -a`) +- [ ] All tests pass locally (`pytest`) +- [ ] Documentation updated +- [ ] CI is green + +## Reviewer notes + +- Anything the reviewer should focus on (performance, edge-cases, specific files) or general notes. +- Anyone in the community is free to review the PR. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index c0fdac843..305ffa276 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -52,7 +52,7 @@ decisions when appropriate. This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. -Examples of representing our community include using an official email address, +Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. @@ -60,7 +60,7 @@ representative at an online or offline event. Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at -[feedback@huggingface.co](mailto:feedback@huggingface.co). +feedback@huggingface.co. All complaints will be reviewed and investigated promptly and fairly. 
All community leaders are obligated to respect the privacy and security of the From f0c98e23f17456b9b0bd68ae0fa651c5ad496968 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 17 Dec 2025 17:52:45 +0100 Subject: [PATCH 05/12] feat(ci): simple automatic labelling (#2667) * ci: add pr labeler * ci: add issue labeler * ci: minor fixes for labelers * fix(ci): add explicit path for pr labeler --- .github/workflows/issue_labeler.yml | 105 ++++++++++++++++++++++++++++ .github/workflows/labeler.yml | 68 ++++++++++++++++++ .github/workflows/pr_labeler.yml | 39 +++++++++++ 3 files changed, 212 insertions(+) create mode 100644 .github/workflows/issue_labeler.yml create mode 100644 .github/workflows/labeler.yml create mode 100644 .github/workflows/pr_labeler.yml diff --git a/.github/workflows/issue_labeler.yml b/.github/workflows/issue_labeler.yml new file mode 100644 index 000000000..352819db1 --- /dev/null +++ b/.github/workflows/issue_labeler.yml @@ -0,0 +1,105 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow automatically labels issues based on their content. +name: Issue Labeler +on: + # Trigger on new issues and edits to existing issues + issues: + types: [opened, edited] + +permissions: + contents: read + issues: write + +jobs: + label-issue: + name: Auto Label Issue + runs-on: ubuntu-latest + if: github.repository == 'huggingface/lerobot' + steps: + - uses: actions/github-script@v8 + with: + script: | + // Setup Input Text (Unified Title + Body) + const body = (context.payload.issue.body || ''); + const title = (context.payload.issue.title || ''); + + // We keep a lowercased version for keyword matching + const text = `${title}\n${body}`.toLowerCase(); + + const labelsToAdd = new Set(); + + // Helper: Simple regex test + const matches = (re) => re.test(text); + + // Issue Type Detection (Dropdowns & Explicit Headers) + if (text.includes('bug report') || /\bissue type:.*bug/.test(text)) { + labelsToAdd.add('bug'); + } + if (text.includes('feature request') || /\bissue type:.*feature/.test(text)) { + labelsToAdd.add('enhancement'); + } + if (text.includes('technical question') || /\bissue type:.*question/.test(text)) { + labelsToAdd.add('question'); + } + if (text.includes('maintenance') || /\bissue type:.*maintenance/.test(text)) { + labelsToAdd.add('documentation'); + } + + // Keyword Heuristic + + // Domain Specific + if (matches(/example(s)?\b|script(s)?\b|sample(s)?\b|demo(s)?\b|notebook(s)?\b/i)) labelsToAdd.add('examples'); + if (matches(/dataset(s)?\b|data loader|data augmentation|data preprocessing/i)) labelsToAdd.add('dataset'); + if (matches(/mujoco|isaac|\bsimulation\b|\bsim /i)) labelsToAdd.add('simulation'); + if (matches(/train|loss|optimizer|backward|gradient|wandb|sac\b/i)) labelsToAdd.add('training'); + if (matches(/rerun|plot|video|render|visualiz|gif/i)) labelsToAdd.add('visualization'); + if (matches(/camera|realsense|lidar|depth|sensor|imu|microphone|rgbd/i)) labelsToAdd.add('sensors'); + 
if (matches(/aloha|koch|so-100|so100|mobile|teleop|manipulator|robot(s)?\b/i)) labelsToAdd.add('robots'); + if (matches(/teleop|teleoperator|controller|leader|follower|joystick|gamepad/i)) labelsToAdd.add('teleoperators'); + if (matches(/policy|policies|p0licy/i)) labelsToAdd.add('policies'); + if (matches(/processor(s)?\b|implement.*processor|processor pipeline/i)) labelsToAdd.add('processor'); + if (matches(/eval|evaluate|evaluation|metric(s)?\b|score|benchmark/i)) labelsToAdd.add('evaluation'); + + // Infrastructure & Code Quality + if (matches(/test|pytest|unittest|failing test/i)) labelsToAdd.add('tests'); + if (matches(/ci|github actions|workflow|gha|action(s)?\b|pipeline/i)) { + labelsToAdd.add('CI'); + labelsToAdd.add('github_actions'); + } + if (matches(/perf|latency|benchmark|throughput|fps|speed|performance|benchmarking/i)) labelsToAdd.add('performance'); + if (matches(/dependency|requirements|pip|conda|install error|importerror|package not found/i)) labelsToAdd.add('dependencies'); + if (matches(/python\b|pyproject|requirements(\.txt)?|pip install|typing error/i)) labelsToAdd.add('python'); + + // Documentation & Meta + if (matches(/doc|documentation|docs|readme|typo|how to/i)) labelsToAdd.add('documentation'); + if (matches(/refactor|cleanup|restructure|rename|modernize code/i)) labelsToAdd.add('refactor'); + if (matches(/release|changelog|version bump|cut a release|tag v/i)) labelsToAdd.add('release'); + + // Fixed: "BREAKING CHANGE" must be lowercase in regex because 'text' is lowercase + if (matches(/breaking change|breaking:|major change/i)) labelsToAdd.add('breaking change'); + + // Apply Labels + const labels = Array.from(labelsToAdd).filter(Boolean); + + if (labels.length > 0) { + console.log(`Adding labels: ${labels.join(', ')}`); + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + labels, + }); + } diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 000000000..1ff046882 --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,68 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
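# Configuration for actions/labeler (loaded by pr_labeler.yml): each top-level
# key below is a label name, applied when a changed file in the PR matches any
# of its glob patterns.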
+ +CI: + - changed-files: + - any-glob-to-any-file: + - '.github/**' + - 'docker/**' + +github_actions: + - changed-files: + - any-glob-to-any-file: '.github/**' + +documentation: + - changed-files: + - any-glob-to-any-file: + - '**/*.md' + - 'docs/**' + +examples: + - changed-files: + - any-glob-to-any-file: 'examples/**' + +tests: + - changed-files: + - any-glob-to-any-file: 'tests/**' + +sensors: + - changed-files: + - any-glob-to-any-file: 'src/lerobot/cameras/**' + +configuration: + - changed-files: + - any-glob-to-any-file: 'src/lerobot/configs/**' + +dataset: + - changed-files: + - any-glob-to-any-file: 'src/lerobot/datasets/**' + +evaluation: + - changed-files: + - any-glob-to-any-file: 'src/lerobot/envs/**' + +robots: + - changed-files: + - any-glob-to-any-file: + - 'src/lerobot/teleoperators/**' + - 'src/lerobot/robots/**' + - 'src/lerobot/motors/**' + +policies: + - changed-files: + - any-glob-to-any-file: 'src/lerobot/policies/**' + +processor: + - changed-files: + - any-glob-to-any-file: 'src/lerobot/processor/**' diff --git a/.github/workflows/pr_labeler.yml b/.github/workflows/pr_labeler.yml new file mode 100644 index 000000000..70338e221 --- /dev/null +++ b/.github/workflows/pr_labeler.yml @@ -0,0 +1,39 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow labels pull requests based on the files that were changed. +name: Pull Request Labeler + +on: + # Allows labeling pull requests when they are opened or updated + pull_request: + branches: + - main + types: [opened, synchronize, reopened, ready_for_review] + +permissions: + contents: read + pull-requests: write + +jobs: + triage: + name: Label PR + runs-on: ubuntu-latest + if: github.repository == 'huggingface/lerobot' + steps: + - uses: actions/labeler@v6 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + sync-labels: true # Removes labels if files are removed from the PR + configuration-path: '.github/workflows/labeler.yml' From 292333cafcbddf7c77aaed0bc5569ba37083ac7f Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 17 Dec 2025 18:02:20 +0100 Subject: [PATCH 06/12] chore(ci): update issue template (#2666) --- .github/ISSUE_TEMPLATE/bug-report.yml | 101 +++++++++++++++++--------- 1 file changed, 68 insertions(+), 33 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 7423495de..b46ed0cca 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -12,57 +12,92 @@ # See the License for the specific language governing permissions and # limitations under the License. -name: "\U0001F41B Bug Report" -description: Submit a bug report to help us improve LeRobot +name: "πŸš€ Issue / Bug / Request" +description: Report a bug, suggest an improvement, or ask a technical question. body: - type: markdown attributes: value: | - Thanks for taking the time to submit a bug report! 
πŸ› - If this is not a bug related to the LeRobot library directly, but instead a general question about your code or the library specifically please use our [discord](https://discord.gg/s3KuuzsPFb). + ### Thanks for contributing to LeRobot! πŸ™Œ + Please choose the most relevant sections below. If this is a general "how-to" question, consider our [Discord](https://discord.gg/s3KuuzsPFb) for faster community support. + + - type: dropdown + id: issue-type + attributes: + label: Issue Type + description: What kind of ticket are you opening? + options: + - label: "πŸ› Bug Report (Something isn't working)" + - label: "πŸ’‘ Feature Request / Improvement" + - label: "❓ Technical Question" + - label: "🧹 Maintenance / Documentation" + validations: + required: true - type: textarea id: system-info attributes: - label: System Info - description: Please share your LeRobot configuration by running `lerobot-info` (if installed) or `python -m lerobot.scripts.display_sys_info` (if not installed) and pasting the output below. + label: Environment & System Info + description: | + For bugs or technical questions, please run `lerobot-info` and paste the output. + (Optional for feature requests). render: Shell - placeholder: lerobot version, OS, python version, numpy version, torch version, and lerobot's configuration - validations: - required: true - - - type: checkboxes - id: information-scripts-examples - attributes: - label: Information - description: 'The problem arises when using:' - options: - - label: "One of the scripts in the examples/ folder of LeRobot" - - label: "My own task or dataset (give details below)" + placeholder: lerobot version, OS, python version, etc. - type: textarea - id: reproduction + id: description validations: required: true attributes: - label: Reproduction + label: Description description: | - If needed, provide a simple code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet. - Sharing error messages or stack traces could be useful as well! - Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting - Try to avoid screenshots, as they are hard to read and don't allow copy-and-pasting. - + Provide a clear summary of the issue or your proposal. + - **Bugs:** What is happening? + - **Features:** What is the goal/use case? + - **Questions:** What are you trying to achieve? placeholder: | - Steps to reproduce the behavior: + A clear and concise description of the issue or suggestion. - 1. - 2. - 3. + - type: textarea + id: context-repro + validations: + required: true + attributes: + label: Context & Reproduction + description: | + Provide a code snippet, steps to reproduce a bug, or technical details about your proposal. + Please use code blocks for logs and scripts. + placeholder: | + Steps to reproduce / Usage example: + 1. + 2. + 3. - type: textarea id: expected-behavior - validations: - required: true attributes: - label: Expected behavior - description: "A clear and concise description of what you would expect to happen." + label: Expected Behavior / Desired Outcome + description: "Describe what you expected to happen or what the ideal solution looks like." + placeholder: "e.g. The script should finish without OOM, or I would like a new flag --fast-mode." + + - type: textarea + id: logs + attributes: + label: Relevant logs or stack trace + description: If applicable, paste relevant error logs here. 
+ render: Shell + + - type: checkboxes + id: extras + attributes: + label: Checklist + options: + - label: I have searched existing issues to ensure this isn't a duplicate. + - label: I am using the latest version of the `main` branch. + - label: (For bugs) I have verified this is not an environment-specific issue. + + - type: textarea + id: workaround + attributes: + label: Additional Info / Workarounds + description: Anything else we should know? If you have a workaround, please share it! From 469b855e4274b1def783b8bd14c7bb12a2bb515b Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 17 Dec 2025 22:31:22 +0100 Subject: [PATCH 07/12] fix(ci): better heuristic + issue type template fix (#2672) * fix(ci): better heuristic + issue type template fix * chore(ci): remove keywords in performance tag --- .github/ISSUE_TEMPLATE/bug-report.yml | 19 ++----- .github/workflows/issue_labeler.yml | 72 +++++++++++---------------- 2 files changed, 33 insertions(+), 58 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index b46ed0cca..1b8095c4a 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -27,10 +27,10 @@ body: label: Issue Type description: What kind of ticket are you opening? options: - - label: "πŸ› Bug Report (Something isn't working)" - - label: "πŸ’‘ Feature Request / Improvement" - - label: "❓ Technical Question" - - label: "🧹 Maintenance / Documentation" + - "πŸ› Bug Report (Something isn't working)" + - "πŸ’‘ Feature Request / Improvement" + - "❓ Technical Question" + - "🧹 Maintenance / Documentation" validations: required: true @@ -60,26 +60,17 @@ body: - type: textarea id: context-repro - validations: - required: true attributes: label: Context & Reproduction description: | Provide a code snippet, steps to reproduce a bug, or technical details about your proposal. - Please use code blocks for logs and scripts. + Please use code blocks for scripts and CLI commands. placeholder: | Steps to reproduce / Usage example: 1. 2. 3. - - type: textarea - id: expected-behavior - attributes: - label: Expected Behavior / Desired Outcome - description: "Describe what you expected to happen or what the ideal solution looks like." - placeholder: "e.g. The script should finish without OOM, or I would like a new flag --fast-mode." 
- - type: textarea id: logs attributes: diff --git a/.github/workflows/issue_labeler.yml b/.github/workflows/issue_labeler.yml index 352819db1..49a06d05d 100644 --- a/.github/workflows/issue_labeler.yml +++ b/.github/workflows/issue_labeler.yml @@ -32,64 +32,48 @@ jobs: - uses: actions/github-script@v8 with: script: | - // Setup Input Text (Unified Title + Body) + // Setup Input Text const body = (context.payload.issue.body || ''); const title = (context.payload.issue.title || ''); - - // We keep a lowercased version for keyword matching - const text = `${title}\n${body}`.toLowerCase(); - + const cleanBody = body.replace(/```[\s\S]*?```/g, ''); + const text = `${title}\n${cleanBody}`.toLowerCase(); const labelsToAdd = new Set(); - - // Helper: Simple regex test const matches = (re) => re.test(text); - // Issue Type Detection (Dropdowns & Explicit Headers) - if (text.includes('bug report') || /\bissue type:.*bug/.test(text)) { - labelsToAdd.add('bug'); - } - if (text.includes('feature request') || /\bissue type:.*feature/.test(text)) { - labelsToAdd.add('enhancement'); - } - if (text.includes('technical question') || /\bissue type:.*question/.test(text)) { - labelsToAdd.add('question'); - } - if (text.includes('maintenance') || /\bissue type:.*maintenance/.test(text)) { - labelsToAdd.add('documentation'); - } - - // Keyword Heuristic + // Keyword Heuristics // Domain Specific - if (matches(/example(s)?\b|script(s)?\b|sample(s)?\b|demo(s)?\b|notebook(s)?\b/i)) labelsToAdd.add('examples'); - if (matches(/dataset(s)?\b|data loader|data augmentation|data preprocessing/i)) labelsToAdd.add('dataset'); - if (matches(/mujoco|isaac|\bsimulation\b|\bsim /i)) labelsToAdd.add('simulation'); - if (matches(/train|loss|optimizer|backward|gradient|wandb|sac\b/i)) labelsToAdd.add('training'); - if (matches(/rerun|plot|video|render|visualiz|gif/i)) labelsToAdd.add('visualization'); - if (matches(/camera|realsense|lidar|depth|sensor|imu|microphone|rgbd/i)) labelsToAdd.add('sensors'); - if (matches(/aloha|koch|so-100|so100|mobile|teleop|manipulator|robot(s)?\b/i)) labelsToAdd.add('robots'); - if (matches(/teleop|teleoperator|controller|leader|follower|joystick|gamepad/i)) labelsToAdd.add('teleoperators'); - if (matches(/policy|policies|p0licy/i)) labelsToAdd.add('policies'); - if (matches(/processor(s)?\b|implement.*processor|processor pipeline/i)) labelsToAdd.add('processor'); - if (matches(/eval|evaluate|evaluation|metric(s)?\b|score|benchmark/i)) labelsToAdd.add('evaluation'); + if (matches(/\b(bug|error|issue|fault|crash|exception|\b/i)) labelsToAdd.add('bug'); + if (matches(/\b(feature|enhancement|improvement|support|implement|proposal)\b/i)) labelsToAdd.add('enhancement'); + if (matches(/\b(question|help|how to||clarify|explain|unclear)\b/i)) labelsToAdd.add('question'); + if (matches(/\b(maintenance|documentation|docs|readme|tutorial|guide|wiki)\b/i)) labelsToAdd.add('documentation'); + if (matches(/\b(example|script|sample|demo|notebook)s?\b/i)) labelsToAdd.add('examples'); + if (matches(/\b(datasets?|data loader|data augmentation|data preprocessing)\b/i)) labelsToAdd.add('dataset'); + if (matches(/\b(mujoco|isaac|simulation|sim)\b/i)) labelsToAdd.add('simulation'); + if (matches(/\b(train|training|loss|optimizer|backward|gradient|wandb|sac)\b/i)) labelsToAdd.add('training'); + if (matches(/\b(rerun|plot|video|render|visualiz|gif)/i)) labelsToAdd.add('visualization'); + if (matches(/\b(camera|realsense|lidar|depth|sensor|imu|microphone|rgbd)\b/i)) labelsToAdd.add('sensors'); + if 
(matches(/\b(aloha|koch|so-100|so100|mobile|teleop|manipulator|robots?)\b/i)) labelsToAdd.add('robots'); + if (matches(/\b(teleop|teleoperator|controller|leader|follower|joystick|gamepad)\b/i)) labelsToAdd.add('teleoperators'); + if (matches(/\b(policy|policies|p0licy)\b/i)) labelsToAdd.add('policies'); + if (matches(/\b(processors?|pipeline)\b/i)) labelsToAdd.add('processor'); + if (matches(/\b(eval|evaluate|evaluation|metrics?|score|benchmark)\b/i)) labelsToAdd.add('evaluation'); // Infrastructure & Code Quality - if (matches(/test|pytest|unittest|failing test/i)) labelsToAdd.add('tests'); - if (matches(/ci|github actions|workflow|gha|action(s)?\b|pipeline/i)) { + if (matches(/\b(tests?|pytest|unittest|failing test)\b/i)) labelsToAdd.add('tests'); + if (matches(/\b(ci|github actions|workflow|gha|actions?|pipeline)\b/i)) { labelsToAdd.add('CI'); labelsToAdd.add('github_actions'); } - if (matches(/perf|latency|benchmark|throughput|fps|speed|performance|benchmarking/i)) labelsToAdd.add('performance'); - if (matches(/dependency|requirements|pip|conda|install error|importerror|package not found/i)) labelsToAdd.add('dependencies'); - if (matches(/python\b|pyproject|requirements(\.txt)?|pip install|typing error/i)) labelsToAdd.add('python'); + if (matches(/\b(perf|latency|throughput|fps|speed|performance)\b/i)) labelsToAdd.add('performance'); + if (matches(/\b(dependency|requirements|pip|conda|install error|importerror|package not found)\b/i)) labelsToAdd.add('dependencies'); + if (matches(/\b(python|pyproject|requirements(\.txt)?|pip install|typing error)\b/i)) labelsToAdd.add('python'); // Documentation & Meta - if (matches(/doc|documentation|docs|readme|typo|how to/i)) labelsToAdd.add('documentation'); - if (matches(/refactor|cleanup|restructure|rename|modernize code/i)) labelsToAdd.add('refactor'); - if (matches(/release|changelog|version bump|cut a release|tag v/i)) labelsToAdd.add('release'); - - // Fixed: "BREAKING CHANGE" must be lowercase in regex because 'text' is lowercase - if (matches(/breaking change|breaking:|major change/i)) labelsToAdd.add('breaking change'); + if (matches(/\b(doc|documentation|docs|readme|typo|how to)\b/i)) labelsToAdd.add('documentation'); + if (matches(/\b(refactor|cleanup|restructure|rename|modernize code)\b/i)) labelsToAdd.add('refactor'); + if (matches(/\b(release|changelog|version bump|cut a release|tag v)\b/i)) labelsToAdd.add('release'); + if (matches(/\b(breaking change|major change)\b/i)) labelsToAdd.add('breaking change'); // Apply Labels const labels = Array.from(labelsToAdd).filter(Boolean); From 86eee5c1e2c7d6084700b6db3c7ec4717b1b7e36 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 17 Dec 2025 22:40:33 +0100 Subject: [PATCH 08/12] fix(ci): close bracket pattern (#2674) --- .github/workflows/issue_labeler.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/issue_labeler.yml b/.github/workflows/issue_labeler.yml index 49a06d05d..27ca2b5f9 100644 --- a/.github/workflows/issue_labeler.yml +++ b/.github/workflows/issue_labeler.yml @@ -43,7 +43,7 @@ jobs: // Keyword Heuristics // Domain Specific - if (matches(/\b(bug|error|issue|fault|crash|exception|\b/i)) labelsToAdd.add('bug'); + if (matches(/\b(bug|error|issue|fault|crash|exception)\b/i)) labelsToAdd.add('bug'); if (matches(/\b(feature|enhancement|improvement|support|implement|proposal)\b/i)) labelsToAdd.add('enhancement'); if (matches(/\b(question|help|how to||clarify|explain|unclear)\b/i)) labelsToAdd.add('question'); if 
(matches(/\b(maintenance|documentation|docs|readme|tutorial|guide|wiki)\b/i)) labelsToAdd.add('documentation'); From 8667b9ef08d73a501067bb55bfde63e64f06da6d Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 17 Dec 2025 22:54:47 +0100 Subject: [PATCH 09/12] chore(ci): minor improvements auto labeling (#2675) --- .github/ISSUE_TEMPLATE/bug-report.yml | 4 ++-- .github/{workflows => }/labeler.yml | 1 + .github/workflows/pr_labeler.yml | 1 - 3 files changed, 3 insertions(+), 3 deletions(-) rename .github/{workflows => }/labeler.yml (98%) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 1b8095c4a..74e88f287 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -83,9 +83,9 @@ body: attributes: label: Checklist options: - - label: I have searched existing issues to ensure this isn't a duplicate. + - label: I have searched existing tickets to ensure this isn't a duplicate. - label: I am using the latest version of the `main` branch. - - label: (For bugs) I have verified this is not an environment-specific issue. + - label: I have verified this is not an environment-specific problem. - type: textarea id: workaround diff --git a/.github/workflows/labeler.yml b/.github/labeler.yml similarity index 98% rename from .github/workflows/labeler.yml rename to .github/labeler.yml index 1ff046882..d3c5cc622 100644 --- a/.github/workflows/labeler.yml +++ b/.github/labeler.yml @@ -26,6 +26,7 @@ documentation: - changed-files: - any-glob-to-any-file: - '**/*.md' + - '**/*.mdx' - 'docs/**' examples: diff --git a/.github/workflows/pr_labeler.yml b/.github/workflows/pr_labeler.yml index 70338e221..8ec0c65f1 100644 --- a/.github/workflows/pr_labeler.yml +++ b/.github/workflows/pr_labeler.yml @@ -36,4 +36,3 @@ jobs: with: repo-token: ${{ secrets.GITHUB_TOKEN }} sync-labels: true # Removes labels if files are removed from the PR - configuration-path: '.github/workflows/labeler.yml' From 4a151a9682b1923c978d31a8388af035c19582d9 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Thu, 18 Dec 2025 00:23:23 +0100 Subject: [PATCH 10/12] chore(ci): minor improvement bug-report template & pr auto label (#2676) * chore(ci): minor improvement bug-report template * chore(ci): change triggers for PR auto label --- .github/ISSUE_TEMPLATE/bug-report.yml | 2 +- .github/workflows/pr_labeler.yml | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 74e88f287..9f602de30 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -24,7 +24,7 @@ body: - type: dropdown id: issue-type attributes: - label: Issue Type + label: Ticket Type description: What kind of ticket are you opening? 
options: - "πŸ› Bug Report (Something isn't working)" diff --git a/.github/workflows/pr_labeler.yml b/.github/workflows/pr_labeler.yml index 8ec0c65f1..177c20959 100644 --- a/.github/workflows/pr_labeler.yml +++ b/.github/workflows/pr_labeler.yml @@ -17,7 +17,8 @@ name: Pull Request Labeler on: # Allows labeling pull requests when they are opened or updated - pull_request: + # zizmor: ignore[dangerous-triggers] Needed to label PRs from forks + pull_request_target: branches: - main types: [opened, synchronize, reopened, ready_for_review] @@ -30,7 +31,7 @@ jobs: triage: name: Label PR runs-on: ubuntu-latest - if: github.repository == 'huggingface/lerobot' + if: github.repository == 'huggingface/lerobot' && !github.event.pull_request.draft steps: - uses: actions/labeler@v6 with: From f04958527e70cac3aa95265badd97b53f3ef7633 Mon Sep 17 00:00:00 2001 From: Pepijn <138571049+pkooij@users.noreply.github.com> Date: Thu, 18 Dec 2025 12:50:32 +0100 Subject: [PATCH 11/12] Add sarm (#2639) * add initial modeling * make rewind pretrained policy * add annotation * small fix * add sarm * subtasks * fix spawn * fix rewind discrepancies * Add script to generate embedding for dataset (#2138) * Add generate and validate script * fix precommit * Improve generate embeddings function by using dataset tools (#2206) --------- Co-authored-by: Michel Aractingi * cleanup * change order train log * print batch size * update sarm processor * add reward output * change expected features * add image validation * change validation * get state input from dataset stats * raise if no state key is found * pass stats * cleanup and refactor * add episode inddex to complementary data * add subtask init and detection * revert lerobot_train changes * pass dataset metadata to policy * change loadig subtasks * add small logging * fix progress conversion and adding initial frame * use large offset for initial frame (ugly) * Remove rewind, use clip tokenizer * add tests, implement formula 1,2 correctly and cleanup * use task from dataset, cleanup visualizer * simplify * simplify and cleanup code and move compute_temporal_proportions to utils * fix normalization in visualization * Fix visualization and change prompt * fix formatting * add visualize subtask annotations * use qwen thinking * try different prompt * format * update prompt * higher temp, long output * different settings * use instruct * show full resp * split message * Temp: increase tolerance dataset * Fix RA-BC (#2572) * Add next observation loading for RA-BC progress deltas * Compute weights based on temporal progress deltas instead of static rewards * Add hard-masking for negative progress deltas in weight computation * Feat/add dual head (#2582) * Add dual dense sparse head and annotation * Add docs * add dual to procesor * cleanup * change sampling in visualize and cleanup * remove validation * remove compile * Feat/test uniform (#2587) * test uniform * add different string for misaligned * Fix rewind and add tests * uncomment text implementation * run precommit * Add head mode for ra-bc * fix visalization of single task * add * return per sample loss * Fix RA_BC (#2602) * update rabc implementation * compute rabc beforehand * fix import * add only progress calulation * use precomputed progress * multi gpu processing * import * fix dataset meta data extraction * add logging * logging * log * progress per episode * split differently * move clip to gpu * pre decode frames for an episode * fix cuda initalization * fix import * multi processing * rename * fix import * 
fix * fix rabc * use last known progress if oob * use last known progress if oob * add misalignment loss with random embeddings * discard previous changes * add selection of models to docs for ra_bc * add transformers dep * extend tolerance * initial commit with new codebase * add tests * fix * remove temporal sampler * drop last frame for sampler * use original ref * some fixes * fix visualization * remove smoothing and fix order subtasks * add stride rabc computation * add push to hub * add explanation * add kappa expllaination * better rabc logging * feedback pr * remove dataset tolerance * revert dataset tool * revert dataset changes * add credit * run precommit * change path for generate ra_bc * fix type * include sarm in all in pyproject * fix precommit * lazy import matplotlib * lazy import qwen * remove rich console * skip if transformers is not installed? * run only when we have faker * place transformer lazy loading * Dont test if low transformer version * fix * increase transformer * increase as 4.57.0 is yanked * remove pi from all * go back --------- Co-authored-by: Michel Aractingi Co-authored-by: s1lent4gnt --- docs/source/_toctree.yml | 4 + docs/source/sarm.mdx | 586 ++++++++ pyproject.toml | 4 +- src/lerobot/configs/train.py | 18 +- src/lerobot/data_processing/__init__.py | 13 + .../sarm_annotations/__init__.py | 13 + .../sarm_annotations/subtask_annotation.py | 1202 +++++++++++++++++ src/lerobot/policies/__init__.py | 1 + src/lerobot/policies/act/modeling_act.py | 1 + .../policies/diffusion/modeling_diffusion.py | 1 + src/lerobot/policies/factory.py | 20 + src/lerobot/policies/groot/modeling_groot.py | 2 +- src/lerobot/policies/pi0/modeling_pi0.py | 25 +- src/lerobot/policies/pi05/modeling_pi05.py | 25 +- .../policies/sarm/compute_rabc_weights.py | 870 ++++++++++++ .../policies/sarm/configuration_sarm.py | 248 ++++ src/lerobot/policies/sarm/modeling_sarm.py | 793 +++++++++++ src/lerobot/policies/sarm/processor_sarm.py | 518 +++++++ src/lerobot/policies/sarm/sarm_utils.py | 295 ++++ .../policies/smolvla/modeling_smolvla.py | 31 +- src/lerobot/policies/tdmpc/modeling_tdmpc.py | 1 + src/lerobot/policies/utils.py | 11 +- src/lerobot/policies/vqbet/modeling_vqbet.py | 1 + src/lerobot/policies/xvla/modeling_xvla.py | 2 +- src/lerobot/processor/converters.py | 3 +- src/lerobot/scripts/lerobot_train.py | 71 +- src/lerobot/utils/rabc.py | 276 ++++ tests/policies/test_sarm_processor.py | 694 ++++++++++ .../policies/test_sarm_subtask_annotations.py | 134 ++ tests/policies/test_sarm_utils.py | 615 +++++++++ 30 files changed, 6449 insertions(+), 29 deletions(-) create mode 100644 docs/source/sarm.mdx create mode 100644 src/lerobot/data_processing/__init__.py create mode 100644 src/lerobot/data_processing/sarm_annotations/__init__.py create mode 100644 src/lerobot/data_processing/sarm_annotations/subtask_annotation.py create mode 100644 src/lerobot/policies/sarm/compute_rabc_weights.py create mode 100644 src/lerobot/policies/sarm/configuration_sarm.py create mode 100644 src/lerobot/policies/sarm/modeling_sarm.py create mode 100644 src/lerobot/policies/sarm/processor_sarm.py create mode 100644 src/lerobot/policies/sarm/sarm_utils.py create mode 100644 src/lerobot/utils/rabc.py create mode 100644 tests/policies/test_sarm_processor.py create mode 100644 tests/policies/test_sarm_subtask_annotations.py create mode 100644 tests/policies/test_sarm_utils.py diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml index aae7372fa..85a79ef17 100644 --- a/docs/source/_toctree.yml +++ 
b/docs/source/_toctree.yml
@@ -42,6 +42,10 @@
   - local: xvla
     title: X-VLA
   title: "Policies"
+- sections:
+  - local: sarm
+    title: SARM
+  title: "Reward Models"
 - sections:
   - local: async
     title: Use Async Inference
diff --git a/docs/source/sarm.mdx b/docs/source/sarm.mdx
new file mode 100644
index 000000000..321097692
--- /dev/null
+++ b/docs/source/sarm.mdx
@@ -0,0 +1,586 @@
+# SARM: Stage-Aware Reward Modeling

SARM (Stage-Aware Reward Modeling) is a video-based reward modeling framework for long-horizon robot manipulation tasks. This guide covers how to train SARM reward models and optionally use them with Reward-Aligned Behavior Cloning (RA-BC).

**Paper**: [SARM: Stage-Aware Reward Modeling for Long Horizon Robot Manipulation](https://arxiv.org/abs/2509.25358)

## Why Reward Models?

Standard behavior cloning treats all demonstration frames equally, but real-world robot datasets are messy. They contain hesitations, corrections, and variable-quality trajectories. Reward models solve this by learning a generalizable notion of **task progress** from demonstrations: given video frames and a task description, they predict how close the robot is to completing the task (0β†’1). This learned "progress signal" can be used in multiple ways; two promising applications are (1) **weighted imitation learning** (RA-BC), where high-progress frames receive more weight during policy training, and (2) **reinforcement learning**, where the reward model provides dense rewards for online or offline policy improvement.

## Overview

SARM has the following features:

1. **Stage-aware architecture**: Jointly predicts the high-level task stage and fine-grained progress within each stage
2. **Subtask annotations**: Uses natural language subtask annotations to derive consistent progress labels
3. **Temporal proportions**: Computes dataset-level priors (Ξ±Μ…\_k) for each subtask to normalize progress across variable-length demonstrations

SARM trains on a compact **stage+tau** target for each frame:

- **stage**: integer stage index `k ∈ {0, ..., K-1}`
- **Ο„ (tau)**: within-stage progress `Ο„ ∈ [0, 1]`
- **target encoding**: `y = k + Ο„` (this is what the dataset processor produces)

At inference time (and in downstream RA-BC), SARM converts the raw `k + Ο„` value into a **normalized progress** in `[0, 1]` using dataset-level **temporal proportions** `Ξ±Μ…_k` (stored in `meta/temporal_proportions_*.json`).

This matches **Formula (2)** from the paper:

```
progress_t = P_{k-1} + Ξ±Μ…_k Γ— Ο„_t
```

Where:

- `Ο„_t = (t - s_k) / (e_k - s_k)` is the within-subtask normalized time
- `P_{k-1}` is the cumulative prior (sum of the previous subtasks' proportions)
- `Ξ±Μ…_k` is the temporal proportion for subtask k

This ensures identical task states map to consistent progress values, even across demonstrations of different lengths.
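To make the normalization concrete, here is a minimal Python sketch of Formula (2). The helper name and the example proportions are illustrative only, not part of the LeRobot API:

```python
# Minimal sketch of Formula (2): map a (stage k, within-stage tau) prediction to
# normalized progress in [0, 1]. `proportions` mirrors the layout of
# meta/temporal_proportions_*.json (values are the alpha-bar_k and sum to ~1.0).
def stage_tau_to_progress(k: int, tau: float, proportions: dict[str, float]) -> float:
    alphas = list(proportions.values())
    cumulative = sum(alphas[:k])         # P_{k-1}: total share of earlier subtasks
    return cumulative + alphas[k] * tau  # P_{k-1} + alpha-bar_k * tau_t

# Halfway through the second subtask (k=1): 0.4 + 0.6 * 0.5 = 0.7
print(stage_tau_to_progress(1, 0.5, {"lift": 0.4, "fold": 0.6}))
```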
+ +## Inputs and Targets (What the new code expects) + +SARM is trained through its processor (`src/lerobot/policies/sarm/processor_sarm.py`), which: + +- **Encodes** images and task text with CLIP (ViT-B/32) into `video_features` and `text_features` +- **Pads/truncates** robot state into `state_features` (up to `max_state_dim`) +- **Builds targets** as `sparse_targets` (and `dense_targets` in `dense_only`/`dual`) using the stage+tau encoding `y = k + Ο„` +- **Masks rewind frames** using a per-sample `lengths` tensor (rewind is a training-time augmentation) + +At minimum, each training sample needs: + +- `task` (string): task description +- `policy.image_key` images and `policy.state_key` states from the dataset + +--- + +## Annotation Modes + +You can choose from **3 annotation modes** that determine how progress labels are computed: + +| Mode | Annotations Required | Heads | Use Case | +| -------------- | -------------------- | ---------------------------- | ------------------------------------------------------------ | +| `single_stage` | None | Sparse only | Simple tasks, quick experiments, no VLM needed | +| `dense_only` | Dense (VLM) | Dual (sparse auto-generated) | Detailed subtask tracking without defining high-level stages | +| `dual` | Sparse + Dense (VLM) | Dual | Full SARM paper setup with both granularities | + +### Mode Details + + + + +**No annotations required.** The entire episode is treated as a single stage called `"task"`, and progress is linear from 0 to 1 over the episode duration. + +- **Sparse head**: 1 stage ("task"), linear progress +- **Dense head**: Not used +- **Best for**: Simple tasks, quick experiments, or when VLM annotation is not available + +## Set Up Your Environment + +1. Install LeRobot by following our [Installation Guide](./installation). +2. Install SARM dependencies by running: + +```bash +pip install -e ".[sarm]" +``` + +Workflow: + +``` +1. Train SARM β†’ 2. Visualize predictions β†’ 3. (Optional) Train policy with RA-BC +``` + + + + +**Only dense (fine-grained) annotations from a VLM.** The sparse head automatically uses a single `"task"` stage covering the full episode, while the dense head learns detailed subtask progression. + +- **Sparse head**: 1 stage ("task"), linear progress (auto-generated) +- **Dense head**: Multiple fine-grained stages from VLM annotations +- **Best for**: When you want detailed subtask tracking but don't need to define high-level stages + +Workflow: + +``` +1. Annotate (dense) β†’ 2. Verify β†’ 3. Train SARM β†’ 4. Visualize β†’ 5. (Optional) Train policy with RA-BC +``` + + + + +**Both sparse and dense annotations from VLM.** Full dual-head mode as described in the SARM paper, with both high-level (sparse) and fine-grained (dense) stage predictions. + +- **Sparse head**: High-level stages from VLM annotations +- **Dense head**: Fine-grained stages from VLM annotations +- **Best for**: Complex multi-stage tasks where both granularities are useful + +Workflow: + +``` +1. Annotate (sparse+dense) β†’ 2. Verify β†’ 3. Train SARM β†’ 4. Visualize β†’ 5. (Optional) Train policy with RA-BC +``` + + + + +--- + +## Step 1: Subtask Annotation + + + + +**No annotation required!** Skip this step entirely. The model will use the episode's task description and compute linear progress automatically. + + + + +Generate **dense (fine-grained) annotations only** using a VLM. The sparse stage will be auto-generated. 
+ +```bash +python src/lerobot/data_processing/sarm_annotations/subtask_annotation.py \ + --repo-id your-username/your-dataset \ + --dense-only \ + --dense-subtasks "Bring robot arms up from starting position,Grab near side and do 1st fold,Grab side and do 2nd fold,Grab side and do 3rd fold to finish folding" \ + --video-key observation.images.base \ + --num-workers 4 \ + --push-to-hub +``` + +**What gets saved:** + +- `meta/temporal_proportions_sparse.json` - Auto-generated sparse proportions (`{"task": 1.0}`) +- `meta/temporal_proportions_dense.json` - Dense temporal proportions +- Per-episode columns in `episodes/*.parquet`: + - `dense_subtask_names`, `dense_subtask_start_frames`, `dense_subtask_end_frames` + - (also time-based columns: `dense_subtask_start_times`, `dense_subtask_end_times`) + + + + +Generate **both sparse (high-level) and dense (fine-grained) annotations** using a VLM. + +```bash +python src/lerobot/data_processing/sarm_annotations/subtask_annotation.py \ + --repo-id your-username/your-dataset \ + --sparse-subtasks "Bring arms up from starting position,Fold the towel (3 folds in total)" \ + --dense-subtasks "Bring robot arms up from starting position,Grab near side and do 1st fold,Grab side and do 2nd fold,Grab side and do 3rd fold to finish folding" \ + --video-key observation.images.base \ + --num-workers 4 \ + --push-to-hub +``` + +**What gets saved:** + +- `meta/temporal_proportions_sparse.json` - Sparse temporal proportions +- `meta/temporal_proportions_dense.json` - Dense temporal proportions +- Per-episode columns in `episodes/*.parquet`: + - `sparse_subtask_names`, `sparse_subtask_start_frames`, `sparse_subtask_end_frames` + - `dense_subtask_names`, `dense_subtask_start_frames`, `dense_subtask_end_frames` + - (also time-based columns: `*_subtask_start_times`, `*_subtask_end_times`) + + + + +### Annotation Arguments + +| Argument | Description | +| ---------------------- | ------------------------------------------------------------------------------- | +| `--repo-id` | HuggingFace dataset repository ID | +| `--sparse-subtasks` | Comma-separated list of high-level subtask names | +| `--dense-subtasks` | Comma-separated list of fine-grained subtask names | +| `--dense-only` | Generate only dense annotations (auto-creates sparse "task" stage) | +| `--video-key` | Camera/video key to use (e.g., `observation.images.top`) | +| `--num-workers` | Number of parallel GPU workers (default: 1) | +| `--episodes` | Specific episode indices to annotate (default: all) | +| `--skip-existing` | Skip episodes that already have annotations | +| `--model` | VLM model (default: `Qwen/Qwen3-VL-30B-A3B-Instruct`) | +| `--num-visualizations` | Number of episodes to visualize after annotation (default: 5, set to 0 to skip) | + +> **Note**: After annotation completes, 5 episodes are automatically visualized by default. Use `--num-visualizations 0` to skip this step. + +--- + +## Step 2: Verify Annotations + + + + +**No verification needed!** Skip this step. 
+ + + + +Visualize annotations using the `--visualize-only` flag: + +```bash +python src/lerobot/data_processing/sarm_annotations/subtask_annotation.py \ + --repo-id your-username/your-dataset \ + --visualize-only \ + --visualize-type dense \ + --num-visualizations 5 \ + --video-key observation.images.base \ + --output-dir ./subtask_viz +``` + + + + +Visualize annotations using the `--visualize-only` flag: + +```bash +python src/lerobot/data_processing/sarm_annotations/subtask_annotation.py \ + --repo-id your-username/your-dataset \ + --visualize-only \ + --visualize-type both \ + --num-visualizations 5 \ + --video-key observation.images.base \ + --output-dir ./subtask_viz +``` + + + + +This generates visualizations showing video frames with subtask boundaries overlaid and timeline of subtasks. + +### Visualization Arguments + +| Argument | Description | +| ---------------------- | -------------------------------------------------------------- | +| `--visualize-only` | Only visualize existing annotations (no generation) | +| `--num-visualizations` | Number of episodes to visualize (default: 5) | +| `--visualize-type` | Type of annotations to visualize: `sparse`, `dense`, or `both` | + +**Tip**: If annotations are inaccurate, adjust your subtask descriptions to be more specific and re-run. + +--- + +## Step 3: Train SARM + + + + +Train with **no annotations** - uses linear progress from 0 to 1: + +```bash +python src/lerobot/scripts/lerobot_train.py \ + --dataset.repo_id=your-username/your-dataset \ + --policy.type=sarm \ + --policy.annotation_mode=single_stage \ + --policy.image_key=observation.images.base \ + --output_dir=outputs/train/sarm_single \ + --batch_size=32 \ + --steps=5000 \ + --wandb.enable=true \ + --wandb.project=sarm \ + --policy.repo_id=your-username/your-model-name +``` + + + + +Train with **dense annotations only** (sparse auto-generated): + +```bash +python src/lerobot/scripts/lerobot_train.py \ + --dataset.repo_id=your-username/your-dataset \ + --policy.type=sarm \ + --policy.annotation_mode=dense_only \ + --policy.image_key=observation.images.base \ + --output_dir=outputs/train/sarm_dense \ + --batch_size=32 \ + --steps=5000 \ + --wandb.enable=true \ + --wandb.project=sarm \ + --policy.repo_id=your-username/your-model-name +``` + + + + +Train with **both sparse and dense annotations**: + +```bash +python src/lerobot/scripts/lerobot_train.py \ + --dataset.repo_id=your-username/your-dataset \ + --policy.type=sarm \ + --policy.annotation_mode=dual \ + --policy.image_key=observation.images.base \ + --output_dir=outputs/train/sarm_dual \ + --batch_size=32 \ + --steps=5000 \ + --wandb.enable=true \ + --wandb.project=sarm \ + --policy.repo_id=your-username/your-model-name +``` + + + + +### Multi-GPU Training + +Add `accelerate launch --multi_gpu --num_processes=4` to use multiple GPUs for training. 
+ +### Training Arguments + +| Argument | Description | Default | +| -------------------------- | ----------------------------------------------------------------- | ------------------------ | +| `--policy.annotation_mode` | `single_stage`, `dense_only`, or `dual` | `single_stage` | +| `--policy.image_key` | Camera key for images | `observation.images.top` | +| `--policy.state_key` | Key for joint states | `observation.state` | +| `--policy.n_obs_steps` | Observation history steps (total obs frames = `n_obs_steps + 1`) | `8` | +| `--policy.frame_gap` | Gap (in frames) between sampled observations (at 30 fps: 30 β‰ˆ 1s) | `30` | + +--- + +## Step 4: Visualize Predictions + +Use `compute_rabc_weights.py` with `--visualize-only` to visualize model predictions (and, if available, annotation-derived targets) without writing a parquet file. + + + + +```bash +python src/lerobot/policies/sarm/compute_rabc_weights.py \ + --dataset-repo-id your-username/your-dataset \ + --reward-model-path your-username/sarm-model \ + --visualize-only \ + --num-visualizations 5 \ + --head-mode sparse \ + --output-dir ./sarm_viz +``` + + + + +```bash +python src/lerobot/policies/sarm/compute_rabc_weights.py \ + --dataset-repo-id your-username/your-dataset \ + --reward-model-path your-username/sarm-model \ + --visualize-only \ + --num-visualizations 5 \ + --head-mode dense \ + --output-dir ./sarm_viz +``` + + + + +```bash +python src/lerobot/policies/sarm/compute_rabc_weights.py \ + --dataset-repo-id your-username/your-dataset \ + --reward-model-path your-username/sarm-model \ + --visualize-only \ + --num-visualizations 5 \ + --head-mode both \ + --output-dir ./sarm_viz +``` + + + + +The visualization shows: + +- **Progress plot**: Predicted progress (and optional annotation-derived β€œGT” when available and `--stride 1`) +- **Stage probabilities**: Stacked area plot of predicted stage probabilities +- **Sample frames**: Key frames from the episode with progress/stage labels + +### Visualization Arguments + +| Argument | Description | +| ---------------------- | --------------------------------------------------------- | +| `--visualize-only` | Only visualize predictions (no RABC computation) | +| `--num-visualizations` | Number of episodes to visualize (default: 5) | +| `--head-mode` | SARM head to use: `sparse`, `dense`, or `both` | +| `--stride` | Compute every N frames, interpolate the rest (default: 1) | + +--- + +## Step 5 (Optional): Train Policy with RA-BC + +Reward-Aligned Behavior Cloning (RA-BC) uses the trained SARM model to weight training samples based on predicted progress improvement. This requires two steps: + +1. **Precompute progress values** for all frames using the trained SARM model +2. **Train policy** with RA-BC weighting using the precomputed values + +### How RA-BC Works + +For each training sample, RA-BC computes the progress delta: + +``` +r_i = Ο†(o_{t+Ξ”}) - Ο†(o_t) +``` + +Where `Ο†` is the SARM progress prediction and `Ξ”` is the policy's `chunk_size`. Samples with positive progress (good demonstrations) get higher weights, while samples with negative or zero progress get down-weighted. 
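As a minimal NumPy sketch of this computation for one episode (illustrative only; the actual implementation ships in `src/lerobot/utils/rabc.py`), assuming the per-frame progress values Ο† are already available and using the clip-based weighting spelled out as Equations 8-9 below:

```python
import numpy as np

def rabc_weights(progress: np.ndarray, delta: int, kappa: float = 0.01, eps: float = 1e-8) -> np.ndarray:
    """Illustrative RA-BC sample weights from per-frame progress phi (one episode)."""
    # r_i = phi(o_{t+delta}) - phi(o_t), clamping t+delta to the final frame
    t = np.arange(len(progress))
    r = progress[np.minimum(t + delta, len(progress) - 1)] - progress

    # Soft weight (Eq. 8), normalizing each delta by the mean/std of all deltas
    mu, sigma = r.mean(), r.std()
    soft = np.clip((r - (mu - 2.0 * sigma)) / (4.0 * sigma + eps), 0.0, 1.0)

    # Final weight (Eq. 9): 1 above kappa, soft weight in [0, kappa], 0 if negative
    return np.where(r > kappa, 1.0, np.where(r >= 0.0, soft, 0.0))
```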
+ +The weighting follows **Equations 8-9** from the paper: + +- **Soft weight**: `wΜƒ_i = clip((r_i βˆ’ (ΞΌ βˆ’ 2Οƒ)) / (4Οƒ + Ξ΅), 0, 1)` +- **Final weight**: `w_i = πŸ™{r_i > ΞΊ} + πŸ™{0 ≀ r_i ≀ ΞΊ} Γ— wΜƒ_i` + +### Step 5a: Compute SARM Progress Values + +First, run the SARM model on all frames in your dataset to compute progress values: + +```bash +python src/lerobot/policies/sarm/compute_rabc_weights.py \ + --dataset-repo-id your-username/your-dataset \ + --reward-model-path your-username/sarm-model \ + --head-mode sparse \ + --num-visualizations 5 \ + --push-to-hub +``` + +This script: + +- Processes all frames and computes progress values +- Saves progress values to a parquet file next to the dataset on disk (defaults to `/sarm_progress.parquet`) +- Generates visualizations of the first N episodes (default: 5) + +**Arguments:** + +| Argument | Description | Default | +| ---------------------- | -------------------------------------------------------------- | ---------- | +| `--reward-model-path` | Path to trained SARM model | (required) | +| `--head-mode` | SARM head to use: `sparse`, `dense`, or `both` | `sparse` | +| `--device` | Device for inference | `cuda` | +| `--visualize-only` | Only visualize predictions (no RA-BC computation) | `false` | +| `--num-visualizations` | Number of episodes to visualize (default: 5, set to 0 to skip) | `5` | + +**Output format** (`sarm_progress.parquet`): + +| Column | Description | +| ----------------- | ---------------------------------------------- | +| `index` | Global frame index in dataset | +| `episode_index` | Episode number | +| `frame_index` | Local frame index within episode | +| `progress_sparse` | Sparse head progress value [0, 1] | +| `progress_dense` | Dense head progress value [0, 1] (if computed) | + +### Step 5b: Train Policy with RA-BC + +Once you have the progress file, train your policy with RA-BC weighting. The progress file is auto-detected from the dataset path (`sarm_progress.parquet`). Currently PI0, PI0.5 and SmolVLA are supported with RA-BC: + +```bash +python src/lerobot/scripts/lerobot_train.py \ + --dataset.repo_id=your-username/your-dataset \ + --policy.type=pi0 \ + --use_rabc=true \ + --rabc_head_mode=sparse \ + --rabc_kappa=0.01 \ + --output_dir=outputs/train/policy_rabc \ + --batch_size=32 \ + --steps=40000 +``` + +The training script automatically: + +- Loads the precomputed progress values from the parquet file +- Uses the policy's `chunk_size` to compute progress deltas (Ξ”) +- Computes sample weights based on progress improvement +- Applies weighted loss during training + +**RA-BC Arguments:** + +| Argument | Description | Default | +| ---------------------- | ---------------------------------------------------------- | ---------------------------------- | +| `--use_rabc` | Enable RA-BC sample weighting | `false` | +| `--rabc_progress_path` | Path to progress parquet file (auto-detected from dataset) | `sarm_progress.parquet` in dataset | +| `--rabc_head_mode` | Which SARM head's progress to use: `sparse` or `dense` | `sparse` | +| `--rabc_kappa` | Threshold ΞΊ for high-quality samples | `0.01` | + +### Tuning RA-BC Kappa + +The `kappa` parameter is the threshold that determines which samples get full weight (w=1). Understanding how to tune it is critical for RA-BC to work effectively. + +**How the weighting works:** + +| Condition | Weight | +| ------------------- | ----------------------- | +| `delta > kappa` | 1.0 (hard threshold) | +| `0 ≀ delta ≀ kappa` | Soft weight from Eq. 
8 | +| `delta < 0` | 0.0 (negative progress) | + +**Diagnosing kappa issues:** + +Monitor these WandB metrics during training: + +| Metric | Healthy Range | Problem Indicator | +| ------------------ | ------------- | ------------------------- | +| `rabc_mean_weight` | 0.3 - 0.8 | β‰ˆ 1.0 means kappa too low | +| `rabc_delta_mean` | > 0 | Should be positive | +| `rabc_delta_std` | > 0 | Variance in data quality | + +**If `rabc_mean_weight β‰ˆ 1.0`:** Your kappa is too low. Most samples have `delta > kappa` and bypass the soft-weighting entirely. RA-BC becomes equivalent to vanilla BC. + +**Setting kappa based on your data:** + +The default `kappa=0.01` was tuned for the paper's T-shirt folding task (~90s episodes at 30fps). For your dataset, check the logged `rabc_delta_mean` and `rabc_delta_std`: + +``` +# If delta_mean β‰ˆ 0.03 and delta_std β‰ˆ 0.02: +# Most deltas fall in range [0.01, 0.05] + +# Option 1: Set kappa = delta_mean (medium selectivity) +--rabc_kappa=0.03 + +# Option 2: Set kappa = delta_mean + delta_std (high selectivity) +--rabc_kappa=0.05 + +# Option 3: Set kappa = delta_mean + 2*delta_std (very selective) +--rabc_kappa=0.07 +``` + +**When RA-BC may not help:** + +If your dataset is already high quality (consistent progress across all demonstrations), RA-BC won't provide much benefit since there's nothing to filter. + +### Multi-GPU Training with RA-BC + +```bash +accelerate launch \ + --multi_gpu \ + --num_processes=4 \ + src/lerobot/scripts/lerobot_train.py \ + --dataset.repo_id=your-username/your-dataset \ + --policy.type=pi0 \ + --use_rabc=true \ + --rabc_kappa=0.01 \ + --output_dir=outputs/train/policy_rabc \ + --batch_size=32 \ + --steps=40000 +``` + +--- + +## Tips & Best Practices + +### Choosing a Mode + +- **Start with `single_stage`** for quick experiments - no annotation overhead +- Use **`dense_only`** when you want detailed progress tracking but tasks don't have clear high-level stages +- Use **`dual`** for complex tasks where both coarse and fine-grained progress is meaningful + +### Annotation Quality + +1. **Be specific with subtask names**: Instead of "fold", use "grab near side and fold toward center" +2. **Verify with visualization**: Always check a few episodes before training +3. **Consistent naming**: Use the same subtask names across all episodes + +### RA-BC + +1. **Train SARM first**: RA-BC quality depends entirely on SARM quality +2. 
**Monitor `rabc_mean_weight`**: If it's β‰ˆ 1.0, increase kappa (see [Tuning RA-BC Kappa](#tuning-ra-bc-kappa)) + +--- + +## Citation + +```bibtex +@article{chen2025sarm, + title={SARM: Stage-Aware Reward Modeling for Long Horizon Robot Manipulation}, + author={Chen, Qianzhong and Yu, Justin and Schwager, Mac and Abbeel, Pieter and Shentu, Yide and Wu, Philipp}, + journal={arXiv preprint arXiv:2509.25358}, + year={2025} +} +``` diff --git a/pyproject.toml b/pyproject.toml index 9458c0127..ed3e2ae43 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ dependencies = [ # Common pygame-dep = ["pygame>=2.5.1,<2.7.0"] placo-dep = ["placo>=0.9.6,<0.10.0"] -transformers-dep = ["transformers>=4.53.0,<5.0.0"] +transformers-dep = ["transformers>=4.57.1,<5.0.0"] grpcio-dep = ["grpcio==1.73.1", "protobuf==6.31.0"] # TODO: Bumb dependency (compatible with wandb) # Motors @@ -133,6 +133,7 @@ groot = [ "ninja>=1.11.1,<2.0.0", "flash-attn>=2.5.9,<3.0.0 ; sys_platform != 'darwin'" ] +sarm = ["lerobot[transformers-dep]", "faker>=33.0.0,<35.0.0", "matplotlib>=3.10.3,<4.0.0", "qwen-vl-utils>=0.0.14"] xvla = ["lerobot[transformers-dep]"] hilserl = ["lerobot[transformers-dep]", "gym-hil>=0.1.13,<0.2.0", "lerobot[grpcio-dep]", "lerobot[placo-dep]"] @@ -173,6 +174,7 @@ all = [ "lerobot[phone]", "lerobot[libero]", "lerobot[metaworld]", + "lerobot[sarm]" ] [project.scripts] diff --git a/src/lerobot/configs/train.py b/src/lerobot/configs/train.py index 13a8d6525..cee9dfdf9 100644 --- a/src/lerobot/configs/train.py +++ b/src/lerobot/configs/train.py @@ -65,9 +65,17 @@ class TrainPipelineConfig(HubMixin): scheduler: LRSchedulerConfig | None = None eval: EvalConfig = field(default_factory=EvalConfig) wandb: WandBConfig = field(default_factory=WandBConfig) - checkpoint_path: Path | None = field(init=False, default=None) + + # RA-BC (Reward-Aligned Behavior Cloning) parameters + use_rabc: bool = False # Enable reward-weighted training + rabc_progress_path: str | None = None # Path to precomputed SARM progress parquet file + rabc_kappa: float = 0.01 # Hard threshold for high-quality samples + rabc_epsilon: float = 1e-6 # Small constant for numerical stability + rabc_head_mode: str | None = "sparse" # For dual-head models: "sparse" or "dense" + # Rename map for the observation to override the image and state keys rename_map: dict[str, str] = field(default_factory=dict) + checkpoint_path: Path | None = field(init=False, default=None) def validate(self) -> None: # HACK: We parse again the cli args here to get the pretrained paths if there was some. @@ -131,6 +139,14 @@ class TrainPipelineConfig(HubMixin): "'policy.repo_id' argument missing. Please specify it to push the model to the hub." ) + if self.use_rabc and not self.rabc_progress_path: + # Auto-detect from dataset path + repo_id = self.dataset.repo_id + if self.dataset.root: + self.rabc_progress_path = str(Path(self.dataset.root) / "sarm_progress.parquet") + else: + self.rabc_progress_path = f"hf://datasets/{repo_id}/sarm_progress.parquet" + @classmethod def __get_path_fields__(cls) -> list[str]: """This enables the parser to load config from the policy using `--policy.path=local/dir`""" diff --git a/src/lerobot/data_processing/__init__.py b/src/lerobot/data_processing/__init__.py new file mode 100644 index 000000000..2f76d5676 --- /dev/null +++ b/src/lerobot/data_processing/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/lerobot/data_processing/sarm_annotations/__init__.py b/src/lerobot/data_processing/sarm_annotations/__init__.py new file mode 100644 index 000000000..2f76d5676 --- /dev/null +++ b/src/lerobot/data_processing/sarm_annotations/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/lerobot/data_processing/sarm_annotations/subtask_annotation.py b/src/lerobot/data_processing/sarm_annotations/subtask_annotation.py new file mode 100644 index 000000000..67e37bab8 --- /dev/null +++ b/src/lerobot/data_processing/sarm_annotations/subtask_annotation.py @@ -0,0 +1,1202 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +SARM Subtask Annotation using local GPU (Qwen3-VL). + +This script implements the annotation approach from the SARM paper using local GPU inference: +"SARM: Stage-Aware Reward Modeling for Long Horizon Robot Manipulation" +Paper: https://arxiv.org/pdf/2509.25358 + +What it does: +1. Takes videos from a LeRobot dataset +2. Uses Qwen3-VL running locally on GPU to identify when subtasks occur +3. Saves subtask timestamps to the dataset metadata +4. Optionally pushes the annotated dataset to HuggingFace Hub + +SARM trains reward models that predict: + - Stage: Which subtask is currently being executed (discrete classification) + - Progress: How far along the subtask we are (continuous 0-1) + +Supports three annotation modes: + 1. No annotations (no args): Auto-creates single sparse "task" stage covering full episode. + Use with SARM config annotation_mode="single_stage" for simple tasks. + + 2. Dense-only (--dense-only --dense-subtasks): Dense annotations from VLM, auto-generated + single sparse "task" stage. Use with annotation_mode="dense_only". + + 3. 
Dual mode (--sparse-subtasks + --dense-subtasks): Both sparse and dense annotations
+     from VLM. Use with annotation_mode="dual".
+
+Requirements:
+    - GPU with sufficient VRAM (16GB+ recommended for 30B model)
+    - `pip install transformers torch qwen-vl-utils`
+
+Run with:
+```bash
+python src/lerobot/data_processing/sarm_annotations/subtask_annotation.py \
+    --repo-id your-username/your-dataset \
+    --sparse-subtasks "Do ..." \
+    --dense-subtasks "Do task 1, Do task 2, Do task 3" \
+    --video-key observation.images.base \
+    --push-to-hub
+```
+"""
+
+import argparse
+import json
+import multiprocessing as mp
+import random
+import re
+import subprocess
+import tempfile
+import textwrap
+import time
+from concurrent.futures import ProcessPoolExecutor, as_completed
+from pathlib import Path
+from typing import Any
+
+import cv2
+import numpy as np
+import pandas as pd
+import torch
+from pydantic import BaseModel, Field
+from transformers import AutoProcessor, Qwen3VLMoeForConditionalGeneration
+
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+
+# Pydantic Models for SARM Subtask Annotation
+class Timestamp(BaseModel):
+    """Timestamp in MM:SS or SS format"""
+
+    start: str = Field(description="Start timestamp (MM:SS or just seconds)")
+    end: str = Field(description="End timestamp (MM:SS or just seconds)")
+
+
+class Subtask(BaseModel):
+    """Individual subtask/stage - must use EXACT names from provided list"""
+
+    name: str = Field(description="Subtask name - MUST match one from the predefined list exactly")
+    timestamps: Timestamp
+
+
+class SubtaskAnnotation(BaseModel):
+    """Complete annotation for a robot manipulation episode"""
+
+    subtasks: list[Subtask] = Field(description="List of all subtasks in temporal order")
+
+
+def compute_temporal_proportions(
+    annotations: dict[int, Any], fps: int = 30, subtask_order: list[str] | None = None
+) -> dict[str, float]:
+    """
+    Compute dataset-level temporal proportions (priors) for each subtask.
+
+    Implements SARM Paper Formula (1): αΎ±_k = (1/M) Γ— Ξ£_i (L_{i,k} / T_i)
+
+    Args:
+        annotations: Dict mapping episode index to SubtaskAnnotation object.
+        fps: Frames per second (unused, kept for API compatibility)
+        subtask_order: Optional list defining the output order of subtasks.
+
+    Returns:
+        Dict mapping subtask name to its temporal proportion (αΎ±_k), ordered by subtask_order if provided.
+ """ + subtask_proportions: dict[str, list[float]] = {} + + for annotation in annotations.values(): + total_duration = 0 + durations: dict[str, int] = {} + + for subtask in annotation.subtasks: + start_parts = subtask.timestamps.start.split(":") + end_parts = subtask.timestamps.end.split(":") + + start_seconds = ( + int(start_parts[0]) * 60 + int(start_parts[1]) + if len(start_parts) == 2 + else int(start_parts[0]) + ) + end_seconds = ( + int(end_parts[0]) * 60 + int(end_parts[1]) if len(end_parts) == 2 else int(end_parts[0]) + ) + + duration = end_seconds - start_seconds + durations[subtask.name] = duration + total_duration += duration + + if total_duration > 0: + for name, duration in durations.items(): + if name not in subtask_proportions: + subtask_proportions[name] = [] + subtask_proportions[name].append(duration / total_duration) + + if not subtask_proportions: + return {} + + avg_proportions = {name: sum(props) / len(props) for name, props in subtask_proportions.items()} + + total = sum(avg_proportions.values()) + if total > 0: + avg_proportions = {name: prop / total for name, prop in avg_proportions.items()} + + # Reorder according to subtask_order if provided + if subtask_order: + avg_proportions = { + name: avg_proportions.get(name, 0.0) for name in subtask_order if name in avg_proportions + } + + return avg_proportions + + +def create_sarm_prompt(subtask_list: list[str]) -> str: + subtask_str = "\n".join([f" - {name}" for name in subtask_list]) + + return textwrap.dedent(f"""\ + # Role + You are a Robotics Vision System specializing in temporal action localization for robot manipulation. Your job is to segment a single demonstration video into distinct, non-overlapping atomic actions from a fixed subtask list. + + # Subtask Label Set (Closed Vocabulary) + You must strictly identify the video segments using ONLY the following labels. Do not create new labels or modify existing ones: + + [ + {subtask_str} + ] + + The video shows one successful execution of all subtasks in a logical order. + + # Ground-Truth Semantics (Very Important) + Use **visual state changes** to define when a subtask starts and ends. Do NOT assume equal durations for the subtasks. + + - A subtask **starts** at the first frame where the robot's motion clearly initiates that subtask. + - A subtask **ends** at the first frame where that specific action is visually completed and the manipulated object reaches a temporary, stable configuration. + + If there are short pauses or micro-motions that don't clearly correspond to a new subtask, they belong to the **current** subtask. + + # Hard Constraints & Logic + 1. **Continuous Coverage (No Gaps):** + - The entire video duration from "00:00" to the final timestamp must be covered by subtasks. + - There can be no gaps between subtasks. + - If there is any idle or ambiguous time between clear actions, extend the *preceding* subtask to cover it. + + 2. **Boundary Consistency:** + - The `"end"` timestamp of one subtask must be exactly equal to the `"start"` timestamp of the next subtask. + - Boundaries must coincide with a real visual state transition, not just a convenient time split. + + 3. **Chronological Order, One Occurrence Each:** + - This is a single successful demonstration. + - Each subtask from the vocabulary appears **exactly once**, in the correct logical order. + - **Durations may be very different** between subtasks. Never assume they are similar lengths. Base all boundaries only on the video. + + 4. 
**Reject Uniform Segmentation (Important):**
+       - Do NOT simply divide the video into equal or nearly equal time chunks.
+       - If your boundaries would result in subtasks with similar durations (e.g. all around 5 seconds), treat this as evidence that your segmentation is wrong and refine the boundaries.
+       - Only use nearly equal durations if the video truly shows each subtask taking the same amount of time (this is very rare).
+
+    5. **Timestamps:**
+       - Timestamps must be in `"MM:SS"` format.
+       - The first subtask always starts at `"00:00"`.
+       - The last subtask ends at the final visible frame of the video.
+
+    # Step 1 β€” Textual Timeline (must do this first)
+    First, write an extensive and detailed textual timeline describing what happens in the video with approximate timestamps.
+    For each subtask, include:
+    - its name
+    - an approximate start and end time,
+    - a description of the visual event at the boundary (e.g. "shirt fully folded to the left", "robot rotates folded shirt 90 degrees").
+
+    Format this as a bullet list.
+
+    # Step 2 β€” JSON Output (final answer)
+    After the textual timeline, output **only** valid JSON with this structure.
+    The JSON **must** be consistent with the textual timeline above:
+
+    {{
+        "subtasks": [
+            {{
+                "name": "EXACT_NAME_FROM_LIST",
+                "timestamps": {{
+                    "start": "MM:SS",
+                    "end": "MM:SS"
+                }}
+            }},
+            {{
+                "name": "EXACT_NAME_FROM_LIST",
+                "timestamps": {{
+                    "start": "MM:SS",
+                    "end": "MM:SS"
+                }}
+            }}
+        ]
+    }}
+
+    Do not add any extra keys to the JSON.
+    """)
+
+
+class VideoAnnotator:
+    """Annotates robot manipulation videos using local Qwen3-VL model on GPU"""
+
+    def __init__(
+        self,
+        subtask_list: list[str],
+        model_name: str = "Qwen/Qwen3-VL-30B-A3B-Instruct",
+        device: str = "cuda",
+        torch_dtype: torch.dtype = torch.bfloat16,
+        model: Qwen3VLMoeForConditionalGeneration | None = None,  # noqa: F821
+        processor: AutoProcessor | None = None,  # noqa: F821
+    ):
+        """
+        Initialize the video annotator with local model.
+
+        Args:
+            subtask_list: List of allowed subtask names (for consistency)
+            model_name: Hugging Face model name (default: Qwen/Qwen3-VL-30B-A3B-Instruct)
+            device: Device to use (cuda, cpu)
+            torch_dtype: Data type for model (bfloat16, float16, float32)
+            model: Pre-loaded model instance (optional, to share between annotators)
+            processor: Pre-loaded processor instance (optional, to share between annotators)
+        """
+        self.subtask_list = subtask_list
+        self.prompt = create_sarm_prompt(subtask_list)
+        self.device = device
+
+        # Use provided model/processor or load new ones
+        if model is not None and processor is not None:
+            self.model = model
+            self.processor = processor
+            print(f"Using shared model on {device}")
+        else:
+            from transformers import AutoProcessor, Qwen3VLMoeForConditionalGeneration
+
+            print(f"Loading model: {model_name}...")
+
+            self.model = Qwen3VLMoeForConditionalGeneration.from_pretrained(
+                model_name, torch_dtype=torch_dtype, device_map=device, trust_remote_code=True
+            )
+
+            self.processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
+
+            print(f"Model loaded successfully on {device}")
+
+    def extract_episode_segment(
+        self, file_path: Path, start_timestamp: float, end_timestamp: float, target_fps: int = 1
+    ) -> Path:
+        """
+        Extract a specific episode segment from concatenated video.
+        Uses minimal compression to preserve quality for local inference.
+ + Args: + file_path: Path to the concatenated video file + start_timestamp: Starting timestamp in seconds (within this video file) + end_timestamp: Ending timestamp in seconds (within this video file) + target_fps: Target FPS (default: 1 for faster processing) + + Returns: + Path to extracted video file + """ + # Create temporary file for extracted video + with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp_file: + tmp_path = Path(tmp_file.name) + + try: + # Check if ffmpeg is available + subprocess.run( # nosec B607 + ["ffmpeg", "-version"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True + ) + except (subprocess.CalledProcessError, FileNotFoundError) as err: + raise RuntimeError("ffmpeg not found, cannot extract episode segment") from err + + try: + # Calculate duration + duration = end_timestamp - start_timestamp + + print(f"Extracting episode: {start_timestamp:.1f}s-{end_timestamp:.1f}s ({duration:.1f}s)") + + # Use ffmpeg to extract segment with minimal quality loss + cmd = [ + "ffmpeg", + "-i", + str(file_path), + "-ss", + str(start_timestamp), + "-t", + str(duration), + "-r", + str(target_fps), + "-c:v", + "libx264", + "-preset", + "ultrafast", + "-crf", + "23", + "-an", + "-y", + str(tmp_path), + ] + + subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) + + # Verify the output file was created and is not empty + if not tmp_path.exists() or tmp_path.stat().st_size == 0: + print("Video extraction failed (0 bytes) - skipping episode") + if tmp_path.exists(): + tmp_path.unlink() + raise RuntimeError("FFmpeg produced empty video file") + + # Show extraction results + file_size_mb = tmp_path.stat().st_size / (1024 * 1024) + + # Fail if file is too small (< 100KB likely means extraction failed) + if file_size_mb < 0.1: + print(f"Extracted video too small ({file_size_mb:.2f}MB) - skipping episode") + tmp_path.unlink() + raise RuntimeError(f"Video extraction produced invalid file ({file_size_mb:.2f}MB)") + + print(f"Extracted: {file_size_mb:.1f}MB ({target_fps} FPS)") + + return tmp_path + + except subprocess.CalledProcessError as e: + raise RuntimeError(f"ffmpeg failed ({e})") from e + + def annotate( + self, + file_path: str | Path, + fps: int, + start_timestamp: float = 0.0, + end_timestamp: float | None = None, + max_retries: int = 3, + ) -> SubtaskAnnotation: + """Annotate a video segment using local GPU.""" + from qwen_vl_utils import process_vision_info + + file_path = Path(file_path) + + if end_timestamp is None: + cap = cv2.VideoCapture(str(file_path)) + end_timestamp = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) / (cap.get(cv2.CAP_PROP_FPS) or 1) + cap.release() + + duration = end_timestamp - start_timestamp + duration_str = f"{int(duration // 60):02d}:{int(duration % 60):02d}" + + extracted_path = self.extract_episode_segment(file_path, start_timestamp, end_timestamp, 1) + is_extracted = extracted_path != file_path + + try: + messages = [ + {"role": "system", "content": [{"type": "text", "text": self.prompt}]}, + { + "role": "user", + "content": [ + {"type": "video", "video": str(extracted_path), "fps": 1.0}, + { + "type": "text", + "text": f"Video is {duration_str} (~{duration:.1f}s). 
Follow instructions.", + }, + ], + }, + ] + + for attempt in range(max_retries): + try: + text = self.processor.apply_chat_template( + messages, tokenize=False, add_generation_prompt=True + ) + image_inputs, video_inputs = process_vision_info(messages) + inputs = self.processor( + text=[text], + images=image_inputs, + videos=video_inputs, + padding=True, + return_tensors="pt", + ).to(self.device) + + with torch.no_grad(): + generated_ids = self.model.generate( + **inputs, max_new_tokens=1024, do_sample=True, temperature=0.7 + ) + + response = self.processor.batch_decode( + [out[len(inp) :] for inp, out in zip(inputs.input_ids, generated_ids, strict=True)], + skip_special_tokens=True, + )[0].strip() + + # Extract JSON + if "```json" in response: + response = response.split("```json")[1].split("```")[0] + elif "```" in response: + response = response.split("```")[1].split("```")[0] + + try: + return SubtaskAnnotation.model_validate(json.loads(response)) + except json.JSONDecodeError: + match = re.search(r"\{.*\}", response, re.DOTALL) + if match: + return SubtaskAnnotation.model_validate(json.loads(match.group())) + raise ValueError("No JSON found") from None + except Exception as e: + if attempt == max_retries - 1: + raise RuntimeError(f"Failed after {max_retries} attempts") from e + time.sleep(1) + finally: + if is_extracted and extracted_path.exists(): + extracted_path.unlink() + + +def display_annotation(annotation: SubtaskAnnotation, episode_idx: int, fps: int, prefix: str = ""): + """Display annotation summary.""" + subtask_summary = ", ".join( + f"{s.name}({s.timestamps.start}-{s.timestamps.end})" for s in annotation.subtasks + ) + print(f"Episode {episode_idx} {prefix}: {len(annotation.subtasks)} subtasks - {subtask_summary}") + + +def timestamp_to_seconds(timestamp: str) -> float: + """Convert MM:SS or SS timestamp to seconds""" + parts = timestamp.split(":") + if len(parts) == 2: + return int(parts[0]) * 60 + int(parts[1]) + else: + return int(parts[0]) + + +def extract_frame(video_path: Path, timestamp: float) -> np.ndarray | None: + """Extract a single frame from video at given timestamp.""" + cap = cv2.VideoCapture(str(video_path)) + if not cap.isOpened(): + return None + cap.set(cv2.CAP_PROP_POS_MSEC, timestamp * 1000) + ret, frame = cap.read() + cap.release() + return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) if ret else None + + +def draw_timeline(ax, subtasks, total_duration, colors): + """Draw a timeline with color-coded subtask segments.""" + import matplotlib.patches as mpatches + + bar_height, bar_y = 0.6, 0.5 + + for i, subtask in enumerate(subtasks): + start = timestamp_to_seconds(subtask.timestamps.start) + end = timestamp_to_seconds(subtask.timestamps.end) + color = colors[i % len(colors)] + + rect = mpatches.FancyBboxPatch( + (start, bar_y - bar_height / 2), + end - start, + bar_height, + boxstyle="round,pad=0.02,rounding_size=0.1", + facecolor=color, + edgecolor="white", + linewidth=1.5, + alpha=0.85, + ) + ax.add_patch(rect) + + # Add label if segment is wide enough + duration = end - start + if duration > total_duration * 0.06: + ax.text( + (start + end) / 2, + bar_y, + subtask.name, + ha="center", + va="center", + fontsize=8, + fontweight="bold", + color="white", + rotation=0 if duration > total_duration * 0.12 else 45, + ) + + if i > 0: + ax.axvline(x=start, ymin=0.1, ymax=0.9, color="white", linestyle="--", linewidth=1.5, alpha=0.7) + + ax.axvline(x=0, ymin=0.1, ymax=0.9, color="#00ff00", linestyle="-", linewidth=2, alpha=0.9) + if subtasks: + ax.axvline( + 
x=timestamp_to_seconds(subtasks[-1].timestamps.end), + ymin=0.1, + ymax=0.9, + color="white", + linestyle="--", + linewidth=1.5, + alpha=0.7, + ) + + ax.set_xlim(-total_duration * 0.02, total_duration * 1.02) + ax.set_ylim(-0.1, 1.1) + ax.set_xlabel("Time (seconds)", fontsize=10, color="white", labelpad=5) + for spine in ["top", "right", "left"]: + ax.spines[spine].set_visible(False) + ax.spines["bottom"].set_color("#444444") + ax.tick_params(axis="x", colors="#888888", labelsize=8) + ax.tick_params(axis="y", left=False, labelleft=False) + + +def visualize_episode( + ep_idx: int, + annotation: SubtaskAnnotation, + video_path: Path, + video_start: float, + video_end: float, + output_path: Path, + video_key: str, + ann_type: str, +): + """Create visualization for a single episode with frames and timeline.""" + import matplotlib.pyplot as plt + + if annotation is None: + print(f"No {ann_type} annotation for episode {ep_idx}") + return + + subtasks = annotation.subtasks + if not subtasks: + print(f"No subtasks for episode {ep_idx}") + return + + colors = plt.cm.tab10(np.linspace(0, 1, max(len(subtasks), 10))) + total_duration = timestamp_to_seconds(subtasks[-1].timestamps.end) + + # Extract middle frame from each subtask + sample_frames, frame_times = [], [] + for subtask in subtasks: + start = timestamp_to_seconds(subtask.timestamps.start) + end = timestamp_to_seconds(subtask.timestamps.end) + mid = (start + end) / 2 + frame_times.append(mid) + sample_frames.append(extract_frame(video_path, video_start + mid)) + + # Create figure + fig_width = max(16, len(subtasks) * 2.5) + fig = plt.figure(figsize=(fig_width, 10)) + fig.patch.set_facecolor("#1a1a2e") + + gs = fig.add_gridspec( + 2, + max(len(subtasks), 1), + height_ratios=[2, 1], + hspace=0.3, + wspace=0.1, + left=0.05, + right=0.95, + top=0.88, + bottom=0.1, + ) + + fig.suptitle( + f"Episode {ep_idx} - {ann_type.capitalize()} Annotations", + fontsize=18, + fontweight="bold", + color="white", + y=0.96, + ) + fig.text( + 0.5, + 0.91, + f"Camera: {video_key} | Duration: {video_end - video_start:.1f}s | {len(subtasks)} subtasks", + ha="center", + fontsize=11, + color="#888888", + ) + + # Plot frames + for i, (frame, subtask) in enumerate(zip(sample_frames, subtasks, strict=True)): + ax = fig.add_subplot(gs[0, i]) + ax.set_facecolor("#16213e") + if frame is not None: + ax.imshow(frame) + else: + ax.text( + 0.5, 0.5, "N/A", ha="center", va="center", fontsize=12, color="white", transform=ax.transAxes + ) + ax.set_title(subtask.name, fontsize=10, fontweight="bold", color=colors[i % len(colors)], pad=8) + ax.axis("off") + ax.text( + 0.5, + -0.08, + f"t={frame_times[i]:.1f}s", + ha="center", + fontsize=9, + color="#888888", + transform=ax.transAxes, + ) + + # Plot timeline + ax_timeline = fig.add_subplot(gs[1, :]) + ax_timeline.set_facecolor("#16213e") + draw_timeline(ax_timeline, subtasks, total_duration, colors) + + output_path.parent.mkdir(parents=True, exist_ok=True) + plt.savefig(output_path, dpi=150, facecolor=fig.get_facecolor(), edgecolor="none", bbox_inches="tight") + plt.close() + print(f"Saved: {output_path}") + + +def visualize_annotations( + dataset: LeRobotDataset, + sparse_annotations: dict[int, SubtaskAnnotation], + dense_annotations: dict[int, SubtaskAnnotation] | None, + video_key: str, + output_dir: Path, + num_episodes: int = 5, + annotation_type: str = "sparse", + episode_indices: list[int] | None = None, +): + """ + Visualize subtask annotations for a set of episodes. 
+ + Args: + dataset: LeRobotDataset instance + sparse_annotations: Dict mapping episode index to sparse annotations + dense_annotations: Dict mapping episode index to dense annotations (or None) + video_key: Camera/video key to use + output_dir: Directory to save visualization images + num_episodes: Number of episodes to visualize (ignored if episode_indices provided) + annotation_type: "sparse", "dense", or "both" + episode_indices: Specific episode indices to visualize (optional) + """ + # Determine available episodes based on annotation type + if annotation_type == "sparse": + available = set(sparse_annotations.keys()) + elif annotation_type == "dense": + available = set(dense_annotations.keys()) if dense_annotations else set() + else: # both + sparse_set = set(sparse_annotations.keys()) + dense_set = set(dense_annotations.keys()) if dense_annotations else set() + available = sparse_set | dense_set + + if not available: + print("Error: No annotations found to visualize.") + return + + # Select episodes to visualize + if episode_indices: + episodes = sorted([e for e in episode_indices if e in available]) + missing = set(episode_indices) - available + if missing: + print(f"Episodes not found in annotations: {sorted(missing)}") + else: + episodes = sorted(random.sample(list(available), min(num_episodes, len(available)))) + print(f"Visualizing {len(episodes)} episodes: {episodes}") + output_dir.mkdir(parents=True, exist_ok=True) + + # Generate visualizations + for i, ep_idx in enumerate(episodes, 1): + print(f"Processing episode {ep_idx} ({i}/{len(episodes)})") + video_path = dataset.root / dataset.meta.get_video_file_path(ep_idx, video_key) + if not video_path.exists(): + print(f"Video not found: {video_path}") + continue + + video_start = float(dataset.meta.episodes[f"videos/{video_key}/from_timestamp"][ep_idx]) + video_end = float(dataset.meta.episodes[f"videos/{video_key}/to_timestamp"][ep_idx]) + + if annotation_type == "both": + # Visualize both sparse and dense + for ann_type, annotations in [("sparse", sparse_annotations), ("dense", dense_annotations)]: + if annotations and ep_idx in annotations: + output_path = output_dir / f"episode_{ep_idx:04d}_{ann_type}.png" + visualize_episode( + ep_idx, + annotations.get(ep_idx), + video_path, + video_start, + video_end, + output_path, + video_key, + ann_type, + ) + else: + annotations = sparse_annotations if annotation_type == "sparse" else dense_annotations + if annotations and ep_idx in annotations: + output_path = output_dir / f"episode_{ep_idx:04d}_{annotation_type}.png" + visualize_episode( + ep_idx, + annotations.get(ep_idx), + video_path, + video_start, + video_end, + output_path, + video_key, + annotation_type, + ) + + print(f"Visualizations saved to: {output_dir.absolute()}") + + +def save_annotations_to_dataset( + dataset_path: Path, annotations: dict[int, SubtaskAnnotation], fps: int, prefix: str = "sparse" +): + """Save annotations to LeRobot dataset parquet format.""" + from lerobot.datasets.utils import DEFAULT_EPISODES_PATH, load_episodes + + episodes_dataset = load_episodes(dataset_path) + if not episodes_dataset or len(episodes_dataset) == 0: + return + + episodes_df = episodes_dataset.to_pandas() + cols = [ + f"{prefix}_{c}" + for c in [ + "subtask_names", + "subtask_start_times", + "subtask_end_times", + "subtask_start_frames", + "subtask_end_frames", + ] + ] + for col in cols: + episodes_df[col] = None + + for ep_idx, ann in annotations.items(): + if ep_idx >= len(episodes_df): + continue + names, starts, ends, 
start_frames, end_frames = [], [], [], [], [] + for s in ann.subtasks: + names.append(s.name) + st, et = timestamp_to_seconds(s.timestamps.start), timestamp_to_seconds(s.timestamps.end) + starts.append(st) + ends.append(et) + start_frames.append(int(st * fps)) + end_frames.append(int(et * fps)) + episodes_df.at[ep_idx, cols[0]] = names + episodes_df.at[ep_idx, cols[1]] = starts + episodes_df.at[ep_idx, cols[2]] = ends + episodes_df.at[ep_idx, cols[3]] = start_frames + episodes_df.at[ep_idx, cols[4]] = end_frames + + # Group by file and write + for ep_idx in episodes_df.index: + key = ( + episodes_df.loc[ep_idx, "meta/episodes/chunk_index"], + episodes_df.loc[ep_idx, "meta/episodes/file_index"], + ) + path = dataset_path / DEFAULT_EPISODES_PATH.format(chunk_index=key[0], file_index=key[1]) + if path.exists(): + file_df = pd.read_parquet(path) + for col in cols + ( + [ + "subtask_names", + "subtask_start_times", + "subtask_end_times", + "subtask_start_frames", + "subtask_end_frames", + ] + if prefix == "sparse" + else [] + ): + if col not in file_df.columns: + file_df[col] = None + if ep_idx in annotations: + for col in cols: + file_df.at[ep_idx, col] = episodes_df.loc[ep_idx, col] + if prefix == "sparse": # Legacy columns + for i, legacy in enumerate( + [ + "subtask_names", + "subtask_start_times", + "subtask_end_times", + "subtask_start_frames", + "subtask_end_frames", + ] + ): + file_df.at[ep_idx, legacy] = episodes_df.loc[ep_idx, cols[i]] + file_df.to_parquet(path, engine="pyarrow", compression="snappy") + + +def generate_auto_sparse_annotations( + dataset: LeRobotDataset, episode_indices: list[int], video_key: str +) -> dict[int, SubtaskAnnotation]: + """Auto-generate single 'task' stage annotations for all episodes.""" + annotations = {} + for ep_idx in episode_indices: + start = float(dataset.meta.episodes[f"videos/{video_key}/from_timestamp"][ep_idx]) + end = float(dataset.meta.episodes[f"videos/{video_key}/to_timestamp"][ep_idx]) + duration = end - start + end_str = f"{int(duration // 60):02d}:{int(duration % 60):02d}" + annotations[ep_idx] = SubtaskAnnotation( + subtasks=[Subtask(name="task", timestamps=Timestamp(start="00:00", end=end_str))] + ) + return annotations + + +def load_annotations_from_dataset(dataset_path: Path, prefix: str = "sparse") -> dict[int, SubtaskAnnotation]: + """Load annotations from LeRobot dataset parquet files.""" + from lerobot.datasets.utils import load_episodes + + episodes_dataset = load_episodes(dataset_path) + if not episodes_dataset or len(episodes_dataset) == 0: + return {} + + col_names = f"{prefix}_subtask_names" + col_start = f"{prefix}_subtask_start_times" + col_end = f"{prefix}_subtask_end_times" + + # Fall back to legacy columns for sparse + if col_names not in episodes_dataset.column_names: + if prefix == "sparse" and "subtask_names" in episodes_dataset.column_names: + col_names, col_start, col_end = "subtask_names", "subtask_start_times", "subtask_end_times" + else: + return {} + + df = episodes_dataset.to_pandas() + annotations = {} + for ep_idx in df.index: + names = df.loc[ep_idx, col_names] + if names is None or (isinstance(names, float) and pd.isna(names)): + continue + starts, ends = df.loc[ep_idx, col_start], df.loc[ep_idx, col_end] + annotations[int(ep_idx)] = SubtaskAnnotation( + subtasks=[ + Subtask( + name=n, + timestamps=Timestamp( + start=f"{int(s) // 60:02d}:{int(s) % 60:02d}", + end=f"{int(e) // 60:02d}:{int(e) % 60:02d}", + ), + ) + for n, s, e in zip(names, starts, ends, strict=True) + ] + ) + return annotations + + +def 
process_single_episode( + ep_idx: int, + dataset_root: Path, + dataset_meta, + video_key: str, + fps: int, + annotator: VideoAnnotator, +) -> tuple[int, SubtaskAnnotation | None, str | None]: + """Process a single episode annotation.""" + try: + video_path = dataset_root / dataset_meta.get_video_file_path(ep_idx, video_key) + if not video_path.exists(): + return ep_idx, None, f"Video not found: {video_path}" + + start = float(dataset_meta.episodes[f"videos/{video_key}/from_timestamp"][ep_idx]) + end = float(dataset_meta.episodes[f"videos/{video_key}/to_timestamp"][ep_idx]) + return ep_idx, annotator.annotate(video_path, fps, start, end), None + except Exception as e: + return ep_idx, None, str(e) + + +def worker_process_episodes( + worker_id: int, + gpu_id: int, + episode_indices: list[int], + repo_id: str, + video_key: str, + sparse_subtask_list: list[str], + dense_subtask_list: list[str] | None, + model_name: str, + torch_dtype: torch.dtype, +) -> tuple[dict, dict | None]: + """Worker for parallel processing across GPUs.""" + device = f"cuda:{gpu_id}" + dataset = LeRobotDataset(repo_id, download_videos=False) + + sparse_annotator = VideoAnnotator(sparse_subtask_list, model_name, device, torch_dtype) + dense_annotator = ( + VideoAnnotator( + dense_subtask_list, + model_name, + device, + torch_dtype, + sparse_annotator.model, + sparse_annotator.processor, + ) + if dense_subtask_list + else None + ) + + sparse_annotations, dense_annotations = {}, {} if dense_subtask_list else None + + for ep_idx in episode_indices: + _, sparse_ann, err = process_single_episode( + ep_idx, dataset.root, dataset.meta, video_key, dataset.fps, sparse_annotator + ) + if sparse_ann: + sparse_annotations[ep_idx] = sparse_ann + + if dense_annotator: + _, dense_ann, _ = process_single_episode( + ep_idx, dataset.root, dataset.meta, video_key, dataset.fps, dense_annotator + ) + if dense_ann: + dense_annotations[ep_idx] = dense_ann + + return sparse_annotations, dense_annotations + + +def main(): + parser = argparse.ArgumentParser(description="SARM-style subtask annotation using local GPU (Qwen3-VL)") + parser.add_argument("--repo-id", type=str, required=True, help="HuggingFace dataset repository ID") + parser.add_argument( + "--sparse-subtasks", type=str, default=None, help="Comma-separated sparse subtask names" + ) + parser.add_argument( + "--dense-subtasks", type=str, default=None, help="Comma-separated dense subtask names" + ) + parser.add_argument( + "--dense-only", action="store_true", help="Dense-only mode with auto-generated sparse 'task' stage" + ) + parser.add_argument("--episodes", type=int, nargs="+", default=None, help="Episode indices to annotate") + parser.add_argument("--model", type=str, default="Qwen/Qwen3-VL-30B-A3B-Instruct", help="VLM model") + parser.add_argument("--skip-existing", action="store_true", help="Skip already annotated episodes") + parser.add_argument("--video-key", type=str, default=None, help="Video key (default: first available)") + parser.add_argument("--push-to-hub", action="store_true", help="Push to HuggingFace Hub") + parser.add_argument("--output-repo-id", type=str, default=None, help="Output repo ID for push") + parser.add_argument("--device", type=str, default="cuda", help="Device (cuda/cpu)") + parser.add_argument("--dtype", type=str, default="bfloat16", choices=["bfloat16", "float16", "float32"]) + parser.add_argument("--num-workers", type=int, default=1, help="Parallel workers for multi-GPU") + parser.add_argument("--gpu-ids", type=int, nargs="+", default=None, help="GPU 
IDs to use") + # Visualization options + parser.add_argument( + "--visualize-only", + action="store_true", + help="Only visualize existing annotations (no generation)", + ) + parser.add_argument( + "--num-visualizations", + type=int, + default=5, + help="Number of episodes to visualize (default: 5)", + ) + parser.add_argument( + "--visualize-type", + type=str, + default="sparse", + choices=["sparse", "dense", "both"], + help="Type of annotations to visualize (default: sparse)", + ) + parser.add_argument( + "--output-dir", + type=str, + default="./subtask_viz", + help="Output directory for visualizations (default: ./subtask_viz)", + ) + + args = parser.parse_args() + + # Load dataset first (needed for both annotation and visualization) + print(f"Loading dataset: {args.repo_id}") + dataset = LeRobotDataset(args.repo_id, download_videos=True) + fps = dataset.fps + + if not dataset.meta.video_keys: + raise ValueError("No video keys found") + + video_key = ( + args.video_key if args.video_key in (dataset.meta.video_keys or []) else dataset.meta.video_keys[0] + ) + print(f"Using camera: {video_key}, FPS: {fps}") + + # Handle visualization-only mode + if args.visualize_only: + print("Visualization-only mode") + sparse_annotations = load_annotations_from_dataset(dataset.root, prefix="sparse") + dense_annotations = load_annotations_from_dataset(dataset.root, prefix="dense") + + if not sparse_annotations and not dense_annotations: + return print("Error: No annotations found. Run annotation first.") + + print(f"Found {len(sparse_annotations)} sparse, {len(dense_annotations)} dense annotations") + + visualize_annotations( + dataset=dataset, + sparse_annotations=sparse_annotations, + dense_annotations=dense_annotations if dense_annotations else None, + video_key=video_key, + output_dir=Path(args.output_dir), + num_episodes=args.num_visualizations, + annotation_type=args.visualize_type, + episode_indices=args.episodes, + ) + return + + # Validate arguments for annotation mode + if args.dense_only and not args.dense_subtasks: + return print("Error: --dense-only requires --dense-subtasks") + if args.dense_subtasks and not args.sparse_subtasks and not args.dense_only: + return print("Error: --dense-subtasks requires --sparse-subtasks or --dense-only") + + sparse_subtask_list = ( + [s.strip() for s in args.sparse_subtasks.split(",")] if args.sparse_subtasks else None + ) + dense_subtask_list = [s.strip() for s in args.dense_subtasks.split(",")] if args.dense_subtasks else None + auto_sparse = sparse_subtask_list is None + dense_mode = dense_subtask_list is not None + torch_dtype = {"bfloat16": torch.bfloat16, "float16": torch.float16, "float32": torch.float32}[args.dtype] + + # Determine episodes + episode_indices = args.episodes or list(range(dataset.meta.total_episodes)) + + existing_annotations = load_annotations_from_dataset(dataset.root, prefix="sparse") + if args.skip_existing: + episode_indices = [ep for ep in episode_indices if ep not in existing_annotations] + + if not episode_indices: + return print("All episodes already annotated!") + print(f"Annotating {len(episode_indices)} episodes") + + # GPU setup + gpu_ids = args.gpu_ids or list( + range(min(args.num_workers, torch.cuda.device_count() if torch.cuda.is_available() else 1)) + ) + args.num_workers = len(gpu_ids) + + sparse_annotations = existing_annotations.copy() + dense_annotations = {} if dense_mode else None + + # Auto-sparse mode + if auto_sparse: + sparse_annotations.update(generate_auto_sparse_annotations(dataset, episode_indices, 
video_key)) + save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse") + print(f"Auto-generated {len(episode_indices)} sparse 'task' annotations") + + # VLM annotation (for sparse if not auto, and for dense) + need_vlm = (not auto_sparse) or dense_mode + + if need_vlm: + if args.num_workers > 1 and not auto_sparse: + # Parallel processing + print(f"Parallel processing with {args.num_workers} workers") + episodes_per_worker = [[] for _ in range(args.num_workers)] + for i, ep_idx in enumerate(episode_indices): + episodes_per_worker[i % args.num_workers].append(ep_idx) + + with ProcessPoolExecutor( + max_workers=args.num_workers, mp_context=mp.get_context("spawn") + ) as executor: + futures = [ + executor.submit( + worker_process_episodes, + w, + gpu_ids[w], + episodes_per_worker[w], + args.repo_id, + video_key, + sparse_subtask_list, + dense_subtask_list, + args.model, + torch_dtype, + ) + for w in range(args.num_workers) + if episodes_per_worker[w] + ] + + for future in as_completed(futures): + try: + worker_sparse, worker_dense = future.result() + sparse_annotations.update(worker_sparse) + if dense_mode and worker_dense: + dense_annotations.update(worker_dense) + save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse") + if dense_mode: + save_annotations_to_dataset(dataset.root, dense_annotations, fps, prefix="dense") + except Exception as e: + raise RuntimeError(f"Worker failed: {e}") from e + else: + # Sequential processing + sparse_annotator = ( + VideoAnnotator(sparse_subtask_list, args.model, args.device, torch_dtype) + if not auto_sparse and sparse_subtask_list + else None + ) + dense_annotator = ( + VideoAnnotator( + dense_subtask_list, + args.model, + args.device, + torch_dtype, + sparse_annotator.model if sparse_annotator else None, + sparse_annotator.processor if sparse_annotator else None, + ) + if dense_mode + else None + ) + + for i, ep_idx in enumerate(episode_indices): + print(f"Episode {ep_idx} ({i + 1}/{len(episode_indices)})") + + if sparse_annotator: + _, sparse_ann, err = process_single_episode( + ep_idx, dataset.root, dataset.meta, video_key, fps, sparse_annotator + ) + if sparse_ann: + sparse_annotations[ep_idx] = sparse_ann + save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse") + elif err: + print(f"Sparse failed: {err}") + + if dense_annotator: + _, dense_ann, err = process_single_episode( + ep_idx, dataset.root, dataset.meta, video_key, fps, dense_annotator + ) + if dense_ann: + dense_annotations[ep_idx] = dense_ann + save_annotations_to_dataset(dataset.root, dense_annotations, fps, prefix="dense") + elif err: + print(f"Dense failed: {err}") + + # Save temporal proportions + def save_proportions(annotations, prefix, subtask_list=None, is_auto=False): + props: dict[str, float] = ( + {"task": 1.0} if is_auto else compute_temporal_proportions(annotations, fps, subtask_list) + ) + path = dataset.root / "meta" / f"temporal_proportions_{prefix}.json" + path.parent.mkdir(parents=True, exist_ok=True) + with open(path, "w") as f: + json.dump(props, f, indent=2) + print(f"Saved {prefix} temporal proportions") + + save_proportions(sparse_annotations, "sparse", sparse_subtask_list, auto_sparse) + if dense_mode and dense_annotations: + save_proportions(dense_annotations, "dense", dense_subtask_list) + + print(f"\nComplete! 
{len(sparse_annotations)} sparse, {len(dense_annotations or {})} dense annotations") + + # Visualize annotations after generation + if args.num_visualizations > 0: + print(f"\nGenerating {args.num_visualizations} visualizations...") + visualize_type = "both" if dense_mode else "sparse" + visualize_annotations( + dataset=dataset, + sparse_annotations=sparse_annotations, + dense_annotations=dense_annotations, + video_key=video_key, + output_dir=Path(args.output_dir), + num_episodes=args.num_visualizations, + annotation_type=visualize_type, + ) + + if args.push_to_hub: + try: + dataset.push_to_hub(push_videos=True) + print(f"Pushed to {args.output_repo_id or args.repo_id}") + except Exception as e: + print(f"Push failed: {e}") + + +if __name__ == "__main__": + main() diff --git a/src/lerobot/policies/__init__.py b/src/lerobot/policies/__init__.py index 788542d49..ceefb0d56 100644 --- a/src/lerobot/policies/__init__.py +++ b/src/lerobot/policies/__init__.py @@ -29,6 +29,7 @@ __all__ = [ "PI0Config", "PI05Config", "SmolVLAConfig", + "SARMConfig", "TDMPCConfig", "VQBeTConfig", "GrootConfig", diff --git a/src/lerobot/policies/act/modeling_act.py b/src/lerobot/policies/act/modeling_act.py index b7cbcd061..a5c48eb3d 100644 --- a/src/lerobot/policies/act/modeling_act.py +++ b/src/lerobot/policies/act/modeling_act.py @@ -50,6 +50,7 @@ class ACTPolicy(PreTrainedPolicy): def __init__( self, config: ACTConfig, + **kwargs, ): """ Args: diff --git a/src/lerobot/policies/diffusion/modeling_diffusion.py b/src/lerobot/policies/diffusion/modeling_diffusion.py index 3ab6719cb..1fdc76f10 100644 --- a/src/lerobot/policies/diffusion/modeling_diffusion.py +++ b/src/lerobot/policies/diffusion/modeling_diffusion.py @@ -56,6 +56,7 @@ class DiffusionPolicy(PreTrainedPolicy): def __init__( self, config: DiffusionConfig, + **kwargs, ): """ Args: diff --git a/src/lerobot/policies/factory.py b/src/lerobot/policies/factory.py index 3d17fa7dc..eb1ff41f7 100644 --- a/src/lerobot/policies/factory.py +++ b/src/lerobot/policies/factory.py @@ -37,6 +37,7 @@ from lerobot.policies.pi05.configuration_pi05 import PI05Config from lerobot.policies.pretrained import PreTrainedPolicy from lerobot.policies.sac.configuration_sac import SACConfig from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig +from lerobot.policies.sarm.configuration_sarm import SARMConfig from lerobot.policies.smolvla.configuration_smolvla import SmolVLAConfig from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.policies.utils import validate_visual_features_consistency @@ -105,6 +106,10 @@ def get_policy_class(name: str) -> type[PreTrainedPolicy]: from lerobot.policies.smolvla.modeling_smolvla import SmolVLAPolicy return SmolVLAPolicy + elif name == "sarm": + from lerobot.policies.sarm.modeling_sarm import SARMRewardModel + + return SARMRewardModel elif name == "groot": from lerobot.policies.groot.modeling_groot import GrootPolicy @@ -337,6 +342,14 @@ def make_pre_post_processors( dataset_stats=kwargs.get("dataset_stats"), ) + elif isinstance(policy_cfg, SARMConfig): + from lerobot.policies.sarm.processor_sarm import make_sarm_pre_post_processors + + processors = make_sarm_pre_post_processors( + config=policy_cfg, + dataset_stats=kwargs.get("dataset_stats"), + dataset_meta=kwargs.get("dataset_meta"), + ) elif isinstance(policy_cfg, GrootConfig): from lerobot.policies.groot.processor_groot import make_groot_pre_post_processors @@ -435,6 +448,13 @@ def make_policy( cfg.input_features = {key: ft for 
key, ft in features.items() if key not in cfg.output_features} kwargs["config"] = cfg + # Pass dataset_stats to the policy if available (needed for some policies like SARM) + if ds_meta is not None and hasattr(ds_meta, "stats"): + kwargs["dataset_stats"] = ds_meta.stats + + if ds_meta is not None: + kwargs["dataset_meta"] = ds_meta + if cfg.pretrained_path: # Load a pretrained policy and override the config if needed (for example, if there are inference-time # hyperparameters that we want to vary). diff --git a/src/lerobot/policies/groot/modeling_groot.py b/src/lerobot/policies/groot/modeling_groot.py index 605f7a097..bdaef37b9 100644 --- a/src/lerobot/policies/groot/modeling_groot.py +++ b/src/lerobot/policies/groot/modeling_groot.py @@ -49,7 +49,7 @@ class GrootPolicy(PreTrainedPolicy): name = "groot" config_class = GrootConfig - def __init__(self, config: GrootConfig): + def __init__(self, config: GrootConfig, **kwargs): """Initialize Groot policy wrapper.""" super().__init__(config) config.validate_features() diff --git a/src/lerobot/policies/pi0/modeling_pi0.py b/src/lerobot/policies/pi0/modeling_pi0.py index 4b79c2902..0d9c77e00 100644 --- a/src/lerobot/policies/pi0/modeling_pi0.py +++ b/src/lerobot/policies/pi0/modeling_pi0.py @@ -907,6 +907,7 @@ class PI0Policy(PreTrainedPolicy): def __init__( self, config: PI0Config, + **kwargs, ): """ Args: @@ -1235,9 +1236,15 @@ class PI0Policy(PreTrainedPolicy): return actions - def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: - """Run the batch through the model and compute the loss for training.""" + def forward(self, batch: dict[str, Tensor], reduction: str = "mean") -> tuple[Tensor, dict]: + """Run the batch through the model and compute the loss for training. + Args: + batch: Training batch containing observations and actions. + reduction: How to reduce the loss. 
Options: + - "mean": Return scalar mean loss (default, backward compatible) + - "none": Return per-sample losses of shape (batch_size,) for RA-BC weighting + """ # Prepare inputs images, img_masks = self._preprocess_images(batch) lang_tokens, lang_masks = batch[f"{OBS_LANGUAGE_TOKENS}"], batch[f"{OBS_LANGUAGE_ATTENTION_MASK}"] @@ -1251,11 +1258,17 @@ class PI0Policy(PreTrainedPolicy): original_action_dim = self.config.output_features[ACTION].shape[0] losses = losses[:, :, :original_action_dim] - loss = losses.mean() - loss_dict = { - "loss": loss.item(), "loss_per_dim": losses.mean(dim=[0, 1]).detach().cpu().numpy().tolist(), } - return loss, loss_dict + if reduction == "none": + # Return per-sample losses (B,) by averaging over time and action dims + per_sample_loss = losses.mean(dim=(1, 2)) + loss_dict["loss"] = per_sample_loss.mean().item() + return per_sample_loss, loss_dict + else: + # Default: return scalar mean loss + loss = losses.mean() + loss_dict["loss"] = loss.item() + return loss, loss_dict diff --git a/src/lerobot/policies/pi05/modeling_pi05.py b/src/lerobot/policies/pi05/modeling_pi05.py index 64eb4cb23..2cd142042 100644 --- a/src/lerobot/policies/pi05/modeling_pi05.py +++ b/src/lerobot/policies/pi05/modeling_pi05.py @@ -880,6 +880,7 @@ class PI05Policy(PreTrainedPolicy): def __init__( self, config: PI05Config, + **kwargs, ): """ Args: @@ -1209,9 +1210,15 @@ class PI05Policy(PreTrainedPolicy): return actions - def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: - """Run the batch through the model and compute the loss for training.""" + def forward(self, batch: dict[str, Tensor], reduction: str = "mean") -> tuple[Tensor, dict]: + """Run the batch through the model and compute the loss for training. + Args: + batch: Training batch containing observations and actions. + reduction: How to reduce the loss. Options: + - "mean": Return scalar mean loss (default, backward compatible) + - "none": Return per-sample losses of shape (batch_size,) for RA-BC weighting + """ # Prepare inputs images, img_masks = self._preprocess_images(batch) tokens, masks = batch[f"{OBS_LANGUAGE_TOKENS}"], batch[f"{OBS_LANGUAGE_ATTENTION_MASK}"] @@ -1225,11 +1232,17 @@ class PI05Policy(PreTrainedPolicy): original_action_dim = self.config.output_features[ACTION].shape[0] losses = losses[:, :, :original_action_dim] - loss = losses.mean() - loss_dict = { - "loss": loss.item(), "loss_per_dim": losses.mean(dim=[0, 1]).detach().cpu().numpy().tolist(), } - return loss, loss_dict + if reduction == "none": + # Return per-sample losses (B,) by averaging over time and action dims + per_sample_loss = losses.mean(dim=(1, 2)) + loss_dict["loss"] = per_sample_loss.mean().item() + return per_sample_loss, loss_dict + else: + # Default: return scalar mean loss + loss = losses.mean() + loss_dict["loss"] = loss.item() + return loss, loss_dict diff --git a/src/lerobot/policies/sarm/compute_rabc_weights.py b/src/lerobot/policies/sarm/compute_rabc_weights.py new file mode 100644 index 000000000..5b6ea6e9b --- /dev/null +++ b/src/lerobot/policies/sarm/compute_rabc_weights.py @@ -0,0 +1,870 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Compute SARM progress values for RA-BC (Reward-Aware Behavior Cloning) weighting.
+
+This script processes all frames in a dataset with SARM to compute progress values in [0, 1].
+The results are saved as a parquet file that can be loaded during training for RA-BC weighting.
+
+Each query scores the full bidirectional frame window around a target frame; the script
+keeps the center-frame prediction per query and orders queries so that frames sharing
+similar temporal windows are processed together (cache-friendly). Use --stride N to
+compute every N frames and linearly interpolate the rest.
+
+Usage:
+    # Full RA-BC computation with visualizations
+    python src/lerobot/policies/sarm/compute_rabc_weights.py \\
+        --dataset-repo-id lerobot/aloha_sim_insertion_human \\
+        --reward-model-path pepijn223/sarm_single_uni4
+
+    # Faster computation with stride (compute every 5 frames, interpolate the rest)
+    python src/lerobot/policies/sarm/compute_rabc_weights.py \\
+        --dataset-repo-id lerobot/aloha_sim_insertion_human \\
+        --reward-model-path pepijn223/sarm_single_uni4 \\
+        --stride 5
+
+    # Visualize predictions only (no RA-BC computation)
+    python src/lerobot/policies/sarm/compute_rabc_weights.py \\
+        --dataset-repo-id lerobot/aloha_sim_insertion_human \\
+        --reward-model-path pepijn223/sarm_single_uni4 \\
+        --visualize-only \\
+        --num-visualizations 5
+
+The output is saved to the dataset's local cache directory as 'sarm_progress.parquet'.
+"""
+
+import argparse
+import logging
+from pathlib import Path
+from typing import Any
+
+import matplotlib.gridspec as gridspec
+import matplotlib.pyplot as plt
+import numpy as np
+import pyarrow as pa
+import pyarrow.parquet as pq
+import torch
+from tqdm import tqdm
+
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.policies.sarm.modeling_sarm import SARMRewardModel
+from lerobot.policies.sarm.processor_sarm import make_sarm_pre_post_processors
+from lerobot.policies.sarm.sarm_utils import normalize_stage_tau
+
+
+def get_reward_model_path_from_parquet(parquet_path: Path) -> str | None:
+    """Read reward_model_path from parquet metadata if available."""
+    if not parquet_path.exists():
+        return None
+    try:
+        metadata = pq.read_metadata(parquet_path).schema.to_arrow_schema().metadata
+        if metadata and b"reward_model_path" in metadata:
+            return metadata[b"reward_model_path"].decode()
+    except Exception:  # nosec B110
+        return None
+    return None
+
+
+def load_sarm_resources(
+    dataset_repo_id: str,
+    reward_model_path: str,
+    device: str = "cuda",
+) -> tuple[LeRobotDataset, SARMRewardModel, Any]:
+    """
+    Load SARM model, dataset, and preprocessor.
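+
+    The dataset is opened with delta_timestamps derived from the model's
+    observation_delta_indices and the dataset fps, so each sample already carries
+    the full bidirectional frame window SARM expects.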
+ + Returns: + Tuple of (dataset, reward_model, preprocessor) + """ + logging.info(f"Loading model: {reward_model_path}") + reward_model = SARMRewardModel.from_pretrained(reward_model_path) + reward_model.config.device = device + reward_model.to(device).eval() + + image_key = reward_model.config.image_key + state_key = reward_model.config.state_key + delta_indices = reward_model.config.observation_delta_indices + + logging.info(f"Loading dataset: {dataset_repo_id}") + temp_dataset = LeRobotDataset(dataset_repo_id, download_videos=True) + fps = temp_dataset.fps + + delta_timestamps = { + image_key: [idx / fps for idx in delta_indices], + state_key: [idx / fps for idx in delta_indices], + } + dataset = LeRobotDataset(dataset_repo_id, delta_timestamps=delta_timestamps) + logging.info(f"Dataset: {dataset.num_episodes} episodes, {dataset.num_frames} frames") + + preprocess, _ = make_sarm_pre_post_processors( + config=reward_model.config, + dataset_stats=dataset.meta.stats, + dataset_meta=dataset.meta, + ) + + return dataset, reward_model, preprocess + + +def to_numpy_image(img) -> np.ndarray: + """Convert image tensor to numpy uint8 (H, W, C).""" + if isinstance(img, torch.Tensor): + img = img.cpu().numpy() + if img.ndim == 4: + # Take center frame for bidirectional sampling + img = img[img.shape[0] // 2] + if img.shape[0] in [1, 3]: + img = np.transpose(img, (1, 2, 0)) + if img.dtype != np.uint8: + # Handle normalized images (may have negative values or values > 1) + img = img.astype(np.float32) + img = (img - img.min()) / (img.max() - img.min() + 1e-8) # Normalize to [0, 1] + img = (img * 255).astype(np.uint8) + return img + + +def visualize_episode( + frames, progress_preds, stage_preds, title, output_path, stage_labels, gt_progress=None, gt_stages=None +): + """Create visualization with progress plot, stage probabilities, and sample frames. 
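+
+    The figure stacks three panels: a progress curve, a stage-probability stackplot,
+    and a strip of sample frames annotated with predicted progress and stage.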
+ + Same as sarm_inference_visualization.py + """ + num_stages = stage_preds.shape[1] + colors = plt.cm.tab10(np.linspace(0, 1, num_stages)) + frame_indices = np.arange(len(progress_preds)) + + fig = plt.figure(figsize=(14, 12)) + gs = gridspec.GridSpec(3, 1, height_ratios=[2, 1, 1], hspace=0.3) + ax_progress, ax_stages, ax_frames = fig.add_subplot(gs[0]), fig.add_subplot(gs[1]), fig.add_subplot(gs[2]) + + # Progress plot + ax_progress.plot(frame_indices, progress_preds, linewidth=2, color="#2E86AB", label="Predicted") + ax_progress.fill_between(frame_indices, 0, progress_preds, alpha=0.3, color="#2E86AB") + if gt_progress is not None: + ax_progress.plot( + frame_indices, gt_progress, linewidth=2, color="#28A745", linestyle="--", label="Ground Truth" + ) + ax_progress.axhline(y=1.0, color="gray", linestyle="--", alpha=0.5) + ax_progress.set_ylabel("Progress") + ax_progress.set_title(f'Task: "{title}"', fontweight="bold") + ax_progress.set_ylim(-0.05, 1.1) + ax_progress.legend(loc="upper left") + ax_progress.grid(True, alpha=0.3) + + # Stage predictions + ax_stages.stackplot( + frame_indices, + *[stage_preds[:, i] for i in range(num_stages)], + colors=colors, + alpha=0.8, + labels=stage_labels, + ) + if gt_stages is not None: + for change_idx in np.where(np.diff(gt_stages) != 0)[0] + 1: + ax_stages.axvline(x=change_idx, color="black", linestyle="-", alpha=0.7, linewidth=1.5) + ax_stages.set_xlabel("Frame") + ax_stages.set_ylabel("Stage Probability") + ax_stages.set_ylim(0, 1) + ax_stages.legend(loc="upper left", ncol=min(num_stages, 5), fontsize=8) + ax_stages.grid(True, alpha=0.3) + + # Sample frames + ax_frames.axis("off") + num_sample = 8 + sample_indices = np.linspace(0, len(frames) - 1, num_sample, dtype=int) + h, w = frames[0].shape[:2] + combined = np.zeros((h, w * num_sample, 3), dtype=np.uint8) + for i, idx in enumerate(sample_indices): + frame = frames[idx] + if frame.shape[-1] == 1: + frame = np.repeat(frame, 3, axis=-1) + combined[:, i * w : (i + 1) * w] = frame + stage_name = stage_labels[np.argmax(stage_preds[idx])][:12] + ax_frames.text( + i * w + w / 2, + -10, + f"Frame {idx}\n{progress_preds[idx]:.2f}\n{stage_name}", + ha="center", + va="top", + fontsize=7, + ) + ax_frames.imshow(combined) + ax_frames.set_title("Sample Frames", pad=20) + + output_path.parent.mkdir(parents=True, exist_ok=True) + plt.savefig(output_path, dpi=150, bbox_inches="tight") + plt.close() + print(f"Saved: {output_path}") + + +def visualize_sarm_predictions( + dataset: LeRobotDataset, + reward_model: SARMRewardModel, + preprocess, + episode_indices: list[int], + head_mode: str, + output_dir: Path, + num_display_frames: int = 5, + stride: int = 1, +): + """ + Visualize SARM predictions for multiple episodes. + + Computes predictions for every frame by default. With stride > 1, computes predictions + every N frames and interpolates (progress + stage probabilities) for visualization. 
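+    With stride > 1, ground-truth overlays are also skipped: only sparsely computed
+    points would be available, which makes the GT curves misleading.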
+ + Args: + dataset: LeRobotDataset with delta_timestamps configured + reward_model: Loaded SARM model + preprocess: Preprocessor from make_sarm_pre_post_processors + episode_indices: List of episode indices to visualize + head_mode: "sparse", "dense", or "both" + output_dir: Directory to save visualizations + num_display_frames: Number of frames to display in thumbnail strip (default: 5) + stride: Compute predictions every N frames, interpolate the rest (default: 1) + """ + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + image_key = reward_model.config.image_key + state_key = reward_model.config.state_key + dual_mode = reward_model.config.uses_dual_heads + device = reward_model.device + + # Center frame index for bidirectional sampling + target_idx = reward_model.config.n_obs_steps // 2 + + # Determine which heads to visualize + schemes_to_viz = [] + if head_mode in ("sparse", "both") or not dual_mode: + schemes_to_viz.append("sparse") + if head_mode in ("dense", "both") and dual_mode: + schemes_to_viz.append("dense") + + # Set preprocessor to eval mode to disable augmentations + if hasattr(preprocess, "eval"): + preprocess.eval() + for step in preprocess.steps: + if hasattr(step, "eval"): + step.eval() + + for episode_idx in episode_indices: + ep = dataset.meta.episodes[episode_idx] + ep_start = ep["dataset_from_index"] + ep_end = ep["dataset_to_index"] + task = dataset[ep_start].get("task", "perform the task") + num_frames = ep_end - ep_start + + # Select frames for display thumbnails (evenly sampled from begin to end) + display_indices = set( + [ + ep_start + int(i * (num_frames - 1) / (num_display_frames - 1)) + for i in range(num_display_frames) + ] + if num_frames >= num_display_frames + else list(range(ep_start, ep_end)) + ) + viz_frames = {} + + # Load display frames up-front (stride mode might skip them otherwise). 
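+        # Per-frame predictions are collected separately in scheme_data during the
+        # inference loop below; these thumbnails are only used for the final plot.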
+ for frame_idx in display_indices: + sample = dataset[frame_idx] + viz_frames[frame_idx] = to_numpy_image(sample[image_key]) + + # Initialize storage for each scheme + scheme_data = {} + for scheme in schemes_to_viz: + num_stages = getattr(reward_model.config, f"num_{scheme}_stages") + scheme_data[scheme] = { + "viz_progress": np.full(num_frames, np.nan), + "viz_stages": np.full((num_frames, num_stages), np.nan), + "viz_gt_progress": np.full(num_frames, np.nan), + "viz_gt_stages": np.full(num_frames, np.nan), + "target_key": f"{scheme}_targets", + "num_stages": num_stages, + "temporal_props": getattr(reward_model.config, f"{scheme}_temporal_proportions"), + "subtask_names": getattr(reward_model.config, f"{scheme}_subtask_names"), + } + + if stride > 1: + logging.info(f"Visualization stride={stride}: inferring every {stride} frames and interpolating") + + # Process frames one at a time to avoid memory buildup + frame_indices = list(range(ep_start, ep_end, stride)) + if (ep_end - 1) not in frame_indices: + frame_indices.append(ep_end - 1) + frame_indices = sorted(set(frame_indices)) + + for frame_idx in tqdm(frame_indices, desc=f"Episode {episode_idx}", leave=False): + local_idx = frame_idx - ep_start + sample = dataset[frame_idx] + + batch = { + image_key: sample[image_key], + "task": task, + "index": frame_idx, + "episode_index": episode_idx, + } + if state_key in sample: + batch[state_key] = sample[state_key] + + with torch.no_grad(): + processed = preprocess(batch) + video_features = processed["video_features"].to(device) + text_features = processed["text_features"].to(device) + state_features = processed.get("state_features") + if state_features is not None: + state_features = state_features.to(device) + lengths = processed.get("lengths") + + for scheme in schemes_to_viz: + sd = scheme_data[scheme] + + # Ground truth + # In stride visualization mode, ground-truth plots can be misleading + # (only sparse points are available), so we skip GT. + if stride == 1 and sd["target_key"] in processed: + gt_target = processed[sd["target_key"]][0, target_idx].cpu().item() + sd["viz_gt_stages"][local_idx] = int(gt_target) + sd["viz_gt_progress"][local_idx] = normalize_stage_tau( + gt_target, + num_stages=sd["num_stages"], + temporal_proportions=sd["temporal_props"], + subtask_names=sd["subtask_names"], + ) + + # Predictions + reward, stage_probs = reward_model.calculate_rewards( + text_embeddings=text_features, + video_embeddings=video_features, + state_features=state_features, + lengths=lengths, + return_all_frames=True, + return_stages=True, + head_mode=scheme, + ) + + # Handle both tensor and numpy outputs + if isinstance(reward, torch.Tensor): + reward = reward.cpu().numpy() + stage_probs = stage_probs.cpu().numpy() + + if reward.ndim == 2: + sd["viz_progress"][local_idx] = reward[0, target_idx] + sd["viz_stages"][local_idx] = stage_probs[0, target_idx, :] + else: + sd["viz_progress"][local_idx] = reward[target_idx] + sd["viz_stages"][local_idx] = stage_probs[target_idx, :] + + # Clear GPU memory after each frame + del processed, video_features, text_features + if state_features is not None: + del state_features + + torch.cuda.empty_cache() + + # Interpolate predictions back to per-frame arrays for smooth visualization. 
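+        # Stage probabilities are interpolated per class, clipped to [0, 1], and
+        # re-normalized so each frame's distribution sums to 1 again.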
+ if stride > 1: + all_local = np.arange(num_frames) + for scheme in schemes_to_viz: + sd = scheme_data[scheme] + + valid = np.isfinite(sd["viz_progress"]) + valid_idx = np.where(valid)[0] + if valid_idx.size >= 1: + sd["viz_progress"] = interpolate_progress( + valid_idx, sd["viz_progress"][valid_idx], all_local + ) + + stage_interp = np.zeros_like(sd["viz_stages"], dtype=np.float32) + for s in range(sd["num_stages"]): + stage_interp[:, s] = interpolate_progress( + valid_idx, sd["viz_stages"][valid_idx, s], all_local + ) + + stage_interp = np.clip(stage_interp, 0.0, 1.0) + row_sums = stage_interp.sum(axis=1, keepdims=True) + nz = row_sums.squeeze(-1) > 0 + stage_interp[nz] = stage_interp[nz] / row_sums[nz] + sd["viz_stages"] = stage_interp + else: + # No valid points: keep NaNs/zeros; visualization will be empty. + sd["viz_stages"] = np.nan_to_num(sd["viz_stages"], nan=0.0) + + # Generate visualization for each head + ordered_viz_frames = [viz_frames[idx] for idx in sorted(display_indices)] + for scheme in schemes_to_viz: + sd = scheme_data[scheme] + stage_labels = sd["subtask_names"] or [f"Stage {i + 1}" for i in range(sd["num_stages"])] + viz_path = output_dir / f"sarm_prediction_ep{episode_idx}_{scheme}.png" + + visualize_episode( + frames=np.array(ordered_viz_frames), + progress_preds=sd["viz_progress"], + stage_preds=sd["viz_stages"], + title=f"{task} (Episode {episode_idx})", + output_path=viz_path, + stage_labels=stage_labels, + gt_progress=sd["viz_gt_progress"] if not np.all(np.isnan(sd["viz_gt_progress"])) else None, + gt_stages=sd["viz_gt_stages"] if not np.all(np.isnan(sd["viz_gt_stages"])) else None, + ) + + # Clear memory between episodes + torch.cuda.empty_cache() + + logging.info(f"Visualizations saved to: {output_dir.absolute()}") + + +def generate_all_frame_indices(ep_start: int, ep_end: int, frame_gap: int = 30) -> list[int]: + """Generate all frame indices, ordered by offset for cache-friendly access. + + Orders frames as: [0, 30, 60...], [1, 31, 61...], ..., [29, 59, 89...] + This groups frames that share similar temporal windows together. + """ + num_frames = ep_end - ep_start + indices = [] + for offset in range(frame_gap): + for frame_rel in range(offset, num_frames, frame_gap): + indices.append(ep_start + frame_rel) + return indices + + +def interpolate_progress( + computed_indices: np.ndarray, + computed_values: np.ndarray, + all_indices: np.ndarray, +) -> np.ndarray: + """Linearly interpolate values to fill in gaps (robust to NaNs / edge cases).""" + computed_indices = np.asarray(computed_indices) + computed_values = np.asarray(computed_values) + all_indices = np.asarray(all_indices) + + mask = np.isfinite(computed_values) + if mask.sum() == 0: + return np.full(all_indices.shape, np.nan, dtype=np.float32) + if mask.sum() == 1: + return np.full(all_indices.shape, float(computed_values[mask][0]), dtype=np.float32) + + out = np.interp(all_indices, computed_indices[mask], computed_values[mask]) + return out.astype(np.float32) + + +def compute_sarm_progress( + dataset_repo_id: str, + reward_model_path: str, + output_path: str | None = None, + head_mode: str = "sparse", + device: str = "cuda", + num_visualizations: int = 5, + output_dir: str = "./sarm_viz", + stride: int = 1, +): + """ + Compute SARM progress predictions for all frames in a dataset. + + Args: + dataset_repo_id: HuggingFace dataset repo ID or local path + reward_model_path: Path to pretrained SARM model + output_path: Path to save results. 
If None, saves to dataset's cache directory + head_mode: SARM head to use ("sparse", "dense", or "both") + device: Device to use for inference + num_visualizations: Number of episodes to visualize (0 to skip) + output_dir: Directory to save visualizations + stride: Compute progress every N frames, interpolate the rest (default: 1 = every frame) + """ + dataset, reward_model, preprocess = load_sarm_resources(dataset_repo_id, reward_model_path, device) + + # Set preprocessor to eval mode to disable augmentations + if hasattr(preprocess, "eval"): + preprocess.eval() + for step in preprocess.steps: + if hasattr(step, "eval"): + step.eval() + + image_key = reward_model.config.image_key + state_key = reward_model.config.state_key + frame_gap = reward_model.config.frame_gap + num_episodes = dataset.num_episodes + total_frames = dataset.num_frames + logging.info(f"Processing {total_frames} frames across {num_episodes} episodes") + + # Determine which heads to compute + dual_mode = reward_model.config.uses_dual_heads + compute_sparse = head_mode in ("sparse", "both") or not dual_mode + compute_dense = head_mode in ("dense", "both") and dual_mode + + # Storage arrays + all_indices = [] + all_episode_indices = [] + all_frame_indices = [] + all_progress_sparse = [] if compute_sparse else None + all_progress_dense = [] if compute_dense else None + + if stride > 1: + logging.info(f"Using stride={stride}: computing every {stride} frames, interpolating the rest") + + # Process all episodes + for episode_idx in tqdm(range(num_episodes), desc="Episodes"): + ep = dataset.meta.episodes[episode_idx] + ep_start = ep["dataset_from_index"] + ep_end = ep["dataset_to_index"] + + # Get task description + task = dataset[ep_start].get("task", "perform the task") + + # Generate frames to compute (with stride applied) + all_ep_indices = generate_all_frame_indices(ep_start, ep_end, frame_gap) + if stride > 1: + # Only compute every stride-th frame (relative to episode start) + compute_indices = [idx for idx in all_ep_indices if (idx - ep_start) % stride == 0] + # Always include last frame for better interpolation at episode end + last_frame = ep_end - 1 + if last_frame not in compute_indices: + compute_indices.append(last_frame) + compute_indices = sorted(set(compute_indices)) + else: + compute_indices = all_ep_indices + + center_idx = reward_model.config.n_obs_steps // 2 # Center of bidirectional window + + # Dictionary to collect results + frame_results = {} + + for query_idx in tqdm(compute_indices, desc=f" Ep {episode_idx}", leave=False): + try: + sample = dataset[query_idx] + + batch = { + image_key: sample[image_key], + "task": task, + "index": query_idx, + "episode_index": episode_idx, + } + if state_key in sample: + batch[state_key] = sample[state_key] + + with torch.no_grad(): + processed = preprocess(batch) + video_features = processed["video_features"].to(device) + text_features = processed["text_features"].to(device) + state_features = processed.get("state_features") + if state_features is not None: + state_features = state_features.to(device) + lengths = processed.get("lengths") + + sparse_val = np.nan + dense_val = np.nan + + # Compute sparse prediction for center frame + if compute_sparse: + sparse_progress = reward_model.calculate_rewards( + text_embeddings=text_features, + video_embeddings=video_features, + state_features=state_features, + lengths=lengths, + return_all_frames=True, + head_mode="sparse", + ) + sparse_val = float( + sparse_progress[0, center_idx] + if sparse_progress.ndim == 2 + else 
sparse_progress[center_idx] + ) + + # Compute dense prediction for center frame + if compute_dense: + dense_progress = reward_model.calculate_rewards( + text_embeddings=text_features, + video_embeddings=video_features, + state_features=state_features, + lengths=lengths, + return_all_frames=True, + head_mode="dense", + ) + dense_val = float( + dense_progress[0, center_idx] + if dense_progress.ndim == 2 + else dense_progress[center_idx] + ) + + frame_results[query_idx] = (sparse_val, dense_val) + + except Exception as e: + logging.warning(f"Failed to process frame {query_idx}: {e}") + + # Interpolate to get values for all frames + computed_indices = np.array(sorted(frame_results.keys())) + computed_sparse = ( + np.array([frame_results[i][0] for i in computed_indices]) if compute_sparse else None + ) + computed_dense = np.array([frame_results[i][1] for i in computed_indices]) if compute_dense else None + + # All frame indices for this episode + all_frame_idx_array = np.arange(ep_start, ep_end) + + if stride > 1 and len(computed_indices) > 1: + # Interpolate progress values + if compute_sparse: + interp_sparse = interpolate_progress(computed_indices, computed_sparse, all_frame_idx_array) + if compute_dense: + interp_dense = interpolate_progress(computed_indices, computed_dense, all_frame_idx_array) + else: + # No interpolation needed + interp_sparse = computed_sparse if compute_sparse else None + interp_dense = computed_dense if compute_dense else None + + # Store results for all frames + for i, frame_idx in enumerate(all_frame_idx_array): + local_idx = frame_idx - ep_start + all_indices.append(frame_idx) + all_episode_indices.append(episode_idx) + all_frame_indices.append(local_idx) + if compute_sparse: + if stride > 1 and len(computed_indices) > 1: + all_progress_sparse.append(float(interp_sparse[i])) + elif frame_idx in frame_results: + all_progress_sparse.append(frame_results[frame_idx][0]) + else: + all_progress_sparse.append(np.nan) + if compute_dense: + if stride > 1 and len(computed_indices) > 1: + all_progress_dense.append(float(interp_dense[i])) + elif frame_idx in frame_results: + all_progress_dense.append(frame_results[frame_idx][1]) + else: + all_progress_dense.append(np.nan) + + # Create output table + table_data = { + "index": np.array(all_indices, dtype=np.int64), + "episode_index": np.array(all_episode_indices, dtype=np.int64), + "frame_index": np.array(all_frame_indices, dtype=np.int64), + } + if compute_sparse: + table_data["progress_sparse"] = np.array(all_progress_sparse, dtype=np.float32) + if compute_dense: + table_data["progress_dense"] = np.array(all_progress_dense, dtype=np.float32) + + # Sort by index + df = pa.table(table_data).to_pandas() + df = df.sort_values("index").reset_index(drop=True) + final_table = pa.Table.from_pandas(df, preserve_index=False) + + # Add metadata with reward model path + metadata = {b"reward_model_path": reward_model_path.encode()} + final_table = final_table.replace_schema_metadata(metadata) + + # Determine output path + output_path = Path(dataset.root) / "sarm_progress.parquet" if output_path is None else Path(output_path) + + # Save + output_path.parent.mkdir(parents=True, exist_ok=True) + pq.write_table(final_table, output_path) + logging.info(f"Saved {len(final_table)} frame progress values to {output_path}") + + # Print statistics + if "progress_sparse" in df.columns: + valid = df["progress_sparse"].dropna() + logging.info( + f"Sparse progress: mean={valid.mean():.4f}, std={valid.std():.4f}, " + f"min={valid.min():.4f}, 
max={valid.max():.4f}" + ) + + if "progress_dense" in df.columns: + valid = df["progress_dense"].dropna() + logging.info( + f"Dense progress: mean={valid.mean():.4f}, std={valid.std():.4f}, " + f"min={valid.min():.4f}, max={valid.max():.4f}" + ) + + # Visualize episodes after processing + if num_visualizations > 0: + viz_episodes = list(range(min(num_visualizations, num_episodes))) + logging.info(f"Generating {len(viz_episodes)} visualizations...") + visualize_sarm_predictions( + dataset=dataset, + reward_model=reward_model, + preprocess=preprocess, + episode_indices=viz_episodes, + head_mode=head_mode, + output_dir=Path(output_dir), + stride=stride, + ) + + return output_path + + +def main(): + parser = argparse.ArgumentParser( + description="Compute SARM progress values for RA-BC weighting or visualize SARM predictions", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Full RA-BC computation with visualizations + python src/lerobot/policies/sarm/compute_rabc_weights.py \\ + --dataset-repo-id lerobot/aloha_sim_insertion_human \\ + --reward-model-path pepijn223/sarm_single_uni4 + + # Visualize predictions only (no RA-BC computation) + python src/lerobot/policies/sarm/compute_rabc_weights.py \\ + --dataset-repo-id lerobot/aloha_sim_insertion_human \\ + --reward-model-path pepijn223/sarm_single_uni4 \\ + --visualize-only \\ + --num-visualizations 10 + """, + ) + parser.add_argument( + "--dataset-repo-id", + type=str, + required=True, + help="HuggingFace dataset repo ID or local path", + ) + parser.add_argument( + "--reward-model-path", + type=str, + default=None, + help="Path to pretrained SARM model (reads from existing parquet metadata if not provided)", + ) + parser.add_argument( + "--output-path", + type=str, + default=None, + help="Output path for parquet. 
If not set, saves to dataset's cache directory", + ) + parser.add_argument( + "--head-mode", + type=str, + default="sparse", + choices=["sparse", "dense", "both"], + help="SARM head to use (default: sparse)", + ) + parser.add_argument( + "--device", + type=str, + default="cuda", + help="Device to use (default: cuda)", + ) + # Visualization options + parser.add_argument( + "--visualize-only", + action="store_true", + help="Only visualize SARM predictions (no RA-BC computation)", + ) + parser.add_argument( + "--num-visualizations", + type=int, + default=5, + help="Number of episodes to visualize (default: 5, set to 0 to skip)", + ) + parser.add_argument( + "--output-dir", + type=str, + default="./sarm_viz", + help="Output directory for visualizations (default: ./sarm_viz)", + ) + parser.add_argument( + "--push-to-hub", + action="store_true", + help="Upload progress file to the dataset repo on HuggingFace Hub", + default=True, + ) + parser.add_argument( + "--stride", + type=int, + default=1, + help="Compute progress every N frames, interpolate the rest (default: 1 = every frame)", + ) + + args = parser.parse_args() + + logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s") + + # Try to get reward_model_path from parquet metadata if not provided + reward_model_path = args.reward_model_path + if reward_model_path is None: + # Load dataset to find parquet path + temp_dataset = LeRobotDataset(args.dataset_repo_id, download_videos=False) + parquet_path = Path(temp_dataset.root) / "sarm_progress.parquet" + reward_model_path = get_reward_model_path_from_parquet(parquet_path) + if reward_model_path: + logging.info(f"Using reward model from parquet metadata: {reward_model_path}") + else: + raise ValueError( + "--reward-model-path is required (no existing parquet with model metadata found)" + ) + + # Handle visualize-only mode + if args.visualize_only: + dataset, reward_model, preprocess = load_sarm_resources( + args.dataset_repo_id, reward_model_path, args.device + ) + logging.info(f"Visualization-only mode: visualizing {args.num_visualizations} episodes") + viz_episodes = list(range(min(args.num_visualizations, dataset.num_episodes))) + visualize_sarm_predictions( + dataset=dataset, + reward_model=reward_model, + preprocess=preprocess, + episode_indices=viz_episodes, + head_mode=args.head_mode, + output_dir=Path(args.output_dir), + stride=args.stride, + ) + print(f"\nVisualizations saved to: {Path(args.output_dir).absolute()}") + return + + # Full RABC computation (compute_sarm_progress loads model/dataset itself) + output_path = compute_sarm_progress( + dataset_repo_id=args.dataset_repo_id, + reward_model_path=reward_model_path, + output_path=args.output_path, + head_mode=args.head_mode, + device=args.device, + num_visualizations=args.num_visualizations, + output_dir=args.output_dir, + stride=args.stride, + ) + + print(f"\nSARM progress values saved to: {output_path}") + + # Upload to Hub if requested + if args.push_to_hub: + from huggingface_hub import HfApi + + api = HfApi() + hub_path = "sarm_progress.parquet" + + print(f"\nUploading to Hub: {args.dataset_repo_id}/{hub_path}") + api.upload_file( + path_or_fileobj=str(output_path), + path_in_repo=hub_path, + repo_id=args.dataset_repo_id, + repo_type="dataset", + ) + print( + f"Successfully uploaded to: https://huggingface.co/datasets/{args.dataset_repo_id}/blob/main/{hub_path}" + ) + + print("\nTo use in training, add to your config:") + print(" use_rabc: true") + print(f" rabc_progress_path: 
hf://datasets/{args.dataset_repo_id}/{hub_path}") + print(" rabc_head_mode: sparse # or dense") + else: + print("\nTo use in training, add to your config:") + print(" use_rabc: true") + print(f" rabc_progress_path: {output_path}") + print(" rabc_head_mode: sparse # or dense") + + +if __name__ == "__main__": + main() diff --git a/src/lerobot/policies/sarm/configuration_sarm.py b/src/lerobot/policies/sarm/configuration_sarm.py new file mode 100644 index 000000000..59cb352d5 --- /dev/null +++ b/src/lerobot/policies/sarm/configuration_sarm.py @@ -0,0 +1,248 @@ +#!/usr/bin/env python + +# Copyright 2025 Qianzhong Chen, Justin Yu, Mac Schwager, Pieter Abbeel, Yide Shentu, Philipp Wu +# and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +SARM: Stage-Aware Reward Modeling for Long Horizon Robot Manipulation. +Paper: https://arxiv.org/abs/2509.25358 +""" + +from dataclasses import dataclass, field + +from lerobot.configs.policies import PreTrainedConfig +from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature +from lerobot.optim.optimizers import AdamWConfig +from lerobot.optim.schedulers import CosineDecayWithWarmupSchedulerConfig + + +@PreTrainedConfig.register_subclass("sarm") +@dataclass +class SARMConfig(PreTrainedConfig): + """Configuration class for SARM (Stage-Aware Reward Modeling). + + Supports three annotation modes: + + 1. single_stage (default): No annotations needed. Uses the episode's task description + as a single stage covering the entire episode. + + 2. dense_only: Uses dense (fine-grained) annotations from VLM, with an auto-generated + single sparse "task" stage covering the full episode. The dense head learns detailed + subtask progression while sparse provides overall task completion. + + 3. dual: Full dual-head mode with both sparse (high-level) and dense (fine-grained) + annotations from VLM. Both heads are trained on their respective annotations. + + The annotation_mode determines how sparse_temporal_proportions and dense_temporal_proportions + are loaded/generated during model initialization. 
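+
+    Example (a minimal sketch; the default camera and state keys are used):
+
+        config = SARMConfig(annotation_mode="single_stage")
+        # single_stage auto-populates one sparse stage spanning the whole episode:
+        # config.num_sparse_stages == 1 and config.sparse_temporal_proportions == [1.0]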
+ """ + + annotation_mode: str = "single_stage" # "single_stage", "dense_only", or "dual" + n_obs_steps: int = 8 # Number of observation history steps + frame_gap: int = 30 # Frame gap between frames (at 30 fps = 1 second) + max_rewind_steps: int = 4 # Maximum rewind steps for temporal augmentation + + # Total frames = 1 + n_obs_steps + max_rewind_steps (computed in property) + # During training with rewind: [obs_frames] + [rewind_frames] + # During inference: [obs_frames] only + + # Architecture params + image_dim: int = 512 + text_dim: int = 512 + hidden_dim: int = 768 + num_heads: int = 12 + num_layers: int = 8 + max_state_dim: int = 32 + drop_n_last_frames: int = 1 + batch_size: int = 64 + clip_batch_size: int = 64 + dropout: float = 0.1 + stage_loss_weight: float = 1.0 # Weight for stage classification loss when using subtask annotations + + rewind_probability: float = 0.8 + language_perturbation_probability: float = 0.2 + + # Sparse annotations (high-level stages) + num_sparse_stages: int = 1 + sparse_subtask_names: list | None = None + sparse_temporal_proportions: list | None = None + + # Dense annotations (fine-grained stages) + num_dense_stages: int | None = None + dense_subtask_names: list | None = None + dense_temporal_proportions: list | None = None + + pretrained_model_path: str | None = None + device: str | None = None + image_key: str = "observation.images.top" # Key for image used from the dataset + state_key: str = "observation.state" + + # Populated by the processor (video_features, state_features, text_features) + input_features: dict = field(default_factory=lambda: {}) + + # Output features (updated in __post_init__) + output_features: dict = field( + default_factory=lambda: { + "stage": PolicyFeature(shape=(9, 5), type=FeatureType.REWARD), + "progress": PolicyFeature(shape=(9, 1), type=FeatureType.REWARD), + } + ) + + normalization_mapping: dict[str, NormalizationMode] = field( + default_factory=lambda: { + "VISUAL": NormalizationMode.IDENTITY, + "STATE": NormalizationMode.MEAN_STD, + "LANGUAGE": NormalizationMode.IDENTITY, + "REWARD": NormalizationMode.IDENTITY, + } + ) + + def __post_init__(self): + super().__post_init__() + + if self.annotation_mode not in ["single_stage", "dense_only", "dual"]: + raise ValueError( + f"annotation_mode must be 'single_stage', 'dense_only', or 'dual', got {self.annotation_mode}" + ) + + if self.annotation_mode == "single_stage": + # Use task description as stage name, full episode as one stage + self.num_sparse_stages = 1 + self.sparse_subtask_names = ["task"] + self.sparse_temporal_proportions = [1.0] + self.num_dense_stages = None + self.dense_subtask_names = None + self.dense_temporal_proportions = None + + elif self.annotation_mode == "dense_only": + self.num_sparse_stages = 1 + self.sparse_subtask_names = ["task"] + self.sparse_temporal_proportions = [1.0] + + self.input_features = {} + self.output_features = {} + + if self.image_key: + self.input_features[self.image_key] = PolicyFeature(shape=(480, 640, 3), type=FeatureType.VISUAL) + + self.input_features[self.state_key] = PolicyFeature( + shape=(self.max_state_dim,), + type=FeatureType.STATE, + ) + + # Update output features based on annotation_mode + if self.annotation_mode in ["dense_only", "dual"]: + self.output_features["sparse_stage"] = PolicyFeature( + shape=(self.num_frames, self.num_sparse_stages), type=FeatureType.REWARD + ) + self.output_features["sparse_progress"] = PolicyFeature( + shape=(self.num_frames, 1), type=FeatureType.REWARD + ) + dense_stages = 
self.num_dense_stages or self.num_sparse_stages + self.output_features["dense_stage"] = PolicyFeature( + shape=(self.num_frames, dense_stages), type=FeatureType.REWARD + ) + self.output_features["dense_progress"] = PolicyFeature( + shape=(self.num_frames, 1), type=FeatureType.REWARD + ) + else: + self.output_features["sparse_stage"] = PolicyFeature( + shape=(self.num_frames, self.num_sparse_stages), type=FeatureType.REWARD + ) + self.output_features["sparse_progress"] = PolicyFeature( + shape=(self.num_frames, 1), type=FeatureType.REWARD + ) + + if self.max_rewind_steps >= self.n_obs_steps: + raise ValueError( + f"max_rewind_steps ({self.max_rewind_steps}) must be less than n_obs_steps ({self.n_obs_steps})" + ) + if self.num_sparse_stages < 1: + raise ValueError(f"num_sparse_stages must be at least 1, got {self.num_sparse_stages}") + if ( + self.annotation_mode in ["dense_only", "dual"] + and self.num_dense_stages is not None + and self.num_dense_stages < 2 + ): + raise ValueError(f"num_dense_stages must be at least 2, got {self.num_dense_stages}") + + def get_optimizer_preset(self) -> AdamWConfig: + """Get default optimizer configuration for SARM training.""" + return AdamWConfig( + lr=5e-5, + weight_decay=1e-3, + betas=(0.9, 0.999), + eps=1e-8, + ) + + def get_scheduler_preset(self) -> CosineDecayWithWarmupSchedulerConfig: + """Get default learning rate scheduler configuration.""" + return CosineDecayWithWarmupSchedulerConfig( + peak_lr=5e-5, + decay_lr=5e-6, + num_warmup_steps=500, + num_decay_steps=50000, + ) + + def validate_features(self) -> None: + pass + + @property + def uses_dual_heads(self) -> bool: + """Whether the model uses dual heads (dense_only or dual annotation modes).""" + return self.annotation_mode in ["dense_only", "dual"] + + @property + def num_frames(self) -> int: + """Total number of frames in sequence. + + For training: 1 + n_obs_steps + max_rewind_steps + The sequence is: [obs_frames (n_obs_steps + 1)] + [rewind_frames (max_rewind_steps)] + """ + return 1 + self.n_obs_steps + self.max_rewind_steps + + @property + def max_length(self) -> int: + return self.num_frames + + @property + def observation_delta_indices(self) -> list[int]: + """Bidirectional frame sampling centered on target frame. + + Example with n_obs_steps=8, gap=30: + Before: [-120, -90, -60, -30] (4 frames) + Current: [0] (1 frame) + After: [30, 60, 90, 120] (4 frames) + Total: 9 frames + """ + half_steps = self.n_obs_steps // 2 + + past_deltas = [-self.frame_gap * i for i in range(half_steps, 0, -1)] + future_deltas = [self.frame_gap * i for i in range(1, half_steps + 1)] + obs_deltas = past_deltas + [0] + future_deltas + + # Rewind placeholders + rewind_deltas = [-self.frame_gap * (i + 1) for i in range(self.max_rewind_steps)] + + return obs_deltas + rewind_deltas + + @property + def action_delta_indices(self) -> None: + """SARM is a reward model, not an action policy.""" + return None + + @property + def reward_delta_indices(self) -> None: + return None diff --git a/src/lerobot/policies/sarm/modeling_sarm.py b/src/lerobot/policies/sarm/modeling_sarm.py new file mode 100644 index 000000000..a88b2ad64 --- /dev/null +++ b/src/lerobot/policies/sarm/modeling_sarm.py @@ -0,0 +1,793 @@ +#!/usr/bin/env python + +# Copyright 2025 Qianzhong Chen, Justin Yu, Mac Schwager, Pieter Abbeel, Yide Shentu, Philipp Wu +# and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +SARM: Stage-Aware Reward Modeling for Long Horizon Robot Manipulation. + +Paper: https://arxiv.org/abs/2509.25358 + +- StageTransformer: Predicts stage classification (sparse/dense) +- SubtaskTransformer: Predicts within-stage progress (tau) conditioned on stage +""" + +import json +import logging +import random + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F # noqa: N812 +from torch import Tensor + +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.policies.sarm.configuration_sarm import SARMConfig +from lerobot.policies.sarm.sarm_utils import ( + normalize_stage_tau, + pad_state_to_max_dim, +) + + +class StageTransformer(nn.Module): + """ + Stage classification transformer for SARM. + + Predicts which stage/subtask the current frame belongs to. + Supports both sparse (high-level) and dense (fine-grained) annotation schemes. + + Input streams: [vis_proj, lang_proj, state_proj] concatenated -> (B, N+2, T, D) + Output: stage logits (B, T, num_classes) + """ + + def __init__( + self, + d_model: int = 512, + vis_emb_dim: int = 512, + text_emb_dim: int = 512, + state_dim: int = 32, + n_layers: int = 6, + n_heads: int = 8, + dropout: float = 0.1, + num_cameras: int = 1, + num_classes_sparse: int = 4, + num_classes_dense: int = 8, + ): + super().__init__() + self.d_model = d_model + self.num_cameras = num_cameras + + # Projections + self.lang_proj = nn.Linear(text_emb_dim, d_model) + self.visual_proj = nn.Linear(vis_emb_dim, d_model) + self.state_proj = nn.Linear(state_dim, d_model) + + # Encoder + enc_layer = nn.TransformerEncoderLayer(d_model, n_heads, 4 * d_model, dropout, batch_first=True) + self.transformer = nn.TransformerEncoder(enc_layer, n_layers) + + # Positional bias on first visual frame + self.first_pos = nn.Parameter(torch.zeros(1, d_model)) + + # Shared fusion MLP + # Fuses (num_cameras + 2) streams: cameras + lang + state + fused_in = d_model * (num_cameras + 2) + self.fusion_backbone = nn.Sequential( + nn.LayerNorm(fused_in), + nn.Linear(fused_in, d_model), + nn.ReLU(), + ) + + # Scheme-specific heads + self.heads = nn.ModuleDict( + { + "sparse": nn.Linear(d_model, num_classes_sparse), + "dense": nn.Linear(d_model, num_classes_dense), + } + ) + + def _prep_lang(self, lang_emb: torch.Tensor, B: int, T: int, D: int) -> torch.Tensor: # noqa: N803 + """ + Prepare language embeddings for fusion. 
+ + Accepts lang_emb of shape: + - (B, text_emb_dim) -> broadcast across time + - (B, T, text_emb_dim) -> per-timestep (dense annotation mode) + + Returns: (B, 1, T, D) + """ + if lang_emb.dim() == 3: + # (B, T, E) -> (B, T, D) -> (B, 1, T, D) + lang_proj = self.lang_proj(lang_emb).unsqueeze(1) + else: + # (B, E) -> (B, 1, 1, D) -> expand to (B, 1, T, D) + lang_proj = self.lang_proj(lang_emb).unsqueeze(1).unsqueeze(2).expand(B, 1, T, D) + return lang_proj + + def forward( + self, + img_seq: torch.Tensor, # (B, N, T, vis_emb_dim) + lang_emb: torch.Tensor, # (B, E) or (B, T, E) + state: torch.Tensor, # (B, T, state_dim) + lengths: torch.Tensor, # (B,) - valid sequence lengths + scheme: str = "sparse", # "sparse" or "dense" + ) -> torch.Tensor: + """ + Forward pass for stage classification. + + Args: + img_seq: Image embeddings (B, N, T, vis_emb_dim) where N=num_cameras + lang_emb: Language embeddings (B, E) or (B, T, E) for dense + state: State features (B, T, state_dim) + lengths: Valid sequence lengths (B,) for masking + scheme: "sparse" or "dense" for head selection + + Returns: + Stage logits (B, T, num_classes) + """ + assert scheme in self.heads, f"Unknown scheme '{scheme}'. Use one of {list(self.heads.keys())}." + + B, N, T, _ = img_seq.shape # noqa: N806 + D = self.d_model # noqa: N806 + device = img_seq.device + + # Project inputs + vis_proj = self.visual_proj(img_seq) # (B, N, T, D) + state_proj = self.state_proj(state).unsqueeze(1) # (B, 1, T, D) + lang_proj = self._prep_lang(lang_emb, B, T, D) # (B, 1, T, D) + + # Concatenate streams + # cameras + lang + state -> (B, N+2, T, D) + x = torch.cat([vis_proj, lang_proj, state_proj], dim=1) + + # Add positional bias to first visual frame + x[:, :N, 0, :] = x[:, :N, 0, :] + self.first_pos + + # Flatten to tokens for Transformer + x_tokens = x.view(B, (N + 2) * T, D) + L = x_tokens.size(1) # noqa: N806 + + # Create padding mask + base_mask = torch.arange(T, device=device).expand(B, T) >= lengths.unsqueeze(1) # (B, T) + mask = base_mask.unsqueeze(1).expand(B, N + 2, T).reshape(B, (N + 2) * T) + + # Create causal mask + causal_mask = torch.triu(torch.ones(L, L, device=device, dtype=torch.bool), diagonal=1) + + # Encode + h = self.transformer(x_tokens, mask=causal_mask, src_key_padding_mask=mask, is_causal=True) + + # Reshape and fuse + h = h.view(B, N + 2, T, D).permute(0, 2, 1, 3).reshape(B, T, (N + 2) * D) + fused = self.fusion_backbone(h) # (B, T, D) + + # Scheme-specific logits + logits = self.heads[scheme](fused) # (B, T, num_classes) + return logits + + +class SubtaskTransformer(nn.Module): + """ + Subtask progress regression transformer for SARM. + + Predicts within-stage normalized progress (tau) conditioned on stage prior. + The stage prior is a one-hot encoding passed from StageTransformer predictions. 
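+    At inference the prior is the stage head's argmax; during training it is mixed
+    with ground-truth stages for teacher forcing (see SARMRewardModel._train_step).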
+ + Input streams: [vis_proj, lang_proj, state_proj, stage_emb] -> (B, N+3, T, D) + Output: tau predictions (B, T) in [0, 1] + """ + + def __init__( + self, + d_model: int = 512, + vis_emb_dim: int = 512, + text_emb_dim: int = 512, + state_dim: int = 32, + n_layers: int = 6, + n_heads: int = 8, + dropout: float = 0.1, + num_cameras: int = 1, + ): + super().__init__() + self.d_model = d_model + self.num_cameras = num_cameras + + # Projections + self.lang_proj = nn.Linear(text_emb_dim, d_model) + self.visual_proj = nn.Linear(vis_emb_dim, d_model) + self.state_proj = nn.Linear(state_dim, d_model) + + # Encoder + enc = nn.TransformerEncoderLayer(d_model, n_heads, 4 * d_model, dropout, batch_first=True) + self.transformer = nn.TransformerEncoder(enc, n_layers) + + # Learned bias on first visual frame + self.first_pos = nn.Parameter(torch.zeros(1, d_model)) + + # Shared fusion backbone + # Fuses (num_cameras + 3) streams: cameras + lang + state + stage_emb + fused_in = d_model * (num_cameras + 3) + self.fusion_backbone = nn.Sequential( + nn.LayerNorm(fused_in), + nn.Linear(fused_in, d_model), + nn.ReLU(), + ) + + # Scheme-specific regression heads + self.heads = nn.ModuleDict( + { + "sparse": nn.Linear(d_model, 1), + "dense": nn.Linear(d_model, 1), + } + ) + + def _prep_lang(self, lang_emb: torch.Tensor, B: int, T: int, D: int) -> torch.Tensor: # noqa: N803 + """ + Prepare language embeddings for fusion. + """ + if lang_emb.dim() == 3: + # (B, T, E) -> (B, T, D) -> (B, 1, T, D) + return self.lang_proj(lang_emb).unsqueeze(1) + else: + # (B, E) -> (B, 1, 1, D) -> (B, 1, T, D) + return self.lang_proj(lang_emb).unsqueeze(1).unsqueeze(2).expand(B, 1, T, D) + + def _stage_to_dmodel(self, stage_prior: torch.Tensor) -> torch.Tensor: + """ + Deterministic projection of one-hot stage to d_model by pad/truncate. + + Args: + stage_prior: One-hot stage embedding (B, 1, T, C) + + Returns: + Projected stage embedding (B, 1, T, d_model) + """ + B, one, T, C = stage_prior.shape # noqa: N806 + D = self.d_model # noqa: N806 + if D == C: + return stage_prior + elif D > C: + pad = torch.zeros(B, one, T, D - C, device=stage_prior.device, dtype=stage_prior.dtype) + return torch.cat([stage_prior, pad], dim=-1) + else: + return stage_prior[..., :D] + + def forward( + self, + img_seq: torch.Tensor, # (B, N, T, vis_emb_dim) + lang_emb: torch.Tensor, # (B, E) or (B, T, E) + state: torch.Tensor, # (B, T, state_dim) + lengths: torch.Tensor, # (B,) - valid sequence lengths + stage_prior: torch.Tensor, # (B, 1, T, C) one-hot from gen_stage_emb + scheme: str = "sparse", # "sparse" or "dense" + ) -> torch.Tensor: + """ + Forward pass for subtask progress regression. + + Args: + img_seq: Image embeddings (B, N, T, vis_emb_dim) + lang_emb: Language embeddings (B, E) or (B, T, E) + state: State features (B, T, state_dim) + lengths: Valid sequence lengths (B,) for masking + stage_prior: One-hot stage prior (B, 1, T, num_classes) + scheme: "sparse" or "dense" for head selection + + Returns: + Tau predictions (B, T) in [0, 1] via sigmoid + """ + assert scheme in self.heads, f"Unknown scheme '{scheme}'. Use one of {list(self.heads.keys())}." 
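+        # Token layout mirrors StageTransformer.forward, with one extra stream for
+        # the stage prior: (num_cameras + 3) streams of T tokens each.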
+ + B, N, T, _ = img_seq.shape # noqa: N806 + D = self.d_model # noqa: N806 + device = img_seq.device + + # Project inputs + vis_proj = self.visual_proj(img_seq) # (B, N, T, D) + state_proj = self.state_proj(state).unsqueeze(1) # (B, 1, T, D) + lang_proj = self._prep_lang(lang_emb, B, T, D) # (B, 1, T, D) + stage_emb = self._stage_to_dmodel(stage_prior) # (B, 1, T, D) + + # Concatenate all streams + # cameras + lang + state + stage_emb -> (B, N+3, T, D) + x = torch.cat([vis_proj, lang_proj, state_proj, stage_emb], dim=1) + + # Add positional bias to first visual frame + x[:, :N, 0, :] = x[:, :N, 0, :] + self.first_pos + + # Flatten to tokens + x_tokens = x.view(B, (N + 3) * T, D) + L = x_tokens.size(1) # noqa: N806 + + # Create padding mask + base_mask = torch.arange(T, device=device).expand(B, T) >= lengths.unsqueeze(1) + mask = base_mask.unsqueeze(1).expand(B, N + 3, T).reshape(B, (N + 3) * T) + + # Create causal mask + causal_mask = torch.triu(torch.ones(L, L, device=device, dtype=torch.bool), diagonal=1) + + # Encode + h = self.transformer(x_tokens, mask=causal_mask, src_key_padding_mask=mask, is_causal=True) + + # Reshape and fuse + h = h.view(B, N + 3, T, D) + h_flat = h.permute(0, 2, 1, 3).reshape(B, T, (N + 3) * D) + fused = self.fusion_backbone(h_flat) # (B, T, D) + + # Scheme-specific regression head -> sigmoid + r = torch.sigmoid(self.heads[scheme](fused)).squeeze(-1) # (B, T) + return r + + +def gen_stage_emb(num_classes: int, targets: torch.Tensor) -> torch.Tensor: + """ + Generate one-hot stage embeddings from targets. + + Args: + num_classes: Number of stage classes + targets: Target values (B, T) where integer part is stage index + + Returns: + One-hot stage embedding (B, 1, T, num_classes) + """ + # Integer part of float targets -> [0, C-1] + idx = targets.long().clamp(min=0, max=num_classes - 1) # (B, T) + C = num_classes # noqa: N806 + # Identity-lookup one-hot + stage_onehot = torch.eye(C, device=targets.device)[idx] # (B, T, C) + stage_onehot = stage_onehot.unsqueeze(1) # (B, 1, T, C) + return stage_onehot + + +class SARMRewardModel(PreTrainedPolicy): + """ + SARM Reward Model for stage-aware task completion rewards. + + Uses two separate transformer models: + - StageTransformer: Classifies which stage/subtask + - SubtaskTransformer: Predicts within-stage progress (tau) + + Training uses 75%/25% GT/predicted stage conditioning (teacher forcing). 
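+
+    Minimal inference sketch (feature tensors are assumed to come from the SARM
+    preprocessor; shapes follow calculate_rewards):
+
+        model = SARMRewardModel.from_pretrained("path/to/checkpoint").eval()
+        progress = model.calculate_rewards(
+            text_embeddings=text_features,    # (B, 512)
+            video_embeddings=video_features,  # (B, T, 512)
+            return_all_frames=True,
+        )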
+ """ + + name = "sarm" + config_class = SARMConfig + + def __init__(self, config: SARMConfig, dataset_stats: dict | None = None, dataset_meta=None): + super().__init__(config, dataset_stats) + config.validate_features() + self.config = config + self.dataset_stats = dataset_stats + self.device = torch.device( + config.device if config.device else "cuda" if torch.cuda.is_available() else "cpu" + ) + + # Load temporal proportions based on annotation_mode + if config.annotation_mode == "single_stage": + logging.info(f"Using single_stage mode: sparse_subtask_names={config.sparse_subtask_names}") + elif dataset_meta is not None: + self._load_temporal_proportions(dataset_meta) + + # Create two separate models + self.stage_model = StageTransformer( + d_model=config.hidden_dim, + vis_emb_dim=config.image_dim, + text_emb_dim=config.text_dim, + state_dim=config.max_state_dim, + n_layers=config.num_layers, + n_heads=config.num_heads, + dropout=config.dropout, + num_cameras=1, # Single camera for now + num_classes_sparse=config.num_sparse_stages, + num_classes_dense=config.num_dense_stages or config.num_sparse_stages, + ) + + self.subtask_model = SubtaskTransformer( + d_model=config.hidden_dim, + vis_emb_dim=config.image_dim, + text_emb_dim=config.text_dim, + state_dim=config.max_state_dim, + n_layers=config.num_layers, + n_heads=config.num_heads, + dropout=config.dropout, + num_cameras=1, + ) + + self.stage_model.to(self.device) + self.subtask_model.to(self.device) + + # GT/predicted stage ratio for teacher forcing + self.gt_stage_ratio = 0.75 + + if config.uses_dual_heads: + logging.info( + f"SARM initialized with dual heads: {config.num_sparse_stages} sparse stages, " + f"{config.num_dense_stages} dense stages" + ) + else: + logging.info(f"SARM initialized with sparse head only: {config.num_sparse_stages} stages") + + logging.info(f"SARM initialized on {self.device}") + + def _load_proportions_from_json(self, path, annotation_type: str) -> tuple[list[str], list[float]]: + """Load temporal proportions from a JSON file (preserving order).""" + if not path.exists(): + raise ValueError( + f"{annotation_type.capitalize()} temporal proportions not found at {path}. " + f"Run the subtask annotation tool with --{annotation_type}-subtasks to generate annotations." 
+ ) + with open(path) as f: + proportions_dict = json.load(f) + names = list(proportions_dict.keys()) + logging.info(f"Loaded {len(names)} {annotation_type} subtasks: {names}") + logging.info(f"{annotation_type.capitalize()} temporal proportions: {proportions_dict}") + return names, [proportions_dict[name] for name in names] + + def _load_temporal_proportions(self, dataset_meta) -> None: + """Load temporal proportions based on annotation_mode.""" + meta_path = dataset_meta.root / "meta" + + if self.config.annotation_mode == "dual": + names, props = self._load_proportions_from_json( + meta_path / "temporal_proportions_sparse.json", "sparse" + ) + ( + self.config.num_sparse_stages, + self.config.sparse_subtask_names, + self.config.sparse_temporal_proportions, + ) = len(names), names, props + + if self.config.annotation_mode in ["dense_only", "dual"]: + names, props = self._load_proportions_from_json( + meta_path / "temporal_proportions_dense.json", "dense" + ) + ( + self.config.num_dense_stages, + self.config.dense_subtask_names, + self.config.dense_temporal_proportions, + ) = len(names), names, props + if self.config.annotation_mode == "dense_only": + logging.info(f"Using auto-generated sparse 'task' stage: {self.config.sparse_subtask_names}") + + def to(self, device): + """Override to method to ensure all components move together.""" + super().to(device) + self.device = device if isinstance(device, torch.device) else torch.device(device) + self.stage_model.to(device) + self.subtask_model.to(device) + return self + + @torch.no_grad() + def calculate_rewards( + self, + text_embeddings: np.ndarray | torch.Tensor, + video_embeddings: np.ndarray | torch.Tensor, + state_features: np.ndarray | torch.Tensor | None = None, + lengths: np.ndarray | torch.Tensor | None = None, + return_all_frames: bool = False, + return_stages: bool = False, + return_confidence: bool = False, + head_mode: str | None = "sparse", + frame_index: int | None = None, + ) -> np.ndarray | tuple: + """ + Calculate rewards for given text, video, and state representations. + + This is the canonical method for SARM reward computation, used for: + - Inference/visualization + - RA-BC weight computation + + Args: + text_embeddings: Encoded text representations (batch_size, 512) + video_embeddings: Encoded video representations (batch_size, num_frames, 512) + state_features: Joint state features (batch_size, num_frames, state_dim) + lengths: Valid sequence lengths (batch_size,) + return_all_frames: If True, return rewards for all frames + return_stages: If True, also return stage predictions + return_confidence: If True, also return stage confidence + head_mode: Which head to use ("sparse" or "dense") + frame_index: Index of the target frame to extract (default: n_obs_steps). + + Returns: + Rewards and optionally stage probs/confidence. 
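+
+        Example (score only the center observation frame; shapes illustrative):
+
+            reward = model.calculate_rewards(
+                text_embeddings=text,    # (B, 512)
+                video_embeddings=video,  # (B, T, 512)
+                frame_index=model.config.n_obs_steps // 2,
+            )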
+ """ + if isinstance(text_embeddings, np.ndarray): + text_embeddings = torch.tensor(text_embeddings, dtype=torch.float32) + if isinstance(video_embeddings, np.ndarray): + video_embeddings = torch.tensor(video_embeddings, dtype=torch.float32) + if state_features is not None and isinstance(state_features, np.ndarray): + state_features = torch.tensor(state_features, dtype=torch.float32) + + # Handle single sample case + if text_embeddings.dim() == 1: + text_embeddings = text_embeddings.unsqueeze(0) + video_embeddings = video_embeddings.unsqueeze(0) + if state_features is not None: + state_features = state_features.unsqueeze(0) + single_sample = True + else: + single_sample = False + + batch_size = video_embeddings.shape[0] + seq_len = video_embeddings.shape[1] + + scheme = head_mode + + # Default lengths if not provided + if lengths is None: + lengths = torch.full((batch_size,), seq_len, dtype=torch.int32) + elif isinstance(lengths, np.ndarray): + lengths = torch.tensor(lengths, dtype=torch.int32) + + # Reshape video to (B, N, T, D) for multi-camera format + # Currently single camera: (B, T, D) -> (B, 1, T, D) + img_seq = video_embeddings.unsqueeze(1).to(self.device) + lang_emb = text_embeddings.to(self.device) + state = ( + state_features.to(self.device) + if state_features is not None + else torch.zeros(batch_size, seq_len, self.config.max_state_dim, device=self.device) + ) + lens = lengths.to(self.device) + + # Pad state to max_state_dim + state = pad_state_to_max_dim(state, self.config.max_state_dim) + + # Get num_classes for this scheme + num_classes = self.config.num_sparse_stages if scheme == "sparse" else self.config.num_dense_stages + + # Run stage model + stage_logits = self.stage_model(img_seq, lang_emb, state, lens, scheme=scheme) + stage_probs = F.softmax(stage_logits, dim=-1) # (B, T, num_classes) + stage_idx = stage_probs.argmax(dim=-1) # (B, T) + stage_conf = stage_probs.gather(-1, stage_idx.unsqueeze(-1)).squeeze(-1) # (B, T) + + # Create one-hot stage prior + stage_onehot = F.one_hot(stage_idx, num_classes=num_classes).float() # (B, T, C) + stage_emb = stage_onehot.unsqueeze(1) # (B, 1, T, C) + + # Run subtask model + tau_pred = self.subtask_model(img_seq, lang_emb, state, lens, stage_emb, scheme=scheme) + + # Compute final reward: stage + tau + raw_reward = stage_idx.float() + tau_pred # (B, T) + + # Normalize to [0, 1] using temporal proportions for proper weighting + if scheme == "sparse": + normalized_reward = normalize_stage_tau( + raw_reward, + num_stages=num_classes, + temporal_proportions=self.config.sparse_temporal_proportions, + subtask_names=self.config.sparse_subtask_names, + ) + else: + normalized_reward = normalize_stage_tau( + raw_reward, + num_stages=num_classes, + temporal_proportions=self.config.dense_temporal_proportions, + subtask_names=self.config.dense_subtask_names, + ) + + # Default frame index is n_obs_steps (last observation frame) + if frame_index is None: + frame_index = self.config.n_obs_steps + + # Prepare outputs (batch mode or no smoothing) + if return_all_frames: + rewards = normalized_reward.cpu().numpy() + else: + rewards = normalized_reward[:, frame_index].cpu().numpy() + + if single_sample: + rewards = rewards[0] if not return_all_frames else rewards[0] + + outputs = [rewards] + if return_stages: + probs = stage_probs.cpu().numpy() + if single_sample: + probs = probs[0] + outputs.append(probs) + if return_confidence: + conf = stage_conf.cpu().numpy() + if single_sample: + conf = conf[0] + outputs.append(conf) + + return outputs[0] if 
len(outputs) == 1 else tuple(outputs) + + def train(self, mode: bool = True): + """Set training mode for both models.""" + super().train(mode) + self.stage_model.train(mode) + self.subtask_model.train(mode) + return self + + def eval(self): + """Set evaluation mode for both models.""" + return self.train(False) + + def parameters(self): + """Override to return trainable parameters from both models.""" + from itertools import chain + + return chain(self.stage_model.parameters(), self.subtask_model.parameters()) + + def get_optim_params(self): + """Override to return optimizer parameters from both models.""" + return self.parameters() + + def reset(self): + """Required by PreTrainedPolicy but not used for reward models.""" + pass + + def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: + """Required by PreTrainedPolicy but not used for reward models.""" + raise NotImplementedError("SARM model does not predict action chunks") + + def select_action(self, batch: dict[str, Tensor]) -> Tensor: + """Required by PreTrainedPolicy but not used for SARM.""" + raise NotImplementedError("SARM model does not select actions") + + def _train_step( + self, + img_emb: torch.Tensor, # (B, N, T, D) + lang_emb: torch.Tensor, # (B, E) or (B, T, E) + state: torch.Tensor, # (B, T, state_dim) + lengths: torch.Tensor, # (B,) + targets: torch.Tensor, # (B, T) - format: stage.tau + scheme: str, + ) -> dict[str, torch.Tensor]: + """ + Single training step for one annotation scheme. + + Implements 75%/25% GT/predicted stage conditioning. + + Args: + img_emb: Image embeddings (B, N, T, D) + lang_emb: Language embeddings + state: State features + lengths: Valid sequence lengths + targets: Target values where floor=stage, remainder=tau + scheme: "sparse" or "dense" + + Returns: + Dict with stage_loss, subtask_loss, total_loss + """ + num_classes = self.config.num_sparse_stages if scheme == "sparse" else self.config.num_dense_stages + + # Ground truth: stage (integer) and tau (fractional) + # Clamp stage indices to valid range [0, num_classes-1] to handle edge cases + # where targets may exceed expected range (e.g., frames between subtasks) + gt_stage = torch.floor(targets).long().clamp(0, num_classes - 1) # (B, T) + gt_tau = torch.remainder(targets, 1.0) # (B, T) + + # Run stage model + stage_pred = self.stage_model(img_emb, lang_emb, state, lengths, scheme=scheme) + + # 75%/25% GT/predicted stage conditioning + if random.random() < self.gt_stage_ratio: + # Mode 1: Use ground truth stage -> one-hot + stage_emb = gen_stage_emb(num_classes, targets) # (B, 1, T, C) + else: + # Mode 2: Use predicted stage argmax -> one-hot + stage_idx = stage_pred.argmax(dim=-1) # (B, T) + stage_onehot = F.one_hot(stage_idx, num_classes=num_classes).float() # (B, T, C) + stage_emb = stage_onehot.unsqueeze(1) # (B, 1, T, C) + + # Run subtask model with stage prior + tau_pred = self.subtask_model(img_emb, lang_emb, state, lengths, stage_emb, scheme=scheme) + + # Compute losses + stage_loss = F.cross_entropy(stage_pred.view(-1, num_classes), gt_stage.view(-1), reduction="mean") + subtask_loss = F.mse_loss(tau_pred, gt_tau, reduction="mean") + + return { + "stage_loss": stage_loss, + "subtask_loss": subtask_loss, + "total_loss": stage_loss + subtask_loss, + } + + def forward(self, batch): + """ + Forward pass for SARM reward model training. + + Uses stage+tau target format where: + - Integer part = stage index + - Fractional part = within-stage progress (tau) + + Training uses 75%/25% GT/predicted stage conditioning. 
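+
+        Worked example of the target encoding: a frame 35% of the way through
+        stage 2 is stored as 2.35; torch.floor() recovers the stage index (2)
+        and torch.remainder(x, 1.0) the within-stage progress tau (0.35).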
+ + Args: + batch: Dictionary with 'observation' containing: + - 'video_features': (B, T, 512) pre-encoded video features + - 'text_features': (B, 512) or (B, T, 512) text features + - 'state_features': (B, T, state_dim) joint state features + - 'lengths': (B,) valid sequence lengths + - 'sparse_targets': (B, T) sparse targets (stage.tau format) + - 'dense_targets': (B, T) dense targets (optional, for dual mode) + + Returns: + Tuple of (total_loss, output_dict with loss components) + """ + observation = batch.get("observation", batch) + + # Extract features + video_features = observation["video_features"].to(self.device) + text_features = observation["text_features"].to(self.device) + state_features = observation.get("state_features") + if state_features is not None: + state_features = state_features.to(self.device) + + batch_size = video_features.shape[0] + seq_len = video_features.shape[1] + + # Get lengths (default to full sequence) + lengths = observation.get("lengths") + if lengths is None: + lengths = torch.full((batch_size,), seq_len, dtype=torch.int32, device=self.device) + else: + lengths = lengths.to(self.device) + + # Reshape video to (B, N, T, D) - single camera + img_emb = video_features.unsqueeze(1) + + # Pad state to max_state_dim + if state_features is None: + state_features = torch.zeros(batch_size, seq_len, self.config.max_state_dim, device=self.device) + else: + state_features = pad_state_to_max_dim(state_features, self.config.max_state_dim) + + output_dict = {} + total_loss = torch.tensor(0.0, device=self.device) + + # Sparse training (always) + sparse_targets = observation.get("sparse_targets") + if sparse_targets is None: + # Try legacy format + sparse_targets = observation.get("targets") + if sparse_targets is None: + raise ValueError("sparse_targets (or targets) is required for SARM training") + sparse_targets = sparse_targets.to(self.device) + + sparse_result = self._train_step( + img_emb, text_features, state_features, lengths, sparse_targets, scheme="sparse" + ) + output_dict["sparse_stage_loss"] = sparse_result["stage_loss"].item() + output_dict["sparse_subtask_loss"] = sparse_result["subtask_loss"].item() + total_loss = total_loss + sparse_result["total_loss"] + + # Dense training (if dual mode) + if self.config.uses_dual_heads: + dense_targets = observation.get("dense_targets") + if dense_targets is not None: + dense_targets = dense_targets.to(self.device) + dense_result = self._train_step( + img_emb, text_features, state_features, lengths, dense_targets, scheme="dense" + ) + output_dict["dense_stage_loss"] = dense_result["stage_loss"].item() + output_dict["dense_subtask_loss"] = dense_result["subtask_loss"].item() + total_loss = total_loss + dense_result["total_loss"] + + output_dict["total_loss"] = total_loss.item() + return total_loss, output_dict + + +def compute_stage_loss(stage_logits: torch.Tensor, target_stages: torch.Tensor) -> torch.Tensor: + """Compute cross-entropy loss for stage classification.""" + _, _, num_stages = stage_logits.shape + stage_logits_flat = stage_logits.reshape(-1, num_stages) + # Clamp target stage indices to valid range [0, num_stages-1] + target_stages_flat = target_stages.reshape(-1).clamp(0, num_stages - 1) + return F.cross_entropy(stage_logits_flat, target_stages_flat) diff --git a/src/lerobot/policies/sarm/processor_sarm.py b/src/lerobot/policies/sarm/processor_sarm.py new file mode 100644 index 000000000..5c617282a --- /dev/null +++ b/src/lerobot/policies/sarm/processor_sarm.py @@ -0,0 +1,518 @@ +#!/usr/bin/env python + 
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""SARM Processor for encoding images/text and generating stage+tau targets.""" + +import random +from typing import Any + +import numpy as np +import pandas as pd +import torch +from faker import Faker +from PIL import Image +from transformers import CLIPModel, CLIPProcessor + +from lerobot.configs.types import FeatureType, PolicyFeature +from lerobot.policies.sarm.configuration_sarm import SARMConfig +from lerobot.policies.sarm.sarm_utils import ( + apply_rewind_augmentation, + compute_absolute_indices, + find_stage_and_tau, + pad_state_to_max_dim, +) +from lerobot.processor import ( + AddBatchDimensionProcessorStep, + DeviceProcessorStep, + NormalizerProcessorStep, + PolicyAction, + PolicyProcessorPipeline, + ProcessorStep, + RenameObservationsProcessorStep, +) +from lerobot.processor.converters import ( + from_tensor_to_numpy, + policy_action_to_transition, + transition_to_policy_action, +) +from lerobot.processor.core import EnvTransition, TransitionKey +from lerobot.processor.pipeline import PipelineFeatureType +from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME + + +class SARMEncodingProcessorStep(ProcessorStep): + """ProcessorStep that encodes images and text with CLIP and generates stage and progress labels for SARM.""" + + def __init__( + self, + config: SARMConfig, + image_key: str | None = None, + dataset_meta=None, + dataset_stats: dict | None = None, + ): + super().__init__() + self.config = config + self.image_key = image_key or config.image_key + self.dataset_meta = dataset_meta + self.dataset_stats = dataset_stats + self.annotation_mode = config.annotation_mode + + # Helper to create temporal proportions dict + def make_props_dict(names, props): + return dict(zip(names, props, strict=True)) if names and props else None + + # Sparse annotations (always needed) + self.sparse_temporal_proportions = make_props_dict( + config.sparse_subtask_names, config.sparse_temporal_proportions + ) + self.sparse_subtask_names = config.sparse_subtask_names + + # Dense annotations (only for dual mode) + self.dense_subtask_names = config.dense_subtask_names if config.uses_dual_heads else None + self.dense_temporal_proportions = ( + make_props_dict(config.dense_subtask_names, config.dense_temporal_proportions) + if config.uses_dual_heads + else None + ) + + self.device = torch.device( + self.config.device if self.config.device else "cuda" if torch.cuda.is_available() else "cpu" + ) + + self.clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") + self.clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32", use_fast=True) + self.clip_model.to(self.device) + self.clip_model.eval() + + self.verbs = ["move", "grasp", "rotate", "push", "pull", "slide", "lift", "place"] + self.fake = Faker() + + def _find_episode_for_frame(self, frame_idx: int) -> int: + """Find the episode index for a given frame 
index.""" + for ep_idx in range(len(self.dataset_meta.episodes)): + ep_start = self.dataset_meta.episodes[ep_idx]["dataset_from_index"] + ep_end = self.dataset_meta.episodes[ep_idx]["dataset_to_index"] + if ep_start <= frame_idx < ep_end: + return ep_idx + return 0 + + def _get_episode_indices(self, frame_indices: np.ndarray, episode_index) -> np.ndarray: + """Get episode indices for each frame index.""" + if episode_index is None: + return np.array([self._find_episode_for_frame(int(f)) for f in frame_indices]) + + episode_indices = np.atleast_1d(np.asarray(from_tensor_to_numpy(episode_index))) + + # If single episode but multiple frames, compute episode for each frame + if len(episode_indices) == 1 and len(frame_indices) > 1: + return np.array([self._find_episode_for_frame(int(f)) for f in frame_indices]) + + return episode_indices + + def _generate_perturbed_task(self) -> str: + """Generate a random perturbed task string for language perturbation.""" + num_words = random.randint(1, 5) + verb = random.choice(self.verbs) + phrase = " ".join([verb] + self.fake.words(nb=num_words)) + return phrase + + def _get_annotation_config(self, annotation_type: str) -> tuple[list[str], dict[str, float] | None]: + """Get global subtask names and temporal proportions for an annotation type.""" + if annotation_type == "dense": + return self.dense_subtask_names, self.dense_temporal_proportions + return self.sparse_subtask_names, self.sparse_temporal_proportions + + def _load_episode_annotations( + self, + ep_idx: int, + episodes_df: pd.DataFrame | None, + annotation_type: str, + global_names: list[str], + ) -> tuple[list | None, list | None, list | None]: + """Load subtask annotations for an episode from DataFrame.""" + # Single-stage mode: (linear progress 0β†’1) + if episodes_df is None or len(global_names) == 1: + return None, None, None + + # Resolve column name with fallback + def col(suffix): + prefixed = f"{annotation_type}_{suffix}" + return prefixed if prefixed in episodes_df.columns else suffix + + col_names = col("subtask_names") + if col_names not in episodes_df.columns or ep_idx >= len(episodes_df): + return None, None, None + + subtask_names = episodes_df.loc[ep_idx, col_names] + if subtask_names is None or (isinstance(subtask_names, float) and pd.isna(subtask_names)): + return None, None, None + + return ( + subtask_names, + episodes_df.loc[ep_idx, col("subtask_start_frames")], + episodes_df.loc[ep_idx, col("subtask_end_frames")], + ) + + def __call__(self, transition: EnvTransition) -> EnvTransition: + """ + Encode images, text, and normalize states in the transition. 
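+
+        Expects images shaped (T, C, H, W) when sampled with delta_timestamps,
+        or (C, H, W) for a single frame; a batch dimension is added internally.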
+ + Implements SARM training data preparation: + - Applies language perturbation (20% probability) + - Applies rewind augmentation (80% probability) + - Generates stage+tau targets for all frames + - Outputs lengths tensor for valid sequence masking + """ + new_transition = transition.copy() if hasattr(transition, "copy") else dict(transition) + observation = new_transition.get(TransitionKey.OBSERVATION) + comp_data = new_transition.get(TransitionKey.COMPLEMENTARY_DATA, {}) + + frame_index = comp_data.get("index") + episode_index = comp_data.get("episode_index") + + if frame_index is None: + raise ValueError("Frame index ('index') not found in COMPLEMENTARY_DATA") + if episode_index is None: + raise ValueError("Episode index ('episode_index') not found in COMPLEMENTARY_DATA") + + frame_indices = np.atleast_1d(np.asarray(from_tensor_to_numpy(frame_index))) + episode_indices = self._get_episode_indices(frame_indices, episode_index) + + image = observation.get(self.image_key) + if isinstance(image, torch.Tensor): + image = image.cpu().numpy() + + # If 4D (T, C, H, W) from delta_timestamps, add batch dim + # If 3D (C, H, W) single frame, add batch and time dims + if image.ndim == 4: + image = image[np.newaxis, ...] # (T, C, H, W) -> (1, T, C, H, W) + elif image.ndim == 3: + image = image[np.newaxis, np.newaxis, ...] # (C, H, W) -> (1, 1, C, H, W) + + batch_size = image.shape[0] + total_frames = image.shape[1] # Should be 13: 9 obs + 4 rewind placeholders + n_obs_steps = self.config.n_obs_steps + max_rewind_steps = self.config.max_rewind_steps + n_obs_frames = 1 + n_obs_steps # 9 observation frames (including current) + + # Rewind augmentation + rewind_steps = torch.zeros(batch_size, dtype=torch.int32) + apply_rewind = self.training and random.random() < self.config.rewind_probability + + if apply_rewind and self.dataset_meta is not None: + for b_idx, (ep_idx, frame_idx) in enumerate( + zip(episode_indices.tolist(), frame_indices.tolist(), strict=True) + ): + ep_idx, frame_idx = int(ep_idx), int(frame_idx) + ep_start = self.dataset_meta.episodes[ep_idx]["dataset_from_index"] + + rewind_step, _ = apply_rewind_augmentation( + frame_idx, ep_start, n_obs_steps, max_rewind_steps, frame_gap=self.config.frame_gap + ) + rewind_steps[b_idx] = rewind_step + + # Compute valid lengths: n_obs_frames + rewind_steps + lengths = n_obs_frames + rewind_steps # (B,) + + # Apply rewind masking to images + # For frames beyond valid length, we mask with zeros (or copy last valid frame) + for b_idx in range(batch_size): + valid_len = lengths[b_idx].item() + if valid_len < total_frames: + image[b_idx, valid_len:] = 0 # Zero out frames beyond valid length + + # Encode images with CLIP + video_features = self._encode_images_batch(image) + observation["video_features"] = video_features + + state_key = self.config.state_key + state_data = observation.get(state_key) + + if isinstance(state_data, torch.Tensor): + state_tensor = state_data.float() + else: + state_tensor = torch.tensor(state_data, dtype=torch.float32) + + if state_tensor.ndim == 2: + state_tensor = state_tensor.unsqueeze(0) # (T, D) -> (1, T, D) + elif state_tensor.ndim == 1: + state_tensor = state_tensor.unsqueeze(0).unsqueeze(0) # (D,) -> (1, 1, D) + + # Apply same rewind masking to state + for b_idx in range(batch_size): + valid_len = lengths[b_idx].item() + if valid_len < state_tensor.shape[1]: + state_tensor[b_idx, valid_len:] = 0 # Zero out frames beyond valid length + + observation["state_features"] = pad_state_to_max_dim(state_tensor, 
self.config.max_state_dim) + + task = comp_data.get("task") + if isinstance(task, list): + task = task[0] if task else "" + + # Apply language perturbation during training (20% probability) + # When perturbed, targets will be zeroed to train model to output low values for irrelevant text + apply_perturbation = self.training and random.random() < self.config.language_perturbation_probability + if apply_perturbation: + task = self._generate_perturbed_task() + + # Encode text with CLIP + observation["text_features"] = self._encode_text_clip(task, batch_size) + + # Store lengths for model + observation["lengths"] = lengths + + # When language is perturbed, targets are zero so perturbed samples don't contribute to progress loss + if self.dataset_meta is not None: + episodes_df = None + if self.sparse_subtask_names != ["task"]: + episodes_df = self.dataset_meta.episodes.to_pandas() + + # Generate sparse targets + if self.sparse_temporal_proportions is not None: + if apply_perturbation: + # Zero targets when language is perturbed + sparse_targets = torch.zeros(batch_size, total_frames, dtype=torch.float32) + else: + sparse_targets = self._compute_batch_targets( + frame_indices, episode_indices, lengths, rewind_steps, episodes_df, "sparse" + ) + observation["sparse_targets"] = sparse_targets + + # Generate dense targets (for dual mode) + if self.config.uses_dual_heads and self.dense_temporal_proportions is not None: + if apply_perturbation: + # Zero targets when language is perturbed + dense_targets = torch.zeros(batch_size, total_frames, dtype=torch.float32) + else: + dense_targets = self._compute_batch_targets( + frame_indices, episode_indices, lengths, rewind_steps, episodes_df, "dense" + ) + observation["dense_targets"] = dense_targets + + new_transition[TransitionKey.OBSERVATION] = observation + return new_transition + + def _compute_batch_targets( + self, + frame_indices: np.ndarray, + episode_indices: np.ndarray, + lengths: torch.Tensor, + rewind_steps: torch.Tensor, + episodes_df: pd.DataFrame | None, + annotation_type: str, + ) -> torch.Tensor: + """Compute stage+tau targets for a batch of samples.""" + batch_size = len(frame_indices) + n_obs_steps = self.config.n_obs_steps + max_rewind_steps = self.config.max_rewind_steps + total_frames = 1 + n_obs_steps + max_rewind_steps + frame_gap = self.config.frame_gap + + global_names, temporal_props = self._get_annotation_config(annotation_type) + targets = torch.zeros(batch_size, total_frames, dtype=torch.float32) + + for b_idx in range(batch_size): + ep_idx = int(episode_indices[b_idx]) + frame_idx = int(frame_indices[b_idx]) + + ep_start = self.dataset_meta.episodes[ep_idx]["dataset_from_index"] + ep_end = self.dataset_meta.episodes[ep_idx]["dataset_to_index"] + ep_length = ep_end - ep_start + + subtask_names, subtask_start_frames, subtask_end_frames = self._load_episode_annotations( + ep_idx, episodes_df, annotation_type, global_names + ) + + # Compute observation frame indices + obs_indices, _ = compute_absolute_indices( + frame_idx, ep_start, ep_end, n_obs_steps, frame_gap=frame_gap + ) + obs_indices = obs_indices.tolist() + + # Compute targets for observation frames + for t_idx, abs_idx in enumerate(obs_indices): + rel_frame = abs_idx - ep_start + targets[b_idx, t_idx] = find_stage_and_tau( + rel_frame, + ep_length, + subtask_names, + subtask_start_frames, + subtask_end_frames, + global_names, + temporal_props, + return_combined=True, + ) + + # Compute targets for rewind frames (if any) + rewind_step = rewind_steps[b_idx].item() + if 
rewind_step > 0: + _, rewind_indices = apply_rewind_augmentation( + frame_idx, + ep_start, + n_obs_steps, + max_rewind_steps, + frame_gap=frame_gap, + rewind_step=rewind_step, + ) + + for r_idx, abs_idx in enumerate(rewind_indices[:rewind_step]): + rel_frame = max(0, abs_idx - ep_start) + targets[b_idx, n_obs_steps + 1 + r_idx] = find_stage_and_tau( + rel_frame, + ep_length, + subtask_names, + subtask_start_frames, + subtask_end_frames, + global_names, + temporal_props, + return_combined=True, + ) + + return targets + + @property + def training(self) -> bool: + return getattr(self, "_training_mode", True) + + def train(self, mode: bool = True): + """Set training mode for augmentation decisions.""" + self._training_mode = mode + return self + + def eval(self): + """Set evaluation mode (disable augmentations).""" + return self.train(False) + + @torch.no_grad() + def _encode_images_batch(self, images: np.ndarray) -> torch.Tensor: + """Encode a batch of images using CLIP. + + Args: + images: Batched images with shape: (B, T, C, H, W) + + Returns: + Encoded feature vectors with shape (B, T, 512) + """ + + batch_size, seq_length = images.shape[0], images.shape[1] + images = images.reshape(batch_size * seq_length, *images.shape[2:]) + + num_frames = images.shape[0] + images_list = [] + for i in range(num_frames): + img = images[i] + if img.shape[0] in [1, 3]: # Channel first (C, H, W) + img = img.transpose(1, 2, 0) + + # Handle single channel + if img.shape[-1] == 1: + img = np.repeat(img, 3, axis=-1) + + if img.dtype != np.uint8: + img = (img * 255).astype(np.uint8) if img.max() <= 1.0 else img.astype(np.uint8) + + images_list.append(Image.fromarray(img)) + + all_embeddings = [] + for i in range(0, num_frames, self.config.clip_batch_size): + batch_imgs = images_list[i : i + self.config.clip_batch_size] + + inputs = self.clip_processor(images=batch_imgs, return_tensors="pt") + inputs = {k: v.to(self.device) for k, v in inputs.items()} + + # Get image embeddings + embeddings = self.clip_model.get_image_features(**inputs).detach().cpu() + + # Handle single frame case + if embeddings.dim() == 1: + embeddings = embeddings.unsqueeze(0) + + all_embeddings.append(embeddings) + + all_embeddings = torch.cat(all_embeddings) # (B*T, 512) + all_embeddings = all_embeddings.reshape(batch_size, seq_length, -1) # (B, T, 512) + + return all_embeddings + + @torch.no_grad() + def _encode_text_clip(self, text: str, batch_size: int) -> torch.Tensor: + """Encode text using CLIP text encoder (per SARM paper A.4). 
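+
+        The task string is encoded once and the resulting embedding is
+        broadcast across the batch dimension, so all samples in a batch share
+        the same text features.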
+ + Args: + text: Task description text to encode + batch_size: Batch size to replicate for + + Returns: + Encoded text features with shape (B, 512) + """ + inputs = self.clip_processor.tokenizer([text], return_tensors="pt", padding=True, truncation=True) + inputs = {k: v.to(self.device) for k, v in inputs.items()} + + text_embedding = self.clip_model.get_text_features(**inputs).detach().cpu() + text_embedding = text_embedding.expand(batch_size, -1) + + return text_embedding + + def transform_features( + self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]] + ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]: + """Add encoded features to the observation features.""" + features[PipelineFeatureType.OBSERVATION]["video_features"] = PolicyFeature( + type=FeatureType.VISUAL, shape=(self.config.num_frames, self.config.image_dim) + ) + features[PipelineFeatureType.OBSERVATION]["text_features"] = PolicyFeature( + type=FeatureType.LANGUAGE, shape=(self.config.text_dim,) + ) + features[PipelineFeatureType.OBSERVATION]["state_features"] = PolicyFeature( + type=FeatureType.STATE, shape=(self.config.num_frames, self.config.max_state_dim) + ) + return features + + +def make_sarm_pre_post_processors( + config: SARMConfig, + dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None, + dataset_meta=None, +) -> tuple[ + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], + PolicyProcessorPipeline[PolicyAction, PolicyAction], +]: + """Create pre-processor and post-processor pipelines for SARM.""" + return ( + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]]( + steps=[ + AddBatchDimensionProcessorStep(), + RenameObservationsProcessorStep(rename_map={}), + NormalizerProcessorStep( + features={**config.input_features, **config.output_features}, + norm_map=config.normalization_mapping, + stats=dataset_stats, + ), + SARMEncodingProcessorStep( + config=config, dataset_meta=dataset_meta, dataset_stats=dataset_stats + ), + DeviceProcessorStep(device=config.device), + ], + name=POLICY_PREPROCESSOR_DEFAULT_NAME, + ), + PolicyProcessorPipeline[PolicyAction, PolicyAction]( + steps=[DeviceProcessorStep(device="cpu")], + name=POLICY_POSTPROCESSOR_DEFAULT_NAME, + to_transition=policy_action_to_transition, + to_output=transition_to_policy_action, + ), + ) diff --git a/src/lerobot/policies/sarm/sarm_utils.py b/src/lerobot/policies/sarm/sarm_utils.py new file mode 100644 index 000000000..5b6955d38 --- /dev/null +++ b/src/lerobot/policies/sarm/sarm_utils.py @@ -0,0 +1,295 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
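+
+"""Shared SARM utilities: stage/tau target construction, bidirectional frame
+indexing, rewind augmentation, state padding, and reward normalization."""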
+ +import random + +import numpy as np +import torch +import torch.nn.functional as F # noqa: N812 + + +def find_stage_and_tau( + current_frame: int, + episode_length: int, + subtask_names: list | None, + subtask_start_frames: list | None, + subtask_end_frames: list | None, + global_subtask_names: list, + temporal_proportions: dict, + return_combined: bool = False, +) -> tuple[int, float] | float: + """Find stage and within-stage progress (tau) for a frame. + + Args: + current_frame: Frame index relative to episode start + episode_length: Total frames in episode + subtask_names: Subtask names for this episode (None for single_stage) + subtask_start_frames: Subtask start frames + subtask_end_frames: Subtask end frames + global_subtask_names: Global list of all subtask names + temporal_proportions: Dict of temporal proportions + return_combined: If True, return stage+tau as float; else (stage_idx, tau) tuple + + Returns: + Float (stage.tau) if return_combined, else (stage_idx, tau) tuple + """ + stage_idx, tau = 0, 0.0 + num_stages = len(global_subtask_names) + + # Single-stage mode: linear progress from 0 to 1 + if num_stages == 1: + tau = min(1.0, max(0.0, current_frame / max(episode_length - 1, 1))) + elif subtask_names is None: + pass # stage_idx=0, tau=0.0 + elif current_frame < subtask_start_frames[0]: + pass # Before first subtask: stage_idx=0, tau=0.0 + elif current_frame > subtask_end_frames[-1]: + stage_idx, tau = num_stages - 1, 0.999 # After last subtask + else: + # Find which subtask this frame belongs to + found = False + for name, start, end in zip(subtask_names, subtask_start_frames, subtask_end_frames, strict=True): + if start <= current_frame <= end: + stage_idx = global_subtask_names.index(name) if name in global_subtask_names else 0 + tau = compute_tau(current_frame, start, end) + found = True + break + # Frame between subtasks - use previous subtask's end state + if not found: + for j in range(len(subtask_names) - 1): + if subtask_end_frames[j] < current_frame < subtask_start_frames[j + 1]: + name = subtask_names[j] + stage_idx = global_subtask_names.index(name) if name in global_subtask_names else j + tau = 1.0 + break + + if return_combined: + # Clamp to avoid overflow at end + if stage_idx >= num_stages - 1 and tau >= 1.0: + return num_stages - 1 + 0.999 + return stage_idx + tau + return stage_idx, tau + + +def compute_absolute_indices( + frame_idx: int, + ep_start: int, + ep_end: int, + n_obs_steps: int, + frame_gap: int = 30, +) -> tuple[torch.Tensor, torch.Tensor]: + """Compute absolute frame indices with clamping for bidirectional observation sequence. + + Bidirectional sampling centered on target frame: + - Before: [-frame_gap * half_steps, ..., -frame_gap] (half_steps frames) + - Current: [0] (1 frame) + - After: [frame_gap, ..., frame_gap * half_steps] (half_steps frames) + - Total: n_obs_steps + 1 frames + + Out-of-bounds frames are clamped (duplicated from boundary). 
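+
+    Worked example (with the defaults above): frame_idx=150, ep_start=0,
+    ep_end=300, n_obs_steps=8, frame_gap=30 yields indices
+    [30, 60, 90, 120, 150, 180, 210, 240, 270] and no out-of-bounds flags.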
+
+    Args:
+        frame_idx: Target frame index (center frame of sequence)
+        ep_start: Episode start index
+        ep_end: Episode end index (exclusive)
+        n_obs_steps: Number of observation steps (must be even for symmetric sampling)
+        frame_gap: Gap between observation frames
+
+    Returns:
+        Tuple of (indices, out_of_bounds_flags)
+    """
+    half_steps = n_obs_steps // 2
+
+    # Bidirectional deltas: past + current + future
+    past_deltas = [-frame_gap * i for i in range(half_steps, 0, -1)]
+    future_deltas = [frame_gap * i for i in range(1, half_steps + 1)]
+    delta_indices = past_deltas + [0] + future_deltas
+
+    frames = []
+    out_of_bounds = []
+
+    for delta in delta_indices:
+        target_idx = frame_idx + delta
+        # Clamp to episode bounds (duplicate boundary frames for out-of-bounds)
+        clamped_idx = max(ep_start, min(ep_end - 1, target_idx))
+        frames.append(clamped_idx)
+        # Flag as out-of-bounds if clamping occurred
+        out_of_bounds.append(1 if target_idx != clamped_idx else 0)
+
+    return torch.tensor(frames), torch.tensor(out_of_bounds)
+
+
+def apply_rewind_augmentation(
+    frame_idx: int,
+    ep_start: int,
+    n_obs_steps: int,
+    max_rewind_steps: int,
+    frame_gap: int = 30,
+    rewind_step: int | None = None,
+) -> tuple[int, list[int]]:
+    """
+    Generate rewind frame indices for temporal augmentation.
+
+    Rewind simulates going backwards through previously seen frames,
+    starting from before the earliest observation frame (for bidirectional sampling).
+    Appends reversed frames after the observation sequence.
+
+    Args:
+        frame_idx: Target frame index (center of bidirectional observation window)
+        ep_start: Episode start index
+        n_obs_steps: Number of observation steps
+        max_rewind_steps: Maximum rewind steps
+        frame_gap: Gap between frames
+        rewind_step: If provided, use this exact rewind step (for deterministic behavior).
+            If None, sample randomly.
+
+    Returns:
+        Tuple of (rewind_step, rewind_indices)
+    """
+    # For bidirectional sampling, earliest obs frame is at frame_idx - half_steps * frame_gap
+    half_steps = n_obs_steps // 2
+    earliest_obs_frame = frame_idx - half_steps * frame_gap
+
+    # Required history: frames before earliest observation frame
+    if earliest_obs_frame <= ep_start:
+        return 0, []  # No history before observation window
+
+    # Max valid rewind steps based on available history before earliest obs frame
+    available_history = earliest_obs_frame - ep_start
+    max_valid_step = available_history // frame_gap
+    max_rewind = min(max_rewind_steps, max(0, max_valid_step))
+
+    if max_rewind <= 0:
+        return 0, []
+
+    # Sample rewind steps if not provided
+    rewind_step = random.randint(1, max_rewind) if rewind_step is None else min(rewind_step, max_rewind)
+
+    if rewind_step == 0:
+        return 0, []
+
+    # Generate rewind indices going backwards from earliest obs frame
+    # rewind_indices[0] is closest to obs window, rewind_indices[-1] is furthest back
+    rewind_indices = []
+    for i in range(1, rewind_step + 1):
+        idx = earliest_obs_frame - i * frame_gap
+        idx = max(ep_start, idx)  # Clamp to episode start
+        rewind_indices.append(idx)
+
+    return rewind_step, rewind_indices
+
+
+def compute_tau(current_frame: int | float, subtask_start: int | float, subtask_end: int | float) -> float:
+    """Compute tau_t = (t - s_k) / (e_k - s_k) in [0, 1].
Returns 1.0 for zero-duration subtasks.""" + duration = subtask_end - subtask_start + if duration <= 0: + return 1.0 + return float(np.clip((current_frame - subtask_start) / duration, 0.0, 1.0)) + + +def pad_state_to_max_dim(state: torch.Tensor, max_state_dim: int) -> torch.Tensor: + """Pad the state tensor's last dimension to max_state_dim with zeros.""" + current_dim = state.shape[-1] + if current_dim >= max_state_dim: + return state[..., :max_state_dim] # Truncate if larger + + # Pad with zeros on the right + padding = (0, max_state_dim - current_dim) # (left, right) for last dim + return F.pad(state, padding, mode="constant", value=0) + + +def temporal_proportions_to_breakpoints( + temporal_proportions: dict[str, float] | list[float] | None, + subtask_names: list[str] | None = None, +) -> list[float] | None: + """Convert temporal proportions to cumulative breakpoints for normalization.""" + if temporal_proportions is None: + return None + + if isinstance(temporal_proportions, dict): + if subtask_names is not None: + proportions = [temporal_proportions.get(name, 0.0) for name in subtask_names] + else: + proportions = list(temporal_proportions.values()) + else: + proportions = list(temporal_proportions) + + total = sum(proportions) + if total > 0 and abs(total - 1.0) > 1e-6: + proportions = [p / total for p in proportions] + + breakpoints = [0.0] + cumsum = 0.0 + for prop in proportions: + cumsum += prop + breakpoints.append(cumsum) + breakpoints[-1] = 1.0 + + return breakpoints + + +def normalize_stage_tau( + x: float | torch.Tensor, + num_stages: int | None = None, + breakpoints: list[float] | None = None, + temporal_proportions: dict[str, float] | list[float] | None = None, + subtask_names: list[str] | None = None, +) -> float | torch.Tensor: + """ + Normalize stage+tau reward to [0, 1] with custom breakpoints. + + Maps stage index + within-stage tau to normalized progress [0, 1]. + The breakpoints are designed to give appropriate weight to each stage + based on their importance in the task (using temporal proportions). + + Priority: breakpoints > temporal_proportions > linear fallback + + Args: + x: Raw reward value (stage index + tau) where stage ∈ [0, num_stages-1] and tau ∈ [0, 1) + num_stages: Number of stages (required if breakpoints/proportions not provided) + breakpoints: Optional custom breakpoints list of length num_stages + 1. + temporal_proportions: Optional temporal proportions dict/list to compute breakpoints. 
+ subtask_names: Optional ordered list of subtask names (for dict proportions) + + Returns: + Normalized progress value ∈ [0, 1] + """ + if breakpoints is not None: + num_stages = len(breakpoints) - 1 + elif temporal_proportions is not None: + breakpoints = temporal_proportions_to_breakpoints(temporal_proportions, subtask_names) + num_stages = len(breakpoints) - 1 + elif num_stages is not None: + breakpoints = [i / num_stages for i in range(num_stages + 1)] + else: + raise ValueError("Either num_stages, breakpoints, or temporal_proportions must be provided") + + if isinstance(x, torch.Tensor): + result = torch.zeros_like(x) + for i in range(num_stages): + mask = (x >= i) & (x < i + 1) + tau_in_stage = x - i + result[mask] = breakpoints[i] + tau_in_stage[mask] * (breakpoints[i + 1] - breakpoints[i]) + result[x >= num_stages] = 1.0 + return result.clamp(0.0, 1.0) + else: + if x < 0: + return 0.0 + if x >= num_stages: + return 1.0 + stage = int(x) + tau = x - stage + return breakpoints[stage] + tau * (breakpoints[stage + 1] - breakpoints[stage]) diff --git a/src/lerobot/policies/smolvla/modeling_smolvla.py b/src/lerobot/policies/smolvla/modeling_smolvla.py index 485d3e4e5..f998661f9 100644 --- a/src/lerobot/policies/smolvla/modeling_smolvla.py +++ b/src/lerobot/policies/smolvla/modeling_smolvla.py @@ -231,6 +231,7 @@ class SmolVLAPolicy(PreTrainedPolicy): def __init__( self, config: SmolVLAConfig, + **kwargs, ): """ Args: @@ -352,8 +353,19 @@ class SmolVLAPolicy(PreTrainedPolicy): def _rtc_enabled(self) -> bool: return self.config.rtc_config is not None and self.config.rtc_config.enabled - def forward(self, batch: dict[str, Tensor], noise=None, time=None) -> dict[str, Tensor]: - """Do a full training forward pass to compute the loss""" + def forward( + self, batch: dict[str, Tensor], noise=None, time=None, reduction: str = "mean" + ) -> dict[str, Tensor]: + """Do a full training forward pass to compute the loss. + + Args: + batch: Training batch containing observations and actions. + noise: Optional noise tensor for flow matching. + time: Optional time tensor for flow matching. + reduction: How to reduce the loss. 
Options: + - "mean": Return scalar mean loss (default, backward compatible) + - "none": Return per-sample losses of shape (batch_size,) for RA-BC weighting + """ if self.config.adapt_to_pi_aloha: batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE]) batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION]) @@ -377,11 +389,16 @@ class SmolVLAPolicy(PreTrainedPolicy): losses = losses[:, :, : self.config.max_action_dim] loss_dict["losses_after_rm_padding"] = losses.clone() - # For backward pass - loss = losses.mean() - # For backward pass - loss_dict["loss"] = loss.item() - return loss, loss_dict + if reduction == "none": + # Return per-sample losses (B,) by averaging over time and action dims + per_sample_loss = losses.mean(dim=(1, 2)) + loss_dict["loss"] = per_sample_loss.mean().item() + return per_sample_loss, loss_dict + else: + # Default: return scalar mean loss + loss = losses.mean() + loss_dict["loss"] = loss.item() + return loss, loss_dict def prepare_images(self, batch): """Apply SmolVLA preprocessing to the images, like resizing to 224x224 and padding to keep aspect ratio, and diff --git a/src/lerobot/policies/tdmpc/modeling_tdmpc.py b/src/lerobot/policies/tdmpc/modeling_tdmpc.py index 195cf6154..f83c82e21 100644 --- a/src/lerobot/policies/tdmpc/modeling_tdmpc.py +++ b/src/lerobot/policies/tdmpc/modeling_tdmpc.py @@ -65,6 +65,7 @@ class TDMPCPolicy(PreTrainedPolicy): def __init__( self, config: TDMPCConfig, + **kwargs, ): """ Args: diff --git a/src/lerobot/policies/utils.py b/src/lerobot/policies/utils.py index c4ca35b72..bfbe2bf1d 100644 --- a/src/lerobot/policies/utils.py +++ b/src/lerobot/policies/utils.py @@ -231,11 +231,20 @@ def validate_visual_features_consistency( """ Validates visual feature consistency between a policy config and provided dataset/environment features. + Validation passes if EITHER: + - Policy's expected visuals are a subset of dataset (policy uses some cameras, dataset has more) + - Dataset's provided visuals are a subset of policy (policy declares extras for flexibility) + Args: cfg (PreTrainedConfig): The model or policy configuration containing input_features and type. features (Dict[str, PolicyFeature]): A mapping of feature names to PolicyFeature objects. 
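+
+    Example: a policy expecting {"observation.images.top"} passes against a
+    dataset providing {"observation.images.top", "observation.images.wrist"}
+    (policy subset of dataset), and vice versa; disjoint camera sets still
+    raise a feature mismatch error.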
""" expected_visuals = {k for k, v in cfg.input_features.items() if v.type == FeatureType.VISUAL} provided_visuals = {k for k, v in features.items() if v.type == FeatureType.VISUAL} - if not provided_visuals.issubset(expected_visuals): + + # Accept if either direction is a subset + policy_subset_of_dataset = expected_visuals.issubset(provided_visuals) + dataset_subset_of_policy = provided_visuals.issubset(expected_visuals) + + if not (policy_subset_of_dataset or dataset_subset_of_policy): raise_feature_mismatch_error(provided_visuals, expected_visuals) diff --git a/src/lerobot/policies/vqbet/modeling_vqbet.py b/src/lerobot/policies/vqbet/modeling_vqbet.py index 91d609701..359b4fdb1 100644 --- a/src/lerobot/policies/vqbet/modeling_vqbet.py +++ b/src/lerobot/policies/vqbet/modeling_vqbet.py @@ -47,6 +47,7 @@ class VQBeTPolicy(PreTrainedPolicy): def __init__( self, config: VQBeTConfig | None = None, + **kwargs, ): """ Args: diff --git a/src/lerobot/policies/xvla/modeling_xvla.py b/src/lerobot/policies/xvla/modeling_xvla.py index 27c7c6e1b..0436ae527 100644 --- a/src/lerobot/policies/xvla/modeling_xvla.py +++ b/src/lerobot/policies/xvla/modeling_xvla.py @@ -273,7 +273,7 @@ class XVLAPolicy(PreTrainedPolicy): config_class = XVLAConfig name = "xvla" - def __init__(self, config: XVLAConfig): + def __init__(self, config: XVLAConfig, **kwargs): super().__init__(config) config.validate_features() florence_config = config.get_florence_config() diff --git a/src/lerobot/processor/converters.py b/src/lerobot/processor/converters.py index 6b0b67598..126be0e36 100644 --- a/src/lerobot/processor/converters.py +++ b/src/lerobot/processor/converters.py @@ -170,8 +170,9 @@ def _extract_complementary_data(batch: dict[str, Any]) -> dict[str, Any]: task_key = {"task": batch["task"]} if "task" in batch else {} index_key = {"index": batch["index"]} if "index" in batch else {} task_index_key = {"task_index": batch["task_index"]} if "task_index" in batch else {} + episode_index_key = {"episode_index": batch["episode_index"]} if "episode_index" in batch else {} - return {**pad_keys, **task_key, **index_key, **task_index_key} + return {**pad_keys, **task_key, **index_key, **task_index_key, **episode_index_key} def create_transition( diff --git a/src/lerobot/scripts/lerobot_train.py b/src/lerobot/scripts/lerobot_train.py index 1ebdee600..6cf733442 100644 --- a/src/lerobot/scripts/lerobot_train.py +++ b/src/lerobot/scripts/lerobot_train.py @@ -62,6 +62,7 @@ def update_policy( accelerator: Accelerator, lr_scheduler=None, lock=None, + rabc_weights_provider=None, ) -> tuple[MetricsTracker, dict]: """ Performs a single training step to update the policy's weights. @@ -78,6 +79,7 @@ def update_policy( accelerator: The Accelerator instance for distributed training and mixed precision. lr_scheduler: An optional learning rate scheduler. lock: An optional lock for thread-safe optimizer updates. + rabc_weights_provider: Optional RABCWeights instance for sample weighting. 
Returns:
         A tuple containing:
@@ -87,9 +89,30 @@
     start_time = time.perf_counter()
     policy.train()
 
+    # Get RA-BC weights if enabled
+    rabc_batch_weights = None
+    rabc_batch_stats = None
+    if rabc_weights_provider is not None:
+        rabc_batch_weights, rabc_batch_stats = rabc_weights_provider.compute_batch_weights(batch)
+
     # Let accelerator handle mixed precision
     with accelerator.autocast():
-        loss, output_dict = policy.forward(batch)
+        # Use per-sample loss when RA-BC is enabled for proper weighting
+        if rabc_batch_weights is not None:
+            # Get per-sample losses
+            per_sample_loss, output_dict = policy.forward(batch, reduction="none")
+
+            # Apply RA-BC weights: L_RA-BC = sum_i(w_i * l_i) / (sum_i(w_i) + eps)
+            # rabc_batch_weights is already normalized to sum to batch_size
+            epsilon = 1e-6
+            loss = (per_sample_loss * rabc_batch_weights).sum() / (rabc_batch_weights.sum() + epsilon)
+            # Log raw mean weight (before normalization) - this is the meaningful metric
+            output_dict["rabc_mean_weight"] = rabc_batch_stats["raw_mean_weight"]
+            output_dict["rabc_num_zero_weight"] = rabc_batch_stats["num_zero_weight"]
+            output_dict["rabc_num_full_weight"] = rabc_batch_stats["num_full_weight"]
+        else:
+            loss, output_dict = policy.forward(batch)
+
     # TODO(rcadene): policy.unnormalize_outputs(out_dict)
 
     # Use accelerator's backward method
@@ -141,8 +164,6 @@
         cfg: A `TrainPipelineConfig` object containing all training configurations.
         accelerator: Optional Accelerator instance. If None, one will be created automatically.
     """
-    cfg.validate()
-
     # Create Accelerator if not provided
     # It will automatically detect if running in distributed mode or single-process mode
     # We set step_scheduler_with_optimizer=False to prevent accelerate from adjusting the lr_scheduler steps based on the num_processes
@@ -159,6 +180,8 @@
     # When using accelerate, only the main process should log to avoid duplicate outputs
     is_main_process = accelerator.is_main_process
 
+    cfg.validate()
+
     # Only log on main process
     if is_main_process:
         logging.info(pformat(cfg.to_dict()))
@@ -217,6 +240,10 @@
         # Only provide dataset_stats when not resuming from saved processor state
         processor_kwargs["dataset_stats"] = dataset.meta.stats
 
+    # For SARM, always provide dataset_meta for progress normalization
+    if cfg.policy.type == "sarm":
+        processor_kwargs["dataset_meta"] = dataset.meta
+
     if cfg.policy.pretrained_path is not None:
         processor_kwargs["preprocessor_overrides"] = {
             "device_processor": {"device": device.type},
@@ -248,6 +275,29 @@
     logging.info("Creating optimizer and scheduler")
     optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy)
 
+    # Load precomputed SARM progress for RA-BC if enabled
+    # Generate progress using: src/lerobot/policies/sarm/compute_rabc_weights.py
+    rabc_weights = None
+    if cfg.use_rabc:
+        from lerobot.utils.rabc import RABCWeights
+
+        # Get chunk_size from policy config
+        chunk_size = getattr(policy.config, "chunk_size", None)
+        if chunk_size is None:
+            raise ValueError("chunk_size not found in policy config (required for RA-BC)")
+
+        head_mode = getattr(cfg, "rabc_head_mode", "sparse")
+        logging.info(f"Loading SARM progress for RA-BC from {cfg.rabc_progress_path}")
+        logging.info(f"Using chunk_size={chunk_size} from policy config,
head_mode={head_mode}") + rabc_weights = RABCWeights( + progress_path=cfg.rabc_progress_path, + chunk_size=chunk_size, + head_mode=head_mode, + kappa=getattr(cfg, "rabc_kappa", 0.01), + epsilon=getattr(cfg, "rabc_epsilon", 1e-6), + device=device, + ) + step = 0 # number of policy updates (forward + backward + optim) if cfg.resume: @@ -327,7 +377,9 @@ def train(cfg: TrainPipelineConfig, accelerator: Accelerator | None = None): ) if is_main_process: - logging.info("Start offline training on a fixed dataset") + logging.info( + f"Start offline training on a fixed dataset, with effective batch size: {effective_batch_size}" + ) for _ in range(step, cfg.steps): start_time = time.perf_counter() @@ -343,6 +395,7 @@ def train(cfg: TrainPipelineConfig, accelerator: Accelerator | None = None): cfg.optimizer.grad_clip_norm, accelerator=accelerator, lr_scheduler=lr_scheduler, + rabc_weights_provider=rabc_weights, ) # Note: eval and checkpoint happens *after* the `step`th training update has completed, so we @@ -359,6 +412,16 @@ def train(cfg: TrainPipelineConfig, accelerator: Accelerator | None = None): wandb_log_dict = train_tracker.to_dict() if output_dict: wandb_log_dict.update(output_dict) + # Log RA-BC statistics if enabled + if rabc_weights is not None: + rabc_stats = rabc_weights.get_stats() + wandb_log_dict.update( + { + "rabc_delta_mean": rabc_stats["delta_mean"], + "rabc_delta_std": rabc_stats["delta_std"], + "rabc_num_frames": rabc_stats["num_frames"], + } + ) wandb_logger.log_dict(wandb_log_dict, step) train_tracker.reset_averages() diff --git a/src/lerobot/utils/rabc.py b/src/lerobot/utils/rabc.py new file mode 100644 index 000000000..c529f3ccc --- /dev/null +++ b/src/lerobot/utils/rabc.py @@ -0,0 +1,276 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from pathlib import Path + +import numpy as np +import pandas as pd +import torch + + +class RABCWeights: + """ + Load precomputed SARM progress values and compute RA-BC weights during training. + + Progress values are loaded from a parquet file (generated by compute_rabc_weights.py). + During training, computes: + - progress_delta = progress[t + chunk_size] - progress[t] + - rabc_weight based on the delta (paper Eq. 
8-9) + + Args: + progress_path: Path to parquet file with precomputed progress values + chunk_size: Number of frames ahead for computing progress delta + head_mode: Which SARM head to use ("sparse" or "dense") + kappa: Hard threshold for high-quality samples (default: 0.01) + epsilon: Small constant for numerical stability (default: 1e-6) + fallback_weight: Weight to use for frames without valid delta (default: 1.0) + device: Device to return tensors on + """ + + def __init__( + self, + progress_path: str | Path, + chunk_size: int = 50, + head_mode: str = "sparse", + kappa: float = 0.01, + epsilon: float = 1e-6, + fallback_weight: float = 1.0, + device: torch.device = None, + ): + self.progress_path = Path(progress_path) + self.chunk_size = chunk_size + self.head_mode = head_mode + self.kappa = kappa + self.epsilon = epsilon + self.fallback_weight = fallback_weight + self.device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu") + + # Determine progress column name + self.progress_column = f"progress_{head_mode}" + + # Load progress values + logging.info(f"Loading SARM progress values from {self.progress_path}") + self.df = pd.read_parquet(self.progress_path) + + # Check if the requested head mode column exists + if self.progress_column not in self.df.columns: + available = [c for c in self.df.columns if c.startswith("progress")] + raise ValueError( + f"Column '{self.progress_column}' not found. Available progress columns: {available}" + ) + + logging.info(f"Using progress column: {self.progress_column}") + + self.progress_lookup = {} + self.episode_lookup = {} + + for _, row in self.df.iterrows(): + global_idx = int(row["index"]) + progress = row[self.progress_column] + episode_idx = int(row["episode_index"]) + + if not np.isnan(progress): + self.progress_lookup[global_idx] = float(progress) + self.episode_lookup[global_idx] = episode_idx + + # Build episode boundaries for delta computation + self.episode_boundaries = {} + for episode_idx in self.df["episode_index"].unique(): + ep_df = self.df[self.df["episode_index"] == episode_idx] + self.episode_boundaries[int(episode_idx)] = { + "start": int(ep_df["index"].min()), + "end": int(ep_df["index"].max()) + 1, + } + + logging.info(f"Loaded {len(self.progress_lookup)} frame progress values") + logging.info(f"Chunk size for delta computation: {chunk_size}") + + # Compute global statistics for weight computation + self._compute_global_stats() + + def _compute_global_stats(self): + """Compute global mean and std of progress deltas for weight calculation.""" + all_deltas = [] + + for global_idx, progress in self.progress_lookup.items(): + episode_idx = self.episode_lookup.get(global_idx) + if episode_idx is None: + continue + + bounds = self.episode_boundaries.get(episode_idx) + if bounds is None: + continue + + future_idx = global_idx + self.chunk_size + if future_idx >= bounds["end"]: + # Near end of episode: use last frame's progress + future_idx = bounds["end"] - 1 + + future_progress = self.progress_lookup.get(future_idx) + if future_progress is not None: + delta = future_progress - progress + all_deltas.append(delta) + + if all_deltas: + self.delta_mean = max(np.mean(all_deltas), 0.0) + self.delta_std = max(np.std(all_deltas), self.epsilon) + logging.info(f"Progress delta stats: mean={self.delta_mean:.4f}, std={self.delta_std:.4f}") + else: + self.delta_mean = 0.0 + self.delta_std = self.epsilon + logging.warning("No valid progress deltas found, using default stats") + + def compute_batch_weights(self, batch: dict) -> 
tuple[torch.Tensor, dict]:
+        """
+        Compute RA-BC weights for a batch.
+
+        For each sample:
+        1. Get progress at current frame
+        2. Get progress at frame + chunk_size (within same episode)
+        3. Compute delta = future_progress - current_progress
+        4. Compute weight using paper Eq. 8-9
+
+        Args:
+            batch: Training batch containing "index" key with global frame indices
+
+        Returns:
+            Tuple of:
+                - Weights tensor (batch_size,) normalized to sum to batch_size
+                - Stats dict with raw_mean_weight, num_zero_weight, num_full_weight
+        """
+        indices = batch.get("index")
+        if indices is None:
+            logging.warning("RA-BC: Batch missing 'index' key, using uniform weights")
+            batch_size = self._get_batch_size(batch)
+            # Return all stats keys so callers can log them unconditionally
+            uniform_stats = {"raw_mean_weight": 1.0, "num_zero_weight": 0, "num_full_weight": 0}
+            return torch.ones(batch_size, device=self.device), uniform_stats
+
+        # Convert to list of ints
+        if isinstance(indices, torch.Tensor):
+            indices = indices.cpu().numpy().tolist()
+        elif isinstance(indices, np.ndarray):
+            indices = indices.tolist()
+
+        # Compute deltas and weights for each sample
+        deltas = []
+        for idx in indices:
+            idx = int(idx)
+            delta = self._compute_delta(idx)
+            deltas.append(delta)
+
+        deltas = np.array(deltas, dtype=np.float32)
+
+        # Compute weights from deltas
+        weights = self._compute_weights(deltas)
+
+        # Compute stats before normalization for logging
+        raw_mean_weight = float(np.nanmean(weights))
+        num_zero_weight = int(np.sum(weights == 0))
+        num_full_weight = int(np.sum(weights == 1.0))
+        batch_stats = {
+            "raw_mean_weight": raw_mean_weight,
+            "num_zero_weight": num_zero_weight,
+            "num_full_weight": num_full_weight,
+        }
+
+        weights = torch.tensor(weights, device=self.device, dtype=torch.float32)
+
+        # Normalize to sum to batch_size
+        batch_size = len(weights)
+        weight_sum = weights.sum() + self.epsilon
+        weights = weights * batch_size / weight_sum
+
+        return weights, batch_stats
+
+    def _compute_delta(self, global_idx: int) -> float:
+        """Compute progress delta for a single frame."""
+        current_progress = self.progress_lookup.get(global_idx)
+        if current_progress is None:
+            return np.nan
+
+        episode_idx = self.episode_lookup.get(global_idx)
+        if episode_idx is None:
+            return np.nan
+
+        bounds = self.episode_boundaries.get(episode_idx)
+        if bounds is None:
+            return np.nan
+
+        future_idx = global_idx + self.chunk_size  # delta horizon = chunk_size
+        if future_idx >= bounds["end"]:
+            # Near end of episode: use last frame's progress instead
+            future_idx = bounds["end"] - 1
+
+        future_progress = self.progress_lookup.get(future_idx)
+        if future_progress is None:
+            return np.nan
+
+        return future_progress - current_progress
+
+    def _compute_weights(self, deltas: np.ndarray) -> np.ndarray:
+        """
+        Compute RA-BC weights from progress deltas.
+
+        Following paper Eq. 8-9, with r_i the progress delta and mu, sigma the
+        global delta statistics:
+        - Soft weight:  w~_i = clip((r_i - (mu - 2*sigma)) / (4*sigma + eps), 0, 1)
+        - Final weight: w_i  = 1{r_i > kappa} + 1{0 <= r_i <= kappa} * w~_i
+
+        Returns:
+            Array of weights
+        """
+        valid_mask = ~np.isnan(deltas)
+
+        # Compute soft weights using global statistics
+        lower_bound = self.delta_mean - 2 * self.delta_std
+        soft_weights = (deltas - lower_bound) / (4 * self.delta_std + self.epsilon)
+        soft_weights = np.clip(soft_weights, 0.0, 1.0)
+
+        # Apply paper's Eq. 9
+        weights = np.zeros_like(deltas, dtype=np.float32)
+
+        # High quality: r_i > kappa -> weight = 1
+        high_quality_mask = deltas > self.kappa
+        weights[high_quality_mask] = 1.0
+
+        # Moderate quality: 0 <= r_i <= kappa -> weight = soft_weight
+        moderate_mask = (deltas >= 0) & (deltas <= self.kappa)
+        weights[moderate_mask] = soft_weights[moderate_mask]
+
+        # Negative progress: r_i < 0 -> weight = 0 (already 0)
+        # Invalid (NaN): use fallback weight
+        weights[~valid_mask] = self.fallback_weight
+
+        return weights
+
+    def _get_batch_size(self, batch: dict) -> int:
+        """Determine the batch size from the first batched entry found."""
+        for key in ["action", "index"]:
+            if key in batch:
+                val = batch[key]
+                if isinstance(val, (torch.Tensor, np.ndarray)):
+                    return val.shape[0]
+        return 1
+
+    def get_stats(self) -> dict:
+        """Return global RA-BC statistics for logging."""
+        return {
+            "num_frames": len(self.progress_lookup),
+            "chunk_size": self.chunk_size,
+            "head_mode": self.head_mode,
+            "delta_mean": self.delta_mean,
+            "delta_std": self.delta_std,
+            "kappa": self.kappa,
+        }
diff --git a/tests/policies/test_sarm_processor.py b/tests/policies/test_sarm_processor.py
new file mode 100644
index 000000000..66404f663
--- /dev/null
+++ b/tests/policies/test_sarm_processor.py
@@ -0,0 +1,694 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
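+
+"""Tests for SARMEncodingProcessorStep. CLIP is mocked throughout, so the
+suite runs without downloading model weights."""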
+ +import pytest + +pytest.importorskip("faker") + +from unittest.mock import MagicMock, patch + +import numpy as np +import pandas as pd +import pytest +import torch + +from lerobot.processor.core import TransitionKey + + +class MockDatasetMeta: + """Mock dataset metadata for testing processor.""" + + def __init__(self, episodes: list[dict]): + self._episodes = episodes + + @property + def episodes(self): + """Return episodes as a mock object with to_pandas() method.""" + mock = MagicMock() + mock.__len__ = lambda s: len(self._episodes) + mock.__getitem__ = lambda s, idx: self._episodes[idx] + mock.to_pandas = lambda: pd.DataFrame(self._episodes) + return mock + + +class MockConfig: + """Mock SARMConfig for testing processor methods.""" + + def __init__( + self, + n_obs_steps: int = 8, + max_rewind_steps: int = 4, + frame_gap: int = 30, + sparse_subtask_names: list = None, + sparse_temporal_proportions: list = None, + dense_subtask_names: list = None, + dense_temporal_proportions: list = None, + image_key: str = "observation.images.top", + state_key: str = "observation.state", + max_state_dim: int = 32, + device: str = None, + rewind_probability: float = 0.8, + language_perturbation_probability: float = 0.2, + annotation_mode: str = "dual", + clip_batch_size: int = 64, + text_dim: int = 512, + ): + self.n_obs_steps = n_obs_steps + self.max_rewind_steps = max_rewind_steps + self.frame_gap = frame_gap + self.sparse_subtask_names = sparse_subtask_names or ["task"] + self.sparse_temporal_proportions = sparse_temporal_proportions or [1.0] + self.dense_subtask_names = dense_subtask_names + self.dense_temporal_proportions = dense_temporal_proportions + self.uses_dual_heads = annotation_mode in ["dense_only", "dual"] + self.image_key = image_key + self.state_key = state_key + self.max_state_dim = max_state_dim + self.device = device + self.rewind_probability = rewind_probability + self.language_perturbation_probability = language_perturbation_probability + self.annotation_mode = annotation_mode + self.clip_batch_size = clip_batch_size + self.text_dim = text_dim + + # Compute observation delta indices (same as config: bidirectional) + half_steps = self.n_obs_steps // 2 + past_deltas = [-self.frame_gap * i for i in range(half_steps, 0, -1)] + future_deltas = [self.frame_gap * i for i in range(1, half_steps + 1)] + obs_deltas = past_deltas + [0] + future_deltas + rewind_deltas = [-self.frame_gap * (i + 1) for i in range(self.max_rewind_steps)] + self.observation_delta_indices = obs_deltas + rewind_deltas + + @property + def num_frames(self) -> int: + return 1 + self.n_obs_steps + self.max_rewind_steps + + +class TestSARMEncodingProcessorStepEndToEnd: + """End-to-end test for SARMEncodingProcessorStep with dummy batch data.""" + + @pytest.fixture + def mock_clip_model(self): + """Mock CLIP model to avoid loading real weights.""" + with ( + patch("lerobot.policies.sarm.processor_sarm.CLIPModel") as mock_model_cls, + patch("lerobot.policies.sarm.processor_sarm.CLIPProcessor") as mock_processor_cls, + ): + # Mock the CLIP model - return embeddings based on input batch size + mock_model = MagicMock() + + def get_image_features_side_effect(**kwargs): + pixel_values = kwargs.get("pixel_values") + batch_size = pixel_values.shape[0] if pixel_values is not None else 1 + return torch.randn(batch_size, 512) + + mock_model.get_image_features.side_effect = get_image_features_side_effect + mock_model.get_text_features.return_value = torch.randn(1, 512) + mock_model.to.return_value = mock_model + 
mock_model_cls.from_pretrained.return_value = mock_model + + # Mock the CLIP processor - return tensors based on input images + mock_processor = MagicMock() + + def processor_side_effect(images=None, **kwargs): + num_images = len(images) if images is not None else 1 + return { + "pixel_values": torch.randn(num_images, 3, 224, 224), + } + + mock_processor.side_effect = processor_side_effect + # Mock tokenizer for text encoding + mock_processor.tokenizer.return_value = { + "input_ids": torch.ones(1, 77, dtype=torch.long), + "attention_mask": torch.ones(1, 77, dtype=torch.long), + } + mock_processor_cls.from_pretrained.return_value = mock_processor + + yield mock_model, mock_processor + + @pytest.fixture + def processor_with_mocks(self, mock_clip_model): + """Create a processor with mocked CLIP and dataset metadata for dual mode.""" + from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep + + # Dual mode config with both sparse and dense annotations + config = MockConfig( + n_obs_steps=8, + max_rewind_steps=4, + frame_gap=30, + rewind_probability=0.0, # Disable for deterministic test + language_perturbation_probability=0.0, # Disable for deterministic test + annotation_mode="dual", + sparse_subtask_names=["reach", "grasp", "lift"], + sparse_temporal_proportions=[0.3, 0.4, 0.3], + dense_subtask_names=["approach", "contact", "close_gripper", "lift_up"], + dense_temporal_proportions=[0.25, 0.25, 0.25, 0.25], + ) + + # Create mock dataset metadata with one episode of 300 frames + # Include annotation columns for dual mode + episodes = [ + { + "dataset_from_index": 0, + "dataset_to_index": 300, + "task": "pick up the cube", + "sparse_subtask_names": ["reach", "grasp", "lift"], + "sparse_subtask_start_frames": [0, 90, 210], + "sparse_subtask_end_frames": [90, 210, 300], + "dense_subtask_names": ["approach", "contact", "close_gripper", "lift_up"], + "dense_subtask_start_frames": [0, 75, 150, 225], + "dense_subtask_end_frames": [75, 150, 225, 300], + } + ] + dataset_meta = MockDatasetMeta(episodes) + + processor = SARMEncodingProcessorStep( + config=config, + dataset_meta=dataset_meta, + ) + processor.train(True) # Use train() method, not direct assignment + + return processor, config + + def test_call_with_single_frame_batch(self, processor_with_mocks): + """Test processor __call__ with a single-frame batch.""" + processor, config = processor_with_mocks + + # Create dummy input transition + batch_size = 1 + num_frames = config.num_frames # 13 frames (9 obs + 4 rewind) + + # Image: (T, C, H, W) format as expected by processor + dummy_image = np.random.rand(num_frames, 3, 224, 224).astype(np.float32) + + # State: (T, D) format + dummy_state = np.random.rand(num_frames, 6).astype(np.float32) + + transition = { + TransitionKey.OBSERVATION: { + config.image_key: dummy_image, + config.state_key: dummy_state, + }, + TransitionKey.COMPLEMENTARY_DATA: { + "index": 150, # Middle of episode + "episode_index": 0, + "task": "pick up the cube", + }, + } + + # Run processor + result = processor(transition) + + # Verify output structure + obs = result[TransitionKey.OBSERVATION] + + # Check video features exist and have correct shape + assert "video_features" in obs + video_features = obs["video_features"] + assert video_features.shape[0] == batch_size + assert video_features.shape[1] == num_frames + assert video_features.shape[2] == 512 # CLIP embedding dim + + # Check state features exist and have correct shape + assert "state_features" in obs + state_features = obs["state_features"] + assert 
state_features.shape[0] == batch_size + assert state_features.shape[1] == num_frames + assert state_features.shape[2] == config.max_state_dim # Padded to max_state_dim + + # Check text features exist and have correct shape + assert "text_features" in obs + text_features = obs["text_features"] + assert text_features.shape[0] == batch_size + assert text_features.shape[1] == 512 # CLIP embedding dim + + # Check lengths tensor + assert "lengths" in obs + lengths = obs["lengths"] + assert lengths.shape[0] == batch_size + assert lengths.dtype == torch.int32 + + # Check sparse_targets exist + assert "sparse_targets" in obs + sparse_targets = obs["sparse_targets"] + assert sparse_targets.shape == (batch_size, num_frames) + # All targets should be in [0, max_stages] range (stage.tau format) + assert (sparse_targets >= 0).all() + + # Check dense_targets exist (for dual mode) + assert "dense_targets" in obs + dense_targets = obs["dense_targets"] + assert dense_targets.shape == (batch_size, num_frames) + assert (dense_targets >= 0).all() + + def test_call_with_batched_input(self, mock_clip_model): + """Test processor __call__ with a batched input (multiple frames) in dual mode.""" + from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep + + config = MockConfig( + n_obs_steps=8, + max_rewind_steps=4, + frame_gap=30, + rewind_probability=0.0, + language_perturbation_probability=0.0, + annotation_mode="dual", + sparse_subtask_names=["reach", "grasp"], + sparse_temporal_proportions=[0.5, 0.5], + dense_subtask_names=["step1", "step2", "step3"], + dense_temporal_proportions=[0.33, 0.34, 0.33], + ) + + # Two episodes with different lengths, each with sparse+dense annotations + episodes = [ + { + "dataset_from_index": 0, + "dataset_to_index": 200, + "task": "task A", + "sparse_subtask_names": ["reach", "grasp"], + "sparse_subtask_start_frames": [0, 100], + "sparse_subtask_end_frames": [100, 200], + "dense_subtask_names": ["step1", "step2", "step3"], + "dense_subtask_start_frames": [0, 66, 133], + "dense_subtask_end_frames": [66, 133, 200], + }, + { + "dataset_from_index": 200, + "dataset_to_index": 500, + "task": "task B", + "sparse_subtask_names": ["reach", "grasp"], + "sparse_subtask_start_frames": [200, 350], + "sparse_subtask_end_frames": [350, 500], + "dense_subtask_names": ["step1", "step2", "step3"], + "dense_subtask_start_frames": [200, 300, 400], + "dense_subtask_end_frames": [300, 400, 500], + }, + ] + dataset_meta = MockDatasetMeta(episodes) + + processor = SARMEncodingProcessorStep(config=config, dataset_meta=dataset_meta) + processor.train(True) + + batch_size = 2 + num_frames = config.num_frames + + # Image: (B, T, C, H, W) format + dummy_image = np.random.rand(batch_size, num_frames, 3, 224, 224).astype(np.float32) + dummy_state = np.random.rand(batch_size, num_frames, 6).astype(np.float32) + + transition = { + TransitionKey.OBSERVATION: { + config.image_key: dummy_image, + config.state_key: dummy_state, + }, + TransitionKey.COMPLEMENTARY_DATA: { + "index": np.array([100, 350]), # One frame from each episode + "episode_index": np.array([0, 1]), + "task": ["task A", "task B"], + }, + } + + result = processor(transition) + obs = result[TransitionKey.OBSERVATION] + + # Verify batch dimension is preserved for all outputs + assert obs["video_features"].shape[0] == batch_size + assert obs["state_features"].shape[0] == batch_size + assert obs["lengths"].shape[0] == batch_size + assert obs["sparse_targets"].shape[0] == batch_size + assert obs["dense_targets"].shape[0] == batch_size 
# Dual mode has dense targets + + def test_targets_increase_with_progress(self, mock_clip_model): + """Test that both sparse and dense targets increase as frame index progresses.""" + from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep + + config = MockConfig( + n_obs_steps=8, + max_rewind_steps=4, + frame_gap=30, + rewind_probability=0.0, + language_perturbation_probability=0.0, + annotation_mode="dual", + sparse_subtask_names=["phase1", "phase2"], + sparse_temporal_proportions=[0.5, 0.5], + dense_subtask_names=["a", "b", "c", "d"], + dense_temporal_proportions=[0.25, 0.25, 0.25, 0.25], + ) + + episodes = [ + { + "dataset_from_index": 0, + "dataset_to_index": 300, + "task": "test task", + "sparse_subtask_names": ["phase1", "phase2"], + "sparse_subtask_start_frames": [0, 150], + "sparse_subtask_end_frames": [150, 300], + "dense_subtask_names": ["a", "b", "c", "d"], + "dense_subtask_start_frames": [0, 75, 150, 225], + "dense_subtask_end_frames": [75, 150, 225, 300], + } + ] + dataset_meta = MockDatasetMeta(episodes) + + processor = SARMEncodingProcessorStep(config=config, dataset_meta=dataset_meta) + processor.train(True) + + num_frames = config.num_frames + + # Test at early, middle, and late points in episode + frame_indices = [30, 150, 270] + sparse_center_targets = [] + dense_center_targets = [] + + for frame_idx in frame_indices: + dummy_image = np.random.rand(num_frames, 3, 224, 224).astype(np.float32) + dummy_state = np.random.rand(num_frames, 6).astype(np.float32) + + transition = { + TransitionKey.OBSERVATION: { + config.image_key: dummy_image, + config.state_key: dummy_state, + }, + TransitionKey.COMPLEMENTARY_DATA: { + "index": frame_idx, + "episode_index": 0, + "task": "test task", + }, + } + + result = processor(transition) + obs = result[TransitionKey.OBSERVATION] + # Get target at center frame (index 4 in 9-frame observation window) + sparse_center_targets.append(obs["sparse_targets"][0, 4].item()) + dense_center_targets.append(obs["dense_targets"][0, 4].item()) + + # Both sparse and dense targets should increase with frame index + assert sparse_center_targets[0] < sparse_center_targets[2], ( + f"Early sparse target ({sparse_center_targets[0]}) should be < late ({sparse_center_targets[2]})" + ) + assert dense_center_targets[0] < dense_center_targets[2], ( + f"Early dense target ({dense_center_targets[0]}) should be < late ({dense_center_targets[2]})" + ) + + def test_progress_labels_exact_values(self, mock_clip_model): + """Test that progress labels (stage.tau) are computed correctly for known positions.""" + from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep + + # Simple setup: 2 sparse stages, 4 dense stages, 100 frame episode + config = MockConfig( + n_obs_steps=8, + max_rewind_steps=4, + frame_gap=10, # Smaller gap for easier calculation + rewind_probability=0.0, + language_perturbation_probability=0.0, + annotation_mode="dual", + sparse_subtask_names=["A", "B"], + sparse_temporal_proportions=[0.5, 0.5], + dense_subtask_names=["d1", "d2", "d3", "d4"], + dense_temporal_proportions=[0.25, 0.25, 0.25, 0.25], + ) + + # Episode: frames 0-99, sparse stages at [0-49], [50-99] + # Dense stages at [0-24], [25-49], [50-74], [75-99] + episodes = [ + { + "dataset_from_index": 0, + "dataset_to_index": 100, + "task": "test", + "sparse_subtask_names": ["A", "B"], + "sparse_subtask_start_frames": [0, 50], + "sparse_subtask_end_frames": [50, 100], + "dense_subtask_names": ["d1", "d2", "d3", "d4"], + "dense_subtask_start_frames": [0, 25, 50, 
75], + "dense_subtask_end_frames": [25, 50, 75, 100], + } + ] + dataset_meta = MockDatasetMeta(episodes) + + processor = SARMEncodingProcessorStep(config=config, dataset_meta=dataset_meta) + processor.train(True) + + num_frames = config.num_frames + + # Test at frame 50 (center of episode) + # With frame_gap=10, n_obs_steps=8: + # obs indices around frame 50: [10, 20, 30, 40, 50, 60, 70, 80, 90] (9 frames) + dummy_image = np.random.rand(num_frames, 3, 224, 224).astype(np.float32) + dummy_state = np.random.rand(num_frames, 6).astype(np.float32) + + transition = { + TransitionKey.OBSERVATION: { + config.image_key: dummy_image, + config.state_key: dummy_state, + }, + TransitionKey.COMPLEMENTARY_DATA: { + "index": 50, + "episode_index": 0, + "task": "test", + }, + } + + result = processor(transition) + obs = result[TransitionKey.OBSERVATION] + sparse_targets = obs["sparse_targets"][0] # (13,) + dense_targets = obs["dense_targets"][0] # (13,) + + # First 9 frames are observation frames, last 4 are rewind placeholders (zeros when no rewind) + # Check that obs frames have non-zero targets + obs_sparse = sparse_targets[:9] + obs_dense = dense_targets[:9] + + # Verify targets are monotonically increasing for observation frames + for i in range(1, 9): + assert obs_sparse[i] >= obs_sparse[i - 1], ( + f"Sparse targets should be monotonic: {obs_sparse[i - 1].item():.3f} -> {obs_sparse[i].item():.3f}" + ) + assert obs_dense[i] >= obs_dense[i - 1], ( + f"Dense targets should be monotonic: {obs_dense[i - 1].item():.3f} -> {obs_dense[i].item():.3f}" + ) + + # Rewind slots should be zero when rewind is disabled + rewind_targets = sparse_targets[9:] + assert (rewind_targets == 0).all(), "Rewind slots should be zero when rewind is disabled" + + # Check stage transitions: frame 50 is at boundary of sparse stage A->B + # Center frame (index 4) corresponds to actual frame 50 + center_sparse = obs_sparse[4].item() + # At frame 50, sparse stage B starts, so target should be ~1.0 (stage 1 + tau 0) + assert 0.9 <= center_sparse <= 1.1, ( + f"At sparse boundary, target should be ~1.0, got {center_sparse:.3f}" + ) + + def test_rewind_augmentation_applied(self, mock_clip_model): + """Test that rewind augmentation correctly extends sequence and generates targets.""" + import random + + from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep + + config = MockConfig( + n_obs_steps=8, + max_rewind_steps=4, + frame_gap=10, + rewind_probability=1.0, # Always apply rewind + language_perturbation_probability=0.0, + annotation_mode="dual", + sparse_subtask_names=["A", "B"], + sparse_temporal_proportions=[0.5, 0.5], + dense_subtask_names=["d1", "d2"], + dense_temporal_proportions=[0.5, 0.5], + ) + + episodes = [ + { + "dataset_from_index": 0, + "dataset_to_index": 200, + "task": "test", + "sparse_subtask_names": ["A", "B"], + "sparse_subtask_start_frames": [0, 100], + "sparse_subtask_end_frames": [100, 200], + "dense_subtask_names": ["d1", "d2"], + "dense_subtask_start_frames": [0, 100], + "dense_subtask_end_frames": [100, 200], + } + ] + dataset_meta = MockDatasetMeta(episodes) + + processor = SARMEncodingProcessorStep(config=config, dataset_meta=dataset_meta) + processor.train(True) + + num_frames = config.num_frames # 13 + + # Test at frame 150 (center of bidirectional window) + # With n_obs_steps=8, half_steps=4, frame_gap=10: + # - Earliest obs frame = 150 - 4*10 = 110 + # - Rewind can go back from 110 to frames like 100, 90, 80, 70 + # - History available = 110 - 0 = 110, so max rewind = 110/10 = 11 
(capped at 4) + dummy_image = np.random.rand(num_frames, 3, 224, 224).astype(np.float32) + dummy_state = np.random.rand(num_frames, 6).astype(np.float32) + + transition = { + TransitionKey.OBSERVATION: { + config.image_key: dummy_image, + config.state_key: dummy_state, + }, + TransitionKey.COMPLEMENTARY_DATA: { + "index": 150, + "episode_index": 0, + "task": "test", + }, + } + + # Seed random for reproducibility + random.seed(42) + result = processor(transition) + obs = result[TransitionKey.OBSERVATION] + + lengths = obs["lengths"][0].item() + sparse_targets = obs["sparse_targets"][0] + + # With rewind_probability=1.0 and enough history, lengths should be > 9 (9 obs + some rewind) + assert lengths > 9, f"With rewind enabled, lengths should be > 9, got {lengths}" + assert lengths <= num_frames, f"Lengths should not exceed total frames {num_frames}, got {lengths}" + + # Rewind targets should be non-zero for frames within valid length + n_obs_frames = 9 + rewind_count = lengths - n_obs_frames + + if rewind_count > 0: + # Check that rewind frames have targets + rewind_targets = sparse_targets[n_obs_frames : n_obs_frames + rewind_count] + # Rewind frames are from BEFORE the earliest obs frame (110) + # These frames (100, 90, 80, 70) are earlier in the episode + earliest_obs_target = sparse_targets[0].item() # Frame 110 + + # Rewind targets should be less than earliest obs (they're from earlier frames) + for i, rt in enumerate(rewind_targets): + assert rt.item() < earliest_obs_target, ( + f"Rewind target {i} ({rt.item():.3f}) should be < earliest obs ({earliest_obs_target:.3f})" + ) + + # Rewind targets should be decreasing (going further back in time) + for i in range(1, len(rewind_targets)): + assert rewind_targets[i] <= rewind_targets[i - 1], ( + f"Rewind targets should decrease: {rewind_targets[i - 1].item():.3f} -> {rewind_targets[i].item():.3f}" + ) + + def test_full_sequence_target_consistency(self, mock_clip_model): + """Test that the full sequence of targets is consistent with frame positions.""" + from lerobot.policies.sarm.processor_sarm import SARMEncodingProcessorStep + from lerobot.policies.sarm.sarm_utils import find_stage_and_tau + + config = MockConfig( + n_obs_steps=8, + max_rewind_steps=4, + frame_gap=10, + rewind_probability=0.0, + language_perturbation_probability=0.0, + annotation_mode="dual", + sparse_subtask_names=["s1", "s2", "s3"], + sparse_temporal_proportions=[0.33, 0.34, 0.33], + dense_subtask_names=["d1", "d2"], + dense_temporal_proportions=[0.5, 0.5], + ) + + # 3 sparse stages: [0-33), [33-66), [66-99] + # 2 dense stages: [0-50), [50-100) + episodes = [ + { + "dataset_from_index": 0, + "dataset_to_index": 100, + "task": "test", + "sparse_subtask_names": ["s1", "s2", "s3"], + "sparse_subtask_start_frames": [0, 33, 66], + "sparse_subtask_end_frames": [33, 66, 100], + "dense_subtask_names": ["d1", "d2"], + "dense_subtask_start_frames": [0, 50], + "dense_subtask_end_frames": [50, 100], + } + ] + dataset_meta = MockDatasetMeta(episodes) + + processor = SARMEncodingProcessorStep(config=config, dataset_meta=dataset_meta) + processor.train(True) + + num_frames = config.num_frames + + # Test at frame 50 (middle of episode) + dummy_image = np.random.rand(num_frames, 3, 224, 224).astype(np.float32) + dummy_state = np.random.rand(num_frames, 6).astype(np.float32) + + transition = { + TransitionKey.OBSERVATION: { + config.image_key: dummy_image, + config.state_key: dummy_state, + }, + TransitionKey.COMPLEMENTARY_DATA: { + "index": 50, + "episode_index": 0, + "task": "test", + 
},
+        }
+
+        result = processor(transition)
+        obs = result[TransitionKey.OBSERVATION]
+        sparse_targets = obs["sparse_targets"][0]
+        dense_targets = obs["dense_targets"][0]
+
+        # Manually compute expected targets for observation frames
+        # With frame_gap=10, n_obs_steps=8, center at 50:
+        # obs frames: [10, 20, 30, 40, 50, 60, 70, 80, 90]
+        expected_obs_frames = [10, 20, 30, 40, 50, 60, 70, 80, 90]
+
+        sparse_names = ["s1", "s2", "s3"]
+        sparse_starts = [0, 33, 66]
+        sparse_ends = [33, 66, 100]
+        sparse_props = {"s1": 0.33, "s2": 0.34, "s3": 0.33}
+
+        dense_names = ["d1", "d2"]
+        dense_starts = [0, 50]
+        dense_ends = [50, 100]
+        dense_props = {"d1": 0.5, "d2": 0.5}
+
+        for i, frame in enumerate(expected_obs_frames):
+            expected_sparse = find_stage_and_tau(
+                frame,
+                100,
+                sparse_names,
+                sparse_starts,
+                sparse_ends,
+                sparse_names,
+                sparse_props,
+                return_combined=True,
+            )
+            expected_dense = find_stage_and_tau(
+                frame,
+                100,
+                dense_names,
+                dense_starts,
+                dense_ends,
+                dense_names,
+                dense_props,
+                return_combined=True,
+            )
+
+            actual_sparse = sparse_targets[i].item()
+            actual_dense = dense_targets[i].item()
+
+            assert abs(actual_sparse - expected_sparse) < 0.01, (
+                f"Frame {frame}: sparse mismatch {actual_sparse:.3f} vs expected {expected_sparse:.3f}"
+            )
+            assert abs(actual_dense - expected_dense) < 0.01, (
+                f"Frame {frame}: dense mismatch {actual_dense:.3f} vs expected {expected_dense:.3f}"
+            )
diff --git a/tests/policies/test_sarm_subtask_annotations.py b/tests/policies/test_sarm_subtask_annotations.py
new file mode 100644
index 000000000..0dc087288
--- /dev/null
+++ b/tests/policies/test_sarm_subtask_annotations.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+pytest.importorskip("transformers")
+
+from lerobot.data_processing.sarm_annotations.subtask_annotation import (
+    Subtask,
+    SubtaskAnnotation,
+    Timestamp,
+    compute_temporal_proportions,
+)
+
+
+def make_annotation(subtasks: list[tuple[str, int, int]]) -> SubtaskAnnotation:
+    """Helper to create SubtaskAnnotation from list of (name, start_sec, end_sec)."""
+    return SubtaskAnnotation(
+        subtasks=[
+            Subtask(
+                name=name,
+                timestamps=Timestamp(
+                    start=f"{start // 60:02d}:{start % 60:02d}", end=f"{end // 60:02d}:{end % 60:02d}"
+                ),
+            )
+            for name, start, end in subtasks
+        ]
+    )
+
+
+class TestComputeTemporalProportions:
+    """Tests for compute_temporal_proportions (SARM Paper Formula 1).
+
+    Formula: ᾱ_k = (1/M) × Σ_i (L_{i,k} / T_i)
+
+    Key insight: This averages the PROPORTION of each subtask within each trajectory,
+    giving equal weight to all trajectories regardless of absolute length.
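+
+    For example, with the per-episode proportions 80/100 and 40/200 used in
+    test_paper_example_different_from_avg_durations below, the formula gives
+    (0.8 + 0.2) / 2 = 0.5, whereas pooling raw durations first would give
+    (80 + 40) / (100 + 200) = 0.4.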
+    """
+
+    def test_basic_two_trajectories_equal_proportions(self):
+        """Test with two trajectories that have equal proportions."""
+        # Both trajectories: subtask1 = 50%, subtask2 = 50%
+        # Traj 1: T=100s, subtask1=50s, subtask2=50s
+        # Traj 2: T=200s, subtask1=100s, subtask2=100s
+        annotations = {
+            0: make_annotation([("subtask1", 0, 50), ("subtask2", 50, 100)]),
+            1: make_annotation([("subtask1", 0, 100), ("subtask2", 100, 200)]),
+        }
+
+        result = compute_temporal_proportions(annotations)
+
+        # Both should be 0.5
+        assert abs(result["subtask1"] - 0.5) < 1e-6
+        assert abs(result["subtask2"] - 0.5) < 1e-6
+
+    def test_paper_example_different_from_avg_durations(self):
+        """Test that compute_temporal_proportions differs from naive average duration approach.
+
+        This is the key test showing the difference between:
+        - Paper formula: average of (L_i,k / T_i)
+        - Naive approach: mean(L_i,k) / sum(mean(L_i,j))
+        """
+        # Episode 1: T=100s, subtask1=80s, subtask2=20s (proportions: 0.8, 0.2)
+        # Episode 2: T=200s, subtask1=40s, subtask2=160s (proportions: 0.2, 0.8)
+        annotations = {
+            0: make_annotation([("subtask1", 0, 80), ("subtask2", 80, 100)]),
+            1: make_annotation([("subtask1", 0, 40), ("subtask2", 40, 200)]),
+        }
+
+        result = compute_temporal_proportions(annotations)
+
+        # Paper formula:
+        # ᾱ_1 = (1/2) × (80/100 + 40/200) = (1/2) × (0.8 + 0.2) = 0.5
+        # ᾱ_2 = (1/2) × (20/100 + 160/200) = (1/2) × (0.2 + 0.8) = 0.5
+        assert abs(result["subtask1"] - 0.5) < 1e-6
+        assert abs(result["subtask2"] - 0.5) < 1e-6
+
+    def test_single_trajectory(self):
+        """Test with a single trajectory."""
+        # T=100s, reach=30s, grasp=20s, lift=50s
+        annotations = {
+            0: make_annotation([("reach", 0, 30), ("grasp", 30, 50), ("lift", 50, 100)]),
+        }
+
+        result = compute_temporal_proportions(annotations)
+
+        assert abs(result["reach"] - 0.3) < 1e-6
+        assert abs(result["grasp"] - 0.2) < 1e-6
+        assert abs(result["lift"] - 0.5) < 1e-6
+
+    def test_sum_to_one(self):
+        """Test that proportions always sum to 1."""
+        # Three episodes with varying proportions
+        annotations = {
+            0: make_annotation([("a", 0, 10), ("b", 10, 50), ("c", 50, 100)]),  # 0.1, 0.4, 0.5
+            1: make_annotation([("a", 0, 20), ("b", 20, 70), ("c", 70, 100)]),  # 0.2, 0.5, 0.3
+            2: make_annotation([("a", 0, 30), ("b", 30, 90), ("c", 90, 100)]),  # 0.3, 0.6, 0.1
+        }
+
+        result = compute_temporal_proportions(annotations)
+
+        total = sum(result.values())
+        assert abs(total - 1.0) < 1e-6
+
+    def test_empty_annotations_returns_empty(self):
+        """Test that empty annotations return an empty dict."""
+        result = compute_temporal_proportions({})
+        assert result == {}
+
+    def test_uniform_proportions(self):
+        """Test with uniform proportions across subtasks."""
+        # Each subtask takes 25% of each episode
+        annotations = {
+            0: make_annotation([("a", 0, 25), ("b", 25, 50), ("c", 50, 75), ("d", 75, 100)]),
+            1: make_annotation([("a", 0, 50), ("b", 50, 100), ("c", 100, 150), ("d", 150, 200)]),
+        }
+
+        result = compute_temporal_proportions(annotations)
+
+        for name in ["a", "b", "c", "d"]:
+            assert abs(result[name] - 0.25) < 1e-6
diff --git a/tests/policies/test_sarm_utils.py b/tests/policies/test_sarm_utils.py
new file mode 100644
index 000000000..510477ec8
--- /dev/null
+++ b/tests/policies/test_sarm_utils.py
@@ -0,0 +1,615 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import pytest +import torch + +from lerobot.policies.sarm.sarm_utils import ( + apply_rewind_augmentation, + compute_absolute_indices, + compute_tau, + find_stage_and_tau, + normalize_stage_tau, + temporal_proportions_to_breakpoints, +) + + +class TestProgressLabelsWithModes: + """End-to-end tests for progress label generation in different modes.""" + + def test_sparse_mode_single_stage(self): + """Sparse mode with single stage should give linear progress.""" + episode_length = 300 + global_names = ["task"] + proportions = {"task": 1.0} + + # Test at various frames + for frame in [0, 100, 200, 299]: + stage, tau = find_stage_and_tau( + frame, episode_length, None, None, None, global_names, proportions + ) + + expected_tau = frame / (episode_length - 1) + assert stage == 0 + assert abs(tau - expected_tau) < 1e-5 + + def test_sparse_mode_multi_stage(self): + """Sparse mode with multiple stages.""" + global_names = ["reach", "grasp", "lift", "place"] + proportions = {"reach": 0.2, "grasp": 0.2, "lift": 0.3, "place": 0.3} + + subtask_names = ["reach", "grasp", "lift", "place"] + subtask_starts = [0, 60, 120, 210] + subtask_ends = [59, 119, 209, 299] + + # Check stages are correctly identified + stage_at_30, _ = find_stage_and_tau( + 30, 300, subtask_names, subtask_starts, subtask_ends, global_names, proportions + ) + assert stage_at_30 == 0 + + stage_at_90, _ = find_stage_and_tau( + 90, 300, subtask_names, subtask_starts, subtask_ends, global_names, proportions + ) + assert stage_at_90 == 1 + + stage_at_150, _ = find_stage_and_tau( + 150, 300, subtask_names, subtask_starts, subtask_ends, global_names, proportions + ) + assert stage_at_150 == 2 + + def test_dense_mode_more_stages(self): + """Dense mode should work with more fine-grained stages.""" + global_names = ["a", "b", "c", "d", "e", "f", "g", "h"] + proportions = dict.fromkeys(global_names, 1 / 8) + + subtask_names = global_names + subtask_starts = [i * 50 for i in range(8)] + subtask_ends = [(i + 1) * 50 - 1 for i in range(8)] + + # Each stage should occupy 50 frames + for stage_idx in range(8): + mid_frame = stage_idx * 50 + 25 + stage, _ = find_stage_and_tau( + mid_frame, 400, subtask_names, subtask_starts, subtask_ends, global_names, proportions + ) + assert stage == stage_idx + + +class TestComputeAbsoluteIndices: + """Tests for compute_absolute_indices (bidirectional sampling).""" + + def test_no_clamping_when_in_middle(self): + """When frame is in middle of episode, no clamping should occur.""" + frame_idx = 300 + ep_start = 0 + ep_end = 1000 + n_obs_steps = 8 + frame_gap = 30 + + indices, out_of_bounds = compute_absolute_indices(frame_idx, ep_start, ep_end, n_obs_steps, frame_gap) + + # All should be valid (no out of bounds) + assert out_of_bounds.sum() == 0 + + # Check bidirectional indices: [-120, -90, -60, -30, 0, 30, 60, 90, 120] from center + half_steps = n_obs_steps // 2 + expected = ( + [frame_idx - frame_gap * i for i in range(half_steps, 0, -1)] + + [frame_idx] + + [frame_idx + frame_gap * i for i in range(1, half_steps + 1)] + ) + assert indices.tolist() == expected + + # Center frame 
(index 4) should be frame_idx
+        assert indices[half_steps] == frame_idx
+
+    def test_clamping_at_episode_start(self):
+        """Early frames should be clamped to episode start."""
+        frame_idx = 50  # Not enough history for full past window
+        ep_start = 0
+        ep_end = 1000
+        n_obs_steps = 8
+        frame_gap = 30
+
+        indices, out_of_bounds = compute_absolute_indices(frame_idx, ep_start, ep_end, n_obs_steps, frame_gap)
+
+        # Some past frames should be clamped (out_of_bounds = 1)
+        assert out_of_bounds.sum() > 0
+
+        # All indices should be >= ep_start
+        assert (indices >= ep_start).all()
+
+        # Center index should be frame_idx
+        half_steps = n_obs_steps // 2
+        assert indices[half_steps] == frame_idx
+
+    def test_clamping_at_episode_end(self):
+        """Late frames should be clamped to episode end."""
+        frame_idx = 950  # Not enough future for full window
+        ep_start = 0
+        ep_end = 1000
+        n_obs_steps = 8
+        frame_gap = 30
+
+        indices, out_of_bounds = compute_absolute_indices(frame_idx, ep_start, ep_end, n_obs_steps, frame_gap)
+
+        # Some future frames should be clamped
+        assert out_of_bounds.sum() > 0
+
+        # All indices should be < ep_end
+        assert (indices < ep_end).all()
+
+        # Center index should be frame_idx
+        half_steps = n_obs_steps // 2
+        assert indices[half_steps] == frame_idx
+
+    def test_sequence_is_monotonic(self):
+        """Frame indices should be monotonically increasing."""
+        for frame_idx in [50, 100, 300, 950]:
+            indices, _ = compute_absolute_indices(frame_idx, 0, 1000, 8, 30)
+
+            # Check monotonic (non-decreasing due to clamping)
+            diffs = indices[1:] - indices[:-1]
+            assert (diffs >= 0).all(), f"Non-monotonic at frame {frame_idx}"
+
+
+class TestComputeTau:
+    """Tests for compute_tau (within-subtask progress).
+
+    Formula: τ_t = (t - s_k) / (e_k - s_k) ∈ [0, 1]
+    """
+
+    def test_at_start(self):
+        """τ should be 0 at subtask start."""
+        tau = compute_tau(current_frame=10, subtask_start=10, subtask_end=50)
+        assert tau == 0.0
+
+    def test_at_end(self):
+        """τ should be 1 at subtask end."""
+        tau = compute_tau(current_frame=50, subtask_start=10, subtask_end=50)
+        assert tau == 1.0
+
+    def test_at_middle(self):
+        """τ should be 0.5 at subtask midpoint."""
+        tau = compute_tau(current_frame=30, subtask_start=10, subtask_end=50)
+        assert abs(tau - 0.5) < 1e-6
+
+    def test_quarter_progress(self):
+        """Test τ at 25% through subtask."""
+        tau = compute_tau(current_frame=20, subtask_start=0, subtask_end=80)
+        assert abs(tau - 0.25) < 1e-6
+
+    def test_zero_duration_subtask(self):
+        """τ should be 1.0 for zero-duration subtask."""
+        tau = compute_tau(current_frame=10, subtask_start=10, subtask_end=10)
+        assert tau == 1.0
+
+    def test_clamps_below_zero(self):
+        """τ should be clamped to 0 if frame is before subtask."""
+        tau = compute_tau(current_frame=5, subtask_start=10, subtask_end=50)
+        assert tau == 0.0
+
+    def test_clamps_above_one(self):
+        """τ should be clamped to 1 if frame is after subtask."""
+        tau = compute_tau(current_frame=60, subtask_start=10, subtask_end=50)
+        assert tau == 1.0
+
+    def test_float_inputs(self):
+        """Test with float frame indices (from interpolation)."""
+        tau = compute_tau(current_frame=25.5, subtask_start=10.0, subtask_end=50.0)
+        expected = (25.5 - 10.0) / (50.0 - 10.0)
+        assert abs(tau - expected) < 1e-6
+
+
+class TestFindStageAndTau:
+    """Tests for find_stage_and_tau logic.
+
+    This function is the core of progress label computation. It determines
+    which stage a frame belongs to and the within-stage progress (tau).
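+
+    Given subtask boundaries [s_k, e_k], the stage is the k whose span contains
+    the frame, tau = (t - s_k) / (e_k - s_k), and the combined label is stage + tau.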
+ """ + + def test_single_stage_mode_linear_progress(self): + """Single-stage mode should give linear progress from 0 to 1.""" + episode_length = 100 + + # Frame 0 -> tau = 0 + stage, tau = find_stage_and_tau(0, episode_length, None, None, None, ["task"], {"task": 1.0}) + assert stage == 0 + assert abs(tau - 0.0) < 1e-6 + + # Frame 50 -> tau = 0.505 (50/99) + stage, tau = find_stage_and_tau(50, episode_length, None, None, None, ["task"], {"task": 1.0}) + assert stage == 0 + assert abs(tau - 50 / 99) < 1e-6 + + # Frame 99 -> tau = 1.0 + stage, tau = find_stage_and_tau(99, episode_length, None, None, None, ["task"], {"task": 1.0}) + assert stage == 0 + assert abs(tau - 1.0) < 1e-6 + + def test_multi_stage_within_subtask(self): + """Test finding stage when frame is within a subtask.""" + global_names = ["reach", "grasp", "lift"] + proportions = {"reach": 0.3, "grasp": 0.2, "lift": 0.5} + + subtask_names = ["reach", "grasp", "lift"] + subtask_starts = [0, 30, 50] + subtask_ends = [29, 49, 99] + + # Frame 15 in "reach" stage (index 0) + stage, tau = find_stage_and_tau( + 15, 100, subtask_names, subtask_starts, subtask_ends, global_names, proportions + ) + assert stage == 0 + assert abs(tau - 15 / 29) < 1e-6 + + # Frame 40 in "grasp" stage (index 1) + stage, tau = find_stage_and_tau( + 40, 100, subtask_names, subtask_starts, subtask_ends, global_names, proportions + ) + assert stage == 1 + # tau = (40 - 30) / (49 - 30) = 10/19 + assert abs(tau - 10 / 19) < 1e-6 + + # Frame 75 in "lift" stage (index 2) + stage, tau = find_stage_and_tau( + 75, 100, subtask_names, subtask_starts, subtask_ends, global_names, proportions + ) + assert stage == 2 + # tau = (75 - 50) / (99 - 50) = 25/49 + assert abs(tau - 25 / 49) < 1e-6 + + def test_frame_at_subtask_boundaries(self): + """Test frames exactly at subtask boundaries.""" + global_names = ["a", "b"] + proportions = {"a": 0.5, "b": 0.5} + + subtask_names = ["a", "b"] + subtask_starts = [0, 50] + subtask_ends = [49, 99] + + # Frame at start of first subtask + stage, tau = find_stage_and_tau( + 0, 100, subtask_names, subtask_starts, subtask_ends, global_names, proportions + ) + assert stage == 0 + assert tau == 0.0 + + # Frame at end of first subtask + stage, tau = find_stage_and_tau( + 49, 100, subtask_names, subtask_starts, subtask_ends, global_names, proportions + ) + assert stage == 0 + assert tau == 1.0 + + # Frame at start of second subtask + stage, tau = find_stage_and_tau( + 50, 100, subtask_names, subtask_starts, subtask_ends, global_names, proportions + ) + assert stage == 1 + assert tau == 0.0 + + def test_frame_after_last_subtask(self): + """Frames after last subtask should return last stage with high tau.""" + global_names = ["a", "b"] + proportions = {"a": 0.5, "b": 0.5} + + subtask_names = ["a", "b"] + subtask_starts = [0, 30] + subtask_ends = [29, 59] + + # Frame 80 is after last subtask + stage, tau = find_stage_and_tau( + 80, 100, subtask_names, subtask_starts, subtask_ends, global_names, proportions + ) + assert stage == 1 # Last stage + assert tau == 0.999 # Nearly complete + + +class TestEndToEndProgressLabeling: + """End-to-end tests for progress label computation using normalize_stage_tau.""" + + def test_consistent_semantic_meaning(self): + """Test that same subtask completion maps to same progress across trajectories. + + This is the key semantic property: "end of subtask 1" should always + mean the same progress value regardless of trajectory speed. 
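+
+        For instance, with the proportions [0.3, 0.5, 0.2] used below, the end of
+        subtask 1 maps to progress 0.3 whether it took 30 frames or 90.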
+ """ + proportions = [0.3, 0.5, 0.2] + + # Fast trajectory: subtask 1 ends at frame 30 (of 100) + tau_fast = compute_tau(30, 0, 30) # = 1.0 + y_fast = normalize_stage_tau(0 + tau_fast, temporal_proportions=proportions) + + # Slow trajectory: subtask 1 ends at frame 90 (of 300) + tau_slow = compute_tau(90, 0, 90) # = 1.0 + y_slow = normalize_stage_tau(0 + tau_slow, temporal_proportions=proportions) + + # Both should map to same progress (0.3 = end of subtask 1) + assert abs(y_fast - y_slow) < 1e-6 + assert abs(y_fast - 0.3) < 1e-6 + + def test_monotonic_within_subtask(self): + """Test that progress is monotonically increasing within a subtask.""" + proportions = [0.4, 0.6] + + prev_y = -1 + for tau in np.linspace(0, 1, 11): + y = normalize_stage_tau(0 + tau, temporal_proportions=proportions) + assert y > prev_y or (tau == 0 and y == 0) + prev_y = y + + def test_continuous_across_subtasks(self): + """Test that progress is continuous at subtask boundaries.""" + proportions = [0.3, 0.5, 0.2] + + # End of subtask 0 (stage=0, tau=1.0) -> stage.tau = 1.0 + y_end_0 = normalize_stage_tau(0 + 1.0, temporal_proportions=proportions) + + # Start of subtask 1 (stage=1, tau=0.0) -> stage.tau = 1.0 + y_start_1 = normalize_stage_tau(1 + 0.0, temporal_proportions=proportions) + + # Should be equal (P_1 = 0.3) + assert abs(y_end_0 - y_start_1) < 1e-6 + + # End of subtask 1 (stage=1, tau=1.0) -> stage.tau = 2.0 + y_end_1 = normalize_stage_tau(1 + 1.0, temporal_proportions=proportions) + + # Start of subtask 2 (stage=2, tau=0.0) -> stage.tau = 2.0 + y_start_2 = normalize_stage_tau(2 + 0.0, temporal_proportions=proportions) + + # Should be equal (P_2 = 0.8) + assert abs(y_end_1 - y_start_2) < 1e-6 + + +class TestTemporalProportionsToBreakpoints: + """Tests for temporal_proportions_to_breakpoints. + + Converts temporal proportions to cumulative breakpoints for normalization. 
+ Example: [0.3, 0.5, 0.2] -> [0.0, 0.3, 0.8, 1.0] + """ + + def test_basic_conversion(self): + """Test basic conversion from proportions to breakpoints.""" + proportions = [0.3, 0.5, 0.2] + breakpoints = temporal_proportions_to_breakpoints(proportions) + + assert breakpoints is not None + assert len(breakpoints) == 4 + assert breakpoints[0] == 0.0 + assert abs(breakpoints[1] - 0.3) < 1e-6 + assert abs(breakpoints[2] - 0.8) < 1e-6 + assert breakpoints[3] == 1.0 + + def test_dict_input(self): + """Test with dict input.""" + proportions = {"a": 0.25, "b": 0.25, "c": 0.5} + breakpoints = temporal_proportions_to_breakpoints(proportions) + + assert breakpoints is not None + assert len(breakpoints) == 4 + assert breakpoints[0] == 0.0 + assert breakpoints[-1] == 1.0 + + def test_dict_with_subtask_names_order(self): + """Test that subtask_names determines order for dict input.""" + proportions = {"c": 0.5, "a": 0.2, "b": 0.3} # Dict order + subtask_names = ["a", "b", "c"] # Different order + + breakpoints = temporal_proportions_to_breakpoints(proportions, subtask_names) + + # Breakpoints should follow subtask_names order: a=0.2, b=0.3, c=0.5 + assert abs(breakpoints[1] - 0.2) < 1e-6 # a + assert abs(breakpoints[2] - 0.5) < 1e-6 # a + b = 0.5 + assert breakpoints[3] == 1.0 # a + b + c = 1.0 + + def test_uniform_proportions(self): + """Test with uniform proportions.""" + proportions = [0.25, 0.25, 0.25, 0.25] + breakpoints = temporal_proportions_to_breakpoints(proportions) + + expected = [0.0, 0.25, 0.5, 0.75, 1.0] + for i, (bp, exp) in enumerate(zip(breakpoints, expected, strict=True)): + assert abs(bp - exp) < 1e-6, f"Breakpoint {i} mismatch" + + def test_none_input(self): + """Test that None input returns None.""" + result = temporal_proportions_to_breakpoints(None) + assert result is None + + def test_normalization(self): + """Test that non-normalized proportions are normalized.""" + # Proportions sum to 2.0, not 1.0 + proportions = [0.6, 1.0, 0.4] + breakpoints = temporal_proportions_to_breakpoints(proportions) + + # Should be normalized: [0.3, 0.5, 0.2] -> [0, 0.3, 0.8, 1.0] + assert breakpoints[-1] == 1.0 + assert abs(breakpoints[1] - 0.3) < 1e-6 + + +class TestNormalizeStageTau: + """Tests for normalize_stage_tau. + + Normalizes stage+tau values to [0, 1] using breakpoints. 
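+
+    Worked example: with breakpoints [0.0, 0.3, 0.8, 1.0], x = 1.5 (stage 1,
+    tau 0.5) maps to 0.3 + 0.5 * (0.8 - 0.3) = 0.55.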
+ """ + + def test_linear_fallback(self): + """Test linear normalization when only num_stages is provided.""" + # 4 stages, linear: [0, 0.25, 0.5, 0.75, 1.0] + + # Stage 0 start + assert normalize_stage_tau(0.0, num_stages=4) == 0.0 + + # Stage 0 end / Stage 1 start + assert abs(normalize_stage_tau(1.0, num_stages=4) - 0.25) < 1e-6 + + # Stage 1 middle + assert abs(normalize_stage_tau(1.5, num_stages=4) - 0.375) < 1e-6 + + # Stage 3 end + assert normalize_stage_tau(4.0, num_stages=4) == 1.0 + + def test_with_custom_breakpoints(self): + """Test with custom breakpoints.""" + # Non-linear breakpoints + breakpoints = [0.0, 0.1, 0.5, 1.0] # 3 stages + + # Stage 0: maps [0, 1) to [0.0, 0.1) + assert abs(normalize_stage_tau(0.5, breakpoints=breakpoints) - 0.05) < 1e-6 + + # Stage 1: maps [1, 2) to [0.1, 0.5) + assert abs(normalize_stage_tau(1.5, breakpoints=breakpoints) - 0.3) < 1e-6 + + # Stage 2: maps [2, 3) to [0.5, 1.0) + assert abs(normalize_stage_tau(2.5, breakpoints=breakpoints) - 0.75) < 1e-6 + + def test_with_temporal_proportions(self): + """Test with temporal proportions (auto-computed breakpoints).""" + proportions = {"a": 0.2, "b": 0.3, "c": 0.5} + subtask_names = ["a", "b", "c"] + + # Stage 0 end should map to 0.2 + result = normalize_stage_tau(1.0, temporal_proportions=proportions, subtask_names=subtask_names) + assert abs(result - 0.2) < 1e-6 + + # Stage 1 end should map to 0.5 + result = normalize_stage_tau(2.0, temporal_proportions=proportions, subtask_names=subtask_names) + assert abs(result - 0.5) < 1e-6 + + def test_tensor_input(self): + """Test with tensor input.""" + x = torch.tensor([0.0, 0.5, 1.0, 1.5, 2.0]) + breakpoints = [0.0, 0.3, 0.8, 1.0] # 3 stages + + result = normalize_stage_tau(x, breakpoints=breakpoints) + + assert isinstance(result, torch.Tensor) + assert result.shape == x.shape + assert abs(result[0].item() - 0.0) < 1e-6 + assert abs(result[2].item() - 0.3) < 1e-6 # End of stage 0 + assert abs(result[4].item() - 0.8) < 1e-6 # End of stage 1 + + def test_clamping(self): + """Test that output is clamped to [0, 1].""" + # Below 0 + assert normalize_stage_tau(-0.5, num_stages=4) == 0.0 + + # Above num_stages + assert normalize_stage_tau(5.0, num_stages=4) == 1.0 + + def test_batch_tensor(self): + """Test with batched tensor.""" + x = torch.tensor([[0.0, 1.0, 2.0], [0.5, 1.5, 2.5]]) # (2, 3) + + result = normalize_stage_tau(x, num_stages=3) + + assert result.shape == (2, 3) + assert (result >= 0).all() + assert (result <= 1).all() + + def test_requires_one_of_inputs(self): + """Test that at least one input method is required.""" + with pytest.raises(ValueError): + normalize_stage_tau(1.0) + + +class TestRewindAugmentation: + """Tests for rewind augmentation logic with bidirectional observation sampling. + + Rewind appends frames before the earliest observation frame, going backwards. 
+ With bidirectional sampling centered at frame_idx: + - Earliest obs frame = frame_idx - half_steps * frame_gap + - Rewind goes backwards from that point + """ + + def test_rewind_indices_go_backwards_from_earliest_obs(self): + """Rewind indices should go backwards from earliest observation frame.""" + frame_idx = 300 # Center of bidirectional window + ep_start = 0 + n_obs_steps = 4 # half_steps = 2 + frame_gap = 30 + + # Earliest obs frame = 300 - 2*30 = 240 + # Rewind goes backwards: 210, 180 + rewind_step, rewind_indices = apply_rewind_augmentation( + frame_idx, + ep_start, + n_obs_steps=n_obs_steps, + max_rewind_steps=2, + frame_gap=frame_gap, + rewind_step=2, + ) + + assert rewind_step == 2 + assert len(rewind_indices) == 2 + # First rewind frame is closest to obs window, second is further back + assert rewind_indices[0] == 210 # 240 - 30 + assert rewind_indices[1] == 180 # 240 - 60 + assert rewind_indices[0] > rewind_indices[1], "Rewind should be descending" + + def test_rewind_goes_backward_through_history(self): + """Rewind frames should go backward before the observation window.""" + frame_idx = 450 # Center of bidirectional window + ep_start = 0 + n_obs_steps = 8 # half_steps = 4 + frame_gap = 30 + + # Earliest obs frame = 450 - 4*30 = 330 + # Rewind from 330: [300, 270, 240] + rewind_step, rewind_indices = apply_rewind_augmentation( + frame_idx, + ep_start, + n_obs_steps=n_obs_steps, + max_rewind_steps=4, + frame_gap=frame_gap, + rewind_step=3, + ) + + assert rewind_step == 3 + expected = [300, 270, 240] # Going backwards from 330 + assert rewind_indices == expected + + def test_no_rewind_when_obs_window_at_episode_start(self): + """No rewind when observation window reaches episode start.""" + frame_idx = 120 # Center of window + ep_start = 0 + n_obs_steps = 8 # half_steps = 4 + frame_gap = 30 + + # Earliest obs frame = 120 - 4*30 = 0 (at episode start) + rewind_step, rewind_indices = apply_rewind_augmentation( + frame_idx, ep_start, n_obs_steps=n_obs_steps, max_rewind_steps=4, frame_gap=frame_gap + ) + + # No room for rewind + assert rewind_step == 0 + assert rewind_indices == [] + + def test_rewind_targets_are_decreasing(self): + """Progress targets for rewind frames should be decreasing.""" + # Simulate progress values + obs_progress = [0.1, 0.2, 0.3, 0.4, 0.5] # Forward progress + + # Rewind reverses progress + rewind_indices = [4, 3, 2] # Go backwards through indices + rewind_progress = [obs_progress[i] for i in rewind_indices] + + # Should be decreasing + for i in range(len(rewind_progress) - 1): + assert rewind_progress[i] > rewind_progress[i + 1] From 0bd1969d0aea605df5a35ed2cdd6f11224eb5a4f Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Thu, 18 Dec 2025 19:45:13 +0100 Subject: [PATCH 12/12] feat(docs): modernize readme (#2660) --- README.md | 393 +++++------------- media/gym/aloha_act.gif | Bin 3027981 -> 0 bytes media/gym/pusht_diffusion.gif | Bin 189800 -> 0 bytes media/gym/simxarm_tdmpc.gif | Bin 475007 -> 0 bytes media/hope_jr/hopejr.png | Bin 73277 -> 0 bytes media/lekiwi/kiwi.webp | Bin 224412 -> 0 bytes media/lerobot-logo-light.png | Bin 204038 -> 0 bytes media/readme/VLA_architecture.jpg | Bin 0 -> 792640 bytes media/{ => readme}/lerobot-logo-thumbnail.png | Bin media/readme/robots_control_video.webp | Bin 0 -> 2425850 bytes media/readme/so100_video.webp | Bin 0 -> 492230 bytes media/so100/leader_follower.webp | Bin 120188 -> 0 bytes media/so101/so101-leader.webp | Bin 154650 -> 0 bytes media/so101/so101.webp | Bin 133522 -> 0 bytes media/wandb.png | Bin 
416489 -> 0 bytes 15 files changed, 103 insertions(+), 290 deletions(-) delete mode 100644 media/gym/aloha_act.gif delete mode 100644 media/gym/pusht_diffusion.gif delete mode 100644 media/gym/simxarm_tdmpc.gif delete mode 100644 media/hope_jr/hopejr.png delete mode 100644 media/lekiwi/kiwi.webp delete mode 100644 media/lerobot-logo-light.png create mode 100644 media/readme/VLA_architecture.jpg rename media/{ => readme}/lerobot-logo-thumbnail.png (100%) create mode 100644 media/readme/robots_control_video.webp create mode 100644 media/readme/so100_video.webp delete mode 100644 media/so100/leader_follower.webp delete mode 100644 media/so101/so101-leader.webp delete mode 100644 media/so101/so101.webp delete mode 100644 media/wandb.png diff --git a/README.md b/README.md index 964af4c1d..f4c2a8406 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,5 @@

- LeRobot, Hugging Face Robotics Library -
-
+ LeRobot, Hugging Face Robotics Library

@@ -12,323 +10,130 @@ [![Status](https://img.shields.io/pypi/status/lerobot)](https://pypi.org/project/lerobot/) [![Version](https://img.shields.io/pypi/v/lerobot)](https://pypi.org/project/lerobot/) [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.1-ff69b4.svg)](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md) -[![Discord](https://dcbadge.vercel.app/api/server/C5P34WJ68S?style=flat)](https://discord.gg/s3KuuzsPFb) - -
-

-

- Build Your Own HopeJR Robot!

-

+**LeRobot** aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry so that everyone can contribute to and benefit from shared datasets and pretrained models. -
- HopeJR robot +🤗 A hardware-agnostic, Python-native interface that standardizes control across diverse platforms, from low-cost arms (SO-100) to humanoids. -

Meet HopeJR – A humanoid robot arm and hand for dexterous manipulation!

-

Control it with exoskeletons and gloves for precise hand movements.

-

Perfect for advanced manipulation tasks! 🤖

+🤗 A standardized, scalable LeRobotDataset format (Parquet + MP4 or images) hosted on the Hugging Face Hub, enabling efficient storage, streaming and visualization of massive robotic datasets. -

- See the full HopeJR tutorial here.

-
+🤗 State-of-the-art policies that have been shown to transfer to the real world, ready for training and deployment. -
+🤗 Comprehensive support for the open-source ecosystem to democratize physical AI. -

-

- Build Your Own SO-101 Robot!

-

+## Quick Start -
- - - - - -
SO-101 follower armSO-101 leader arm
- -

Meet the updated SO100, the SO-101 – Just €114 per arm!

-

Train it in minutes with a few simple moves on your laptop.

-

Then sit back and watch your creation act autonomously! 🤯

- -

- See the full SO-101 tutorial here.

- -

Want to take it to the next level? Make your SO-101 mobile by building LeKiwi!

-

Check out the LeKiwi tutorial and bring your robot to life on wheels.

- - LeKiwi mobile robot -
- -
- -

-

LeRobot: State-of-the-art AI for real-world robotics

-

- ---- - -🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models. - -🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning. - -🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulation environments to get started without assembling a robot. In the coming weeks, the plan is to add more and more support for real-world robotics on the most affordable and capable robots out there. - -🤗 LeRobot hosts pretrained models and datasets on this Hugging Face community page: [huggingface.co/lerobot](https://huggingface.co/lerobot) - -#### Examples of pretrained models on simulation environments - - - - - - - - - - - -
ACT policy on ALOHA envTDMPC policy on SimXArm envDiffusion policy on PushT env
ACT policy on ALOHA envTDMPC policy on SimXArm envDiffusion policy on PushT env
-
-## Installation
-
-LeRobot works with Python 3.10+ and PyTorch 2.2+.
-
-### Environment Setup
-
-Create a virtual environment with Python 3.10 and activate it, e.g. with [`miniforge`](https://conda-forge.org/download/):
-
-```bash
-conda create -y -n lerobot python=3.10
-conda activate lerobot
-```
-
-When using `conda`, install `ffmpeg` in your environment:
-
-```bash
-conda install ffmpeg -c conda-forge
-```
-
-> **NOTE:** This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can:
->
-> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using:
->
-> ```bash
-> conda install ffmpeg=7.1.1 -c conda-forge
-> ```
->
-> - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`.
-
-### Install LeRobot 🤗
-
-#### From Source
-
-First, clone the repository and navigate into the directory:
-
-```bash
-git clone https://github.com/huggingface/lerobot.git
-cd lerobot
-```
-
-Then, install the library in editable mode. This is useful if you plan to contribute to the code.
-
-```bash
-pip install -e .
-```
-
-> **NOTE:** If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and `ffmpeg libs`). On Linux, run:
-> `sudo apt-get install cmake build-essential python3-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)
-
-For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras:
-
-- [aloha](https://github.com/huggingface/gym-aloha)
-- [xarm](https://github.com/huggingface/gym-xarm)
-- [pusht](https://github.com/huggingface/gym-pusht)
-
-For instance, to install 🤗 LeRobot with aloha and pusht, use:
-
-```bash
-pip install -e ".[aloha, pusht]"
-```
-
-### Installation from PyPI
-
-**Core Library:**
-Install the base package with:
+LeRobot can be installed directly from PyPI.

```bash
pip install lerobot
+lerobot-info
```

-_This installs only the default dependencies._
+> [!IMPORTANT]
+> For a detailed installation guide, see the [Installation Documentation](https://huggingface.co/docs/lerobot/installation).

-**Extra Features:**
-To install additional functionality, use one of the following:
+## Robots & Control
+
+ Reachy 2 Demo +
+
+LeRobot provides a unified `Robot` class interface that decouples control logic from hardware specifics. It supports a wide range of robots and teleoperation devices.
+
+```python
+from lerobot.robots.myrobot import MyRobot
+
+# Connect to a robot
+robot = MyRobot(config=...)
+robot.connect()
+
+# Read observation and send action
+obs = robot.get_observation()
+action = model.select_action(obs)
+robot.send_action(action)
+```
+
+**Supported Hardware:** SO100, LeKiwi, Koch, HopeJR, OMX, EarthRover, Reachy2, Gamepads, Keyboards, Phones, OpenARM, Unitree G1.
+
+While these devices are natively integrated into the LeRobot codebase, the library is designed to be extensible. You can easily implement the Robot interface to utilize LeRobot's data collection, training, and visualization tools for your own custom robot.
+
+For detailed hardware setup guides, see the [Hardware Documentation](https://huggingface.co/docs/lerobot/integrate_hardware).
+
+## LeRobot Dataset
+
+To solve the data fragmentation problem in robotics, we utilize the **LeRobotDataset** format.
+
+- **Structure:** Synchronized MP4 videos (or images) for vision and Parquet files for state/action data.
+- **HF Hub Integration:** Explore thousands of robotics datasets on the [Hugging Face Hub](https://huggingface.co/lerobot).
+- **Tools:** Seamlessly delete episodes, split by indices/fractions, add/remove features, and merge multiple datasets.
+
+```python
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+# Load a dataset from the Hub
+dataset = LeRobotDataset("lerobot/aloha_mobile_cabinet")
+
+# Access data (indexing returns a single temporal frame; video decoding is handled automatically)
+frame_index = 0
+print(f"{dataset[frame_index]['action'].shape=}\n")
+```
+
+Learn more about it in the [LeRobotDataset Documentation](https://huggingface.co/docs/lerobot/lerobot-dataset-v3).
+
+## SoTA Models
+
+LeRobot implements state-of-the-art policies in pure PyTorch, covering Imitation Learning, Reinforcement Learning, and Vision-Language-Action (VLA) models, with more coming soon. It also provides you with the tools to instrument and inspect your training process.
+

+ Gr00t Architecture +

+ +Training a policy is as simple as running a script configuration: ```bash -pip install 'lerobot[all]' # All available features -pip install 'lerobot[aloha,pusht]' # Specific features (Aloha & Pusht) -pip install 'lerobot[feetech]' # Feetech motor support +lerobot-train \ + --policy=act \ + --dataset.repo_id=lerobot/aloha_mobile_cabinet ``` -_Replace `[...]` with your desired features._ +| Category | Models | +| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Imitation Learning** | [ACT](./docs/source/policy_act_README.md), [Diffusion](./docs/source/policy_diffusion_README.md), [TDMPC](./docs/source/policy_tdmpc_README.md), [VQ-BeT](./docs/source/policy_vqbet_README.md) | +| **Reinforcement Learning** | [HIL-SERL](./docs/source/hilserl.mdx) & QC-FQL (coming soon) | +| **VLAs Models** | [Pi0.5](./docs/source/pi05.mdx), [GR00T N1.5](./docs/source/policy_groot_README.md), [SmolVLA](./docs/source/policy_smolvla_README.md), [XVLA](./docs/source/xvla.mdx) | -**Available Tags:** -For a full list of optional dependencies, see: -https://pypi.org/project/lerobot/ +Similarly to the hardware, you can easily implement your own policy & leverage LeRobot's data collection, training, and visualization tools, and share your model to the HF Hub -> [!NOTE] -> For lerobot 0.4.0, if you want to install pi tags, you will have to do: `pip install "lerobot[pi]@git+https://github.com/huggingface/lerobot.git"`. -> -> This will be solved in the next patch release +For detailed policy setup guides, see the [Policy Documentation](https://huggingface.co/docs/lerobot/bring_your_own_policies). -### Weights & Biases +## Inference & Evaluation -To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with +Evaluate your policies in simulation or on real hardware using the unified evaluation script. LeRobot supports standard benchmarks like **LIBERO**, **MetaWorld** and more to come. ```bash -wandb login +# Evaluate a policy on the LIBERO benchmark +lerobot-eval \ + --policy.path=lerobot/pi0_libero_finetuned \ + --env.type=libero \ + --env.task=libero_object \ + --eval.n_episodes=10 ``` -(note: you will also need to enable WandB in the configuration. See below.) +Learn how to implement your own simulation environment or benchmark and distribute it from the HF Hub by following the [EnvHub Documentation](https://huggingface.co/docs/lerobot/envhub) -### Visualize datasets +## Resources -Check out [example 1](https://github.com/huggingface/lerobot/blob/main/examples/dataset/load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically downloads data from the Hugging Face hub. 

You can also locally visualize episodes from a dataset on the Hub by running our script from the command line:

```bash
lerobot-dataset-viz \
    --repo-id lerobot/pusht \
    --episode-index 0
```

or from a dataset in a local folder, by adding the `--root` option and `--mode local` (in the following case the dataset is searched for in `./my_local_data_dir/lerobot/pusht`):

```bash
lerobot-dataset-viz \
    --repo-id lerobot/pusht \
    --root ./my_local_data_dir \
    --mode local \
    --episode-index 0
```

It will open `rerun.io` and display the camera streams, robot states, and actions, like this:

https://github-production-user-asset-6210df.s3.amazonaws.com/4681518/328035972-fd46b787-b532-47e2-bb6f-fd536a55a7ed.mov?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVCODYLSA53PQK4ZA%2F20240505%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240505T172924Z&X-Amz-Expires=300&X-Amz-Signature=d680b26c532eeaf80740f08af3320d22ad0b8a4e4da1bcc4f33142c15b509eda&X-Amz-SignedHeaders=host&actor_id=24889239&key_id=0&repo_id=748713144

Our script can also visualize datasets stored on a remote server. See `lerobot-dataset-viz --help` for more instructions.

### The `LeRobotDataset` format

A dataset in `LeRobotDataset` format is very simple to use. It can be loaded from a repository on the Hugging Face Hub or from a local folder, e.g. with `dataset = LeRobotDataset("lerobot/aloha_static_coffee")`, and can be indexed like any Hugging Face or PyTorch dataset. For instance, `dataset[0]` retrieves a single temporal frame from the dataset, containing observation(s) and an action as PyTorch tensors ready to be fed to a model.

A specificity of `LeRobotDataset` is that, rather than retrieving a single frame by its index, we can retrieve several frames based on their temporal relationship with the indexed frame, by setting `delta_timestamps` to a list of relative times with respect to the indexed frame. For example, with `delta_timestamps = {"observation.image": [-1, -0.5, -0.2, 0]}`, one can retrieve, for a given index, 4 frames: 3 "previous" frames 1 second, 0.5 seconds, and 0.2 seconds before the indexed frame, plus the indexed frame itself (corresponding to the 0 entry). A minimal sketch is shown below; see example [1_load_lerobot_dataset.py](https://github.com/huggingface/lerobot/blob/main/examples/dataset/load_lerobot_dataset.py) for more details on `delta_timestamps`.
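For instance, the sketch below loads a short image history for each indexed frame. It assumes the dataset exposes an `observation.images.cam_high` feature, as in the attribute listing that follows; read the actual feature names off the dataset's metadata.

```python
from lerobot.datasets.lerobot_dataset import LeRobotDataset

# Request 3 past frames (1 s, 0.5 s and 0.2 s before each indexed frame)
# plus the indexed frame itself, for one of the dataset's camera features.
delta_timestamps = {"observation.images.cam_high": [-1, -0.5, -0.2, 0]}
dataset = LeRobotDataset("lerobot/aloha_static_coffee", delta_timestamps=delta_timestamps)

item = dataset[0]
# The camera feature now stacks the 4 requested frames along a leading dimension,
# e.g. (4, c, h, w) instead of (c, h, w). The shape shown here is illustrative.
print(item["observation.images.cam_high"].shape)
```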

Under the hood, the `LeRobotDataset` format makes use of several ways to serialize data, which can be useful to understand if you plan to work more closely with this format. We tried to make a flexible yet simple dataset format that covers most types of features and specificities present in reinforcement learning and robotics, in simulation and in the real world, with a focus on cameras and robot states, but easily extended to other types of sensory inputs as long as they can be represented by a tensor.

Here are the important details and internal structure organization of a typical `LeRobotDataset` instantiated with `dataset = LeRobotDataset("lerobot/aloha_static_coffee")`. The exact features will change from dataset to dataset, but not the main aspects:

```
dataset attributes:
  ├ hf_dataset: a Hugging Face dataset (backed by Arrow/parquet). Typical features example:
  │  ├ observation.images.cam_high (VideoFrame):
  │  │   VideoFrame = {'path': path to a mp4 video, 'timestamp' (float32): timestamp in the video}
  │  ├ observation.state (list of float32): positions of the arm joints (for instance)
  │  ... (more observations)
  │  ├ action (list of float32): goal positions of the arm joints (for instance)
  │  ├ episode_index (int64): index of the episode for this sample
  │  ├ frame_index (int64): index of the frame for this sample in the episode; starts at 0 for each episode
  │  ├ timestamp (float32): timestamp in the episode
  │  ├ next.done (bool): indicates the end of an episode; True for the last frame in each episode
  │  └ index (int64): general index in the whole dataset
  ├ meta: a LeRobotDatasetMetadata object containing:
  │  ├ info: a dictionary of metadata on the dataset
  │  │  ├ codebase_version (str): keeps track of the codebase version the dataset was created with
  │  │  ├ fps (int): frames per second the dataset is recorded/synchronized to
  │  │  ├ features (dict): all features contained in the dataset with their shapes and types
  │  │  ├ total_episodes (int): total number of episodes in the dataset
  │  │  ├ total_frames (int): total number of frames in the dataset
  │  │  ├ robot_type (str): robot type used for recording
  │  │  ├ data_path (str): formattable string for the parquet files
  │  │  └ video_path (str): formattable string for the video files (if using videos)
  │  ├ episodes: a DataFrame containing episode metadata with columns:
  │  │  ├ episode_index (int): index of the episode
  │  │  ├ tasks (list): list of tasks for this episode
  │  │  ├ length (int): number of frames in this episode
  │  │  ├ dataset_from_index (int): start index of this episode in the dataset
  │  │  └ dataset_to_index (int): end index of this episode in the dataset
  │  ├ stats: a dictionary of statistics (max, mean, min, std) for each feature in the dataset, for instance
  │  │  ├ observation.images.front_cam: {'max': tensor with same number of dimensions (e.g. `(c, 1, 1)` for images, `(c,)` for states), etc.}
  │  │  └ ...
  │  └ tasks: a DataFrame containing task information, with task names as index and task_index as values
  ├ root (Path): local directory where the dataset is stored
  ├ image_transforms (Callable): optional image transformations to apply to visual modalities
  └ delta_timestamps (dict): optional delta timestamps for temporal queries
```

A `LeRobotDataset` is serialized using several widespread file formats for each of its parts, namely:

- hf_dataset, stored using the Hugging Face datasets library's serialization to parquet
- videos, stored in mp4 format to save space
- metadata, stored in plain json/jsonl files

Datasets can be uploaded to and downloaded from the Hugging Face Hub seamlessly. To work on a local dataset, you can specify its location with the `root` argument if it is not in the default `~/.cache/huggingface/lerobot` location.

#### Reproduce state-of-the-art (SOTA)

We provide some pretrained policies on our [hub page](https://huggingface.co/lerobot) that can achieve state-of-the-art performance.
You can reproduce their training by loading the config from their run. Simply running:

```bash
lerobot-train --config_path=lerobot/diffusion_pusht
```

reproduces SOTA results for Diffusion Policy on the PushT task. A sketch of loading such a pretrained policy for inference follows.
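The pretrained policy can also be pulled from the Hub and queried for actions directly from Python. This is a minimal sketch, not a definitive recipe: the module path and the dummy observation shapes below are assumptions (read the real feature shapes off `dataset.meta.info["features"]`), while `select_action` is the same entry point used in the `Robot` example earlier.

```python
import torch

# Assumed module path; LeRobot policies expose `from_pretrained` via their Hub integration.
from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy

policy = DiffusionPolicy.from_pretrained("lerobot/diffusion_pusht")
policy.eval()

# Build a dummy batched observation; the feature names and shapes here are
# illustrative for PushT and should match the dataset the policy was trained on.
observation = {
    "observation.image": torch.zeros(1, 3, 96, 96),
    "observation.state": torch.zeros(1, 2),
}
with torch.no_grad():
    action = policy.select_action(observation)
print(action.shape)
```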

### Add a pretrained policy

Once you have trained a policy, you may upload it to the Hugging Face Hub using a hub id that looks like `${hf_user}/${repo_name}` (e.g. [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht)).

You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). Within it there is a `pretrained_model` directory, which should contain:

- `config.json`: A serialized version of the policy configuration (following the policy's dataclass config).
- `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format.
- `train_config.json`: A consolidated configuration containing all parameters used for training. The policy configuration should match `config.json` exactly. This is useful for anyone who wants to evaluate your policy or for reproducibility.

To upload these to the hub, run the following:

```bash
huggingface-cli upload ${hf_user}/${repo_name} path/to/pretrained_model
```

The same upload can also be scripted from Python, as sketched below.
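A short equivalent using the `huggingface_hub` Python API; the repo id and folder path are the same placeholders as above:

```python
from huggingface_hub import HfApi

api = HfApi()
# Create the repo if it does not exist yet, then upload the checkpoint folder.
api.create_repo(repo_id="${hf_user}/${repo_name}", exist_ok=True)
api.upload_folder(
    repo_id="${hf_user}/${repo_name}",
    folder_path="path/to/pretrained_model",
)
```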
See [lerobot_eval.py](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/lerobot_eval.py) for an example of how other people may use your policy.

### Acknowledgment

- The LeRobot team 🤗 for building SmolVLA: [Paper](https://arxiv.org/abs/2506.01844), [Blog](https://huggingface.co/blog/smolvla).
- Thanks to Tony Zhao, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io).
- Thanks to Cheng Chi, Zhenjia Xu and colleagues for open sourcing Diffusion policy, PushT environment and datasets, as well as UMI datasets. Ours are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu) and [UMI Gripper](https://umi-gripper.github.io).
- Thanks to Nicklas Hansen, Yunhai Feng and colleagues for open sourcing TDMPC policy, Simxarm environments and datasets. Ours are adapted from [TDMPC](https://github.com/nicklashansen/tdmpc) and [FOWM](https://www.yunhaifeng.com/FOWM).
- Thanks to Antonio Loquercio and Ashish Kumar for their early support.
- Thanks to [Seungjae (Jay) Lee](https://sjlee.cc/), [Mahi Shafiullah](https://mahis.life/) and colleagues for open sourcing [VQ-BeT](https://sjlee.cc/vq-bet/) policy and helping us adapt the codebase to our repository. The policy is adapted from the [VQ-BeT repo](https://github.com/jayLEE0301/vq_bet_official).

## Resources

- **[Documentation](https://huggingface.co/docs/lerobot/index):** The complete guide to tutorials & API.
- **[Discord](https://discord.gg/3gxM6Avj):** Join the `LeRobot` server to discuss with the community.
- **[X](https://x.com/LeRobotHF):** Follow us on X to stay up-to-date with the latest developments.
- **[Robotics Learning Tutorial](https://huggingface.co/spaces/lerobot/robot-learning-tutorial):** A free, hands-on course to learn robot learning using LeRobot.

## Citation

If you use LeRobot in your research, please cite:

```bibtex
@misc{cadene2024lerobot,
...
}
```

## Contribute

We welcome contributions from everyone in the community! To get started, please read our [CONTRIBUTING.md](./CONTRIBUTING.md) guide. Whether you're adding a new feature, improving documentation, or fixing a bug, your help and feedback are invaluable. We're incredibly excited about the future of open-source robotics and can't wait to work with you on what's next. Thank you for your support!

## Star History

[![Star History Chart](https://api.star-history.com/svg?repos=huggingface/lerobot&type=Timeline)](https://star-history.com/#huggingface/lerobot&Timeline)

<div align="center">

+ SO101 Video +

+ +
Built by the LeRobot team at Hugging Face with ❤️
diff --git a/media/gym/aloha_act.gif b/media/gym/aloha_act.gif
deleted file mode 100644
index 0285a3dd10b2e3b8f787b4e3f0eb740968074c3b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
z@blFEV_d|E`TmHe{@~62NMZlj5aJBBJhBXNMkp0RYBXBS_vXWsu&5Px^b?S{4-fGS zphI|=CVEdaDDyZ7QQd`}p-Z@ltR(~2`gV`fTn?^?FMvh|PqlWN9^8*uKI*t(GhSJy7R?i>DBbq?feS9=G4wf{JekFW- zms{swayhOEeil%FIuS)w=A+}-yL1n$HK_?2^Gb&Ve;x%ePa-a=h$cROMI+s@>(SEN z30Ry`=7S-44exm;7j|L|MvjJVqdX|eh{7>ovZ^*+OgfP7Y4W61KV7rKD5f61@52`dI(NW2@ys!*xCYIwX8MWA-EpHmjf0yth=qrNF3c=v-c7H4VfYU=ie3l?E%=fHye42yJFhO-i~CTYeHq5wGkoRm&j}QY-;luU zT`5#Dz2#L}(ES>LZ63WQ1{#dPhCT#=Q7w-JI1vdr7vp@i&ET6r(smOnTuiy<-n`4&h2@QM zeV?6^VYusB&G}fu3^-T*%(WLisuM>^@F$+wNYXsSbU}LC0!$=1PZ+x}ZK2Y6@Nvt! z)Dx<>n%B$I8z_JJ9cGls1&_VLR?LZr_0X}`;NWEuf9mFu*D^cU17aWv zkQmVBV2brs*T5a5a=b@jKL1V({{b5`fsZ#yl}BexZAvjfo87NRM&nHa zfEFzpq?#ta%LLUR`E}3(JNZ<+22?$HQQ9yi7v_zKYeIC576Ry)&@DM76$ogDHb+l) zf#BX)l*OlSn;7_^lFR7)@4vmU2`2v$+ZdBsT6rfP_dY}ygH}g)m>2UHIjPC3Tfj6U zVt|BsPG3|LRq3yyRE}enRe&tPr7|&45D~KnhgLa`Uev1+^S@R-eB(cNOfmisK`7O~ zC1rs{E#MKEbj-b5pSpKqyM^MlM5fR4QTXay_2{!=8hXPHQ%{8Xxf#~&`{qo-^!&wi zb2+WVTjM;W1{M0lX7YCFg@YnwCaiGH<=$V^Fv6sRMrZCf&t!X}Y#h6jb_aa2aMm`bQC#=c{K8p(k`NojVEG`B=O@vzN{aQ1Q$45P3mu28V*}LW2wTBU%@Tp_F`2vfccGuo?D1*g z?OMt%m>MyB9eM0YBUP6iLLo|PA@>~8B7}8@D*aeY!^nAG`zQD|?37Qte2?wN;F(Qx zP=~mP*WWwJZjI3d9dIfK#6VLA z#gIfEUSSIU_i=fGvaRurcjF5KDkrupg()*?u8QM)&&U7VHL*4G{&9+LeHzbL^^0U( zVcQs7MW;pOxk%+`TQTfWcx4G3t2@`FiI6*j>t*9vyWyq#sD2Dw18?RNQbOdf<m+aWB$6NXVb!U%}Su%ekVLKR0g1SstQvA;?3`O%KQ!ICFrj|9@#$< zX{LI@_}Y6V8_|q)aSEfS=Fy3^e0E8|c`+BoponIt20~f!qQv3IzigF~UlZxw{ ziSWCZj*0Sg=1x{FtRJz_R0_}TdzWU&NIh2a+jBZZ>h3RG7HNNMDc}^9al5{Cl=EL@ z=x}nuncX))_L+KrPc3pZBuy$ghEoI&zsKt7s@cYs1kLpN86vDHs7OgC(=L0Vrc|Me zYgKK==Q5)<%Vw-}{Gs~^s5J1q&?|=$;`?KA{T;6|PJh~+G&1JUonU0U| zRbs0a@_7j*!C8OX-8QSA{~yBcGc4);e*->y2qxZf)VHw5+tO%Z699tt@VS|0nnT?B*fI!S@m0_4&Nd*BPBO zv7x5G1nDZm=j7=_Fa$oHQ5$?=XMlv(}ftAr;i{ zB()P$Rb*OX$))8Mzlm{g)9+N41)u|Ukj6Cr3zQE90%Z!`jpS|59rPHfo;veVA6r<6 z%DR^!E{j9f>W~&zkMLpqUkx@bbfmn?5%Z-icsWdPYnj(PXt& z9KS&A6T&GQJP}9X(@QM8GIbs030c;G9{a2p#R?^O%-(gbs{SIKu(hk`)*K+rI;zk* zkdE~Ju!IS;7<$T2_3jG{DKRrkQa{CInUqYOGcCXK_OAy%eTjt;M6qqyHsw*={kW$* zwj-xXv%F0*Kf!Xum)9L2c^6I}%mmmy_Pzj>xpJ0b#r}W4#&T?r$F9lUW33aO{U-s5eIWr0LHaYA$mk(m<`{1`r0SV zcE({+|FFO@sEn`#d|zG<(Vyg_$g$dy^3YL}E{BlIYBVN__(&6(*S>@OG8$`!0W zO*^Z?>Xf_WU?mllj@2({4k!uB4))rOX)e{R6KHdOj@ef$m%Rpou;q&o6W7F&9c4*s zr&hcyjsr21TxR8==F!~$x*uH~xUL;?!Kdt1gJtT%0h9akfW#pWR3v{;eZh0Yy+8Uz zh&&gS&k7(OmDSuni1ymBii5kPHFKPn`3QC_8Cs*lH?vs6JU=k9tu92t575%Zf97d? zh^{_`S5nWR4l1Qfm2_tLuw$P`o%)u_-LiM(3^ z<4KmF>drEgY+feCaP%P_X8H>c-x7sxbPtq|+Ua``j1;Mny@RQd7}%C@vc^r)uHapd z>SU_jOgvXGoTUFrDd6jVd3lYu8EJKQ5?7^C2=%etW>ZXUNk1baYI`KZKN~OeLvEkD zQ~h*m*+e$v_zEz*7x<~j!-%IkfFHHr@JA^l*zj=RuOgSpDolpgZk1LH9r+z!uKMez z&eAwbk52(>6I)^S|6^xF6iUo_gDk{ZqYwmok3-SAjnN#wN&P3q-+lY9N1Bme@! 
zep5L9l$WY?v|)VRILzfPVmUaf(7?oo6YV?{JW31c+WQ$sB)lA0vkWm)^RNjIl!0u= zcX{T9O>>g-h8C=!v(3|~&O!SOb^Y(WwbFR65Whm!=pe8NYVW8O?vY9~nwQxj8}x=- zceVaKGjZ^P{mp{>#AiS6C;da{W#xxS8HE5ZPG^EbAo?kmErf6k5#dBu%-pp^D$9L> z?nHzoeC8GS6(PKC^hFiriOTb+@PAT^4)b8~v?4a1lNW_hZYshy6&)lM<*a}Sd30P9 z-CBgspkBVfEi{x<0uEAAtP`8*%&3Ksi9AlTAm?3k_9dZh1$i9iAR&;CKTj3#35q#NgmIALN@36%OHKeE2v#}p+spAtL@3;XXQzuguRdW( zk_ugeA*3~i6^=W$R^&m1W%0^p#$oYM@?lL72>{C_A`TMa2S_BfG7>{n4woaljUxrP zqAcD?rv{jo5#fC{d-*Us0tXEPKqmc52!I<-_!^(QLaYKZqNR0m@}hiYCXB>#(}5ot zFHZoeGW?33jbk$871;u1xUMgV^Sd@_t&TX_wt1^f!bbQ(G&trxbUbIB$rv{chqzyI4fefRrx_V9atuP$8 zU^3`T!5SodMOYwpHGwcgdoLUm!5Lim0TGN`)t)TvD_G%{86f;E!0`fDMy&Z>9+Lws z+`8swqTm#+k0AN6seHEP5NtnyZA@d}SHSVTXA)HWhSK5L?WhGj=eIs5p`D#dVSiX+ zS~xQX^bvfECW94_-iu6IJU zsFrf$2B+x7IO*_;wG6XxiEm!CpE7|}t4W9x`RWbN@CgDbik&TjX{l+(BN%1~hP$&D z3+qSWu!fTZZ}LVS#%*WFiH|%s1;^U50=o4R-gmggVugWv2~oVs2<_dNMMK4xg*I?R zvScD8jK+%Mv!BUnqg&oA%8kR!QgjCVFqIuWE+YPf=Mmv)h$4S5EE6j}h})jE25u!V zjByYO-&jDz1c~ljFg7W?+W|^E;jaaYV8D*%$PI(10n z$-c}|2SFifA9EYW#1CP*I?Rk0vE`}3?rh@L96~K0FV{buh{rO8lNNsd1 zCc0fjt51T6e11Xj#V*p2r2w2DW&f6-Q#q`^cNsgq;ZOa<**KUiiX@K9K*a|l#{$E4 z#ofymdQq?}cV3k{s7cNW%^M*C<`lb~@69xYm1k~0U=QCTfW~uJp#tbsSA^>+k$HcZ zTW0BVE_@G%bzpe#cN~0?3O~B&ay}EjXAoBU1ndcb{ELNFzl8)6pq}HUt{lde@tdv! z@D^gM+?MhgFUhZhX<4%Daw@HxWRf&3m=keK-^#<&*~8_{hU^Fq zbF1?;YXsPG4Q|QD+fhNO>;AWaO}on(rnn|ksm_hJjrX_gWbqm9;z#>Vb!Hl?dar?< zNt;PnsGOlbhzL2n23KAu(bPhWRs%A|S-x)zW!stHl@c#U0rP4@nHxiH-q-s?^-^{_ zZ+hiL5$oB`PqBgG;{lUa$A;QI)=ZCfqT5Q znH7y-_fJM$r%i9sVrTv?di9f)%8jbL5Vjc;(|oF!85Oh0g{R@56XddK5*(e(NfF6c zci|q}+ae^a_^9HbORUv;*82m{54#I>5x5uu>*on*^cwuXDHi$0tqB1;1TQ&D1?h3< zTS(AUDeV5ZVl+nUSN}cz|3(ZH`{GvFauGQs6|%Z+z`_ER*5FEbpb|gH>~s=K^wP5K zrBx781<#oMeGhFBjtv2Pi zGF6Cg=bwh(18U#P$(#PT;hxL(qMW?bH+MsD!5mO90*FV@U@HZ<6~^Da482$TGSR+j zrdP_Wwh&keBADy|qJlWF5d*1V1IDosaRIj|cP&JQG3+{} z?cV_8={%chzF9^pJVS&|_&c94Fw;w&V3IgW3%+bTEM2(xBC05R?v7?@Nh9{u;ds`v z`0Rja4vqRbsU|vvsw=k5<5EFz;XO1c2ug!6YFwhQp@j+0q~tg*>F} zBwCe=3_ltuzhM^j!=`8XY~D=J#j+0THEuM z434GbFmOODHPe<2u4*sb#)0lZ6d4bZStQ$M{VVk}_=55NS9{)Oa$vG+i8^BqImEpE z_i68YPw$`l$&87rI#KujzdeCc%*W0F%-ED%vtp++PO9HINp||0ua8(LCH8JZHU+7i zpfmTEm&_`Ltep{#!oF%UnDD?fjXDfGa>6rmLsJ($|=K`W*DMnJt%`!k!$H?}8X zq;Y{c4idQtaiQ*CIbG~DhHNuu#f`&mwPv}UnUh}KYq}v_*0=U01@ZOPtxmV}r)7`V z_O=zf1=N53!`{Y${j>NEa4q-1@K<4wi`$8t+t$CLtp4k-doPgQ2_3J=4MIj_eeLX# z8V5tF34VIc-du%2)fg_OOhYVg+n(76dI&5V@S}lF32fe=GNXAlBBRL&Y-Oy zY=`}APGab+!I9$tL)oXsMOs1XQPvtjv6m*lZuNB@Wh?@2UjLr+(TXUPUwUsOkYk?C z-D40@d9tv|io$%!oZX0uBmo=aS%00A+mGSpUWoK>n> zr0taRVdazIIM=}CN+NjtVGniX(S@)dS^U92>b2XuLxqUvY zl-FWaa4zk=%jXx@B5w9X{Ej@a`)cNjI_TrKZ8G{I>Y?_ukj(K>5{Z#@)HK2E*L4dJf~j`>J20%jPqXItgFJY7RU6yUWc7 zGKOX%bW#rTB1O!Db_PB>-We~J}rlNZjBh)Zh4kEgxR62==Fy5B%}MDbBRS4 zO!ug0YD7Cot&6Q&BKdrWzypAa^o{|N#__ybgA%ju4=Q)9>bzC&*@uRPVH}@QXSO@% zoiH?XeUl$nX0%c5oaC^QaZeTbzwGd-@~cmdyeFm9ej0VBB#U2|GB5A0@rCan#=BW( ze%ax;IWcUz<3j>y)IDXr8y-SgQO2;c+UBMl1-#yw>x!2N!7i@>cBN4fhgd<*Df7I# z_{|kj0UO=_s-_?FI}-Nzyz9>0$lAuf<^SAET$rx|V*bvA22>XJD7xJYRDB?EYXFq8(qK>sEJtyPgr` zstg~mmXNX{>I5$+c4%`pC3~p7%dr-F`NP4lz{|Zy{(Q^NqJ_Y6=zW9t0zRo-o=P6; zt4Wz?^9a~(I9pfs!aZr|6PlHr^Uh)synP5`5r(Wyawv8>cOu{o|4XUGd*`i&eP*34 z+jJ z_Fn07^AiO|rbKyOeZGi(lQ^;|^6&;bXJvH7F>@~&PyW_Ark-o@O7opS`89R6W?Q)7 z0gBF;Q(0B{ezd}dv+qhG4Zd_1VC3DqfQL@UyD$d=J#|+7Vv#ML!#2cjYO zP+9un?a_MOM^rraLv|wzcoNN%BUP^xhjh6#5bm0UWxc!?;h8sjrS585W~!m?``{>F z#jv{E@{7991#EM*Pi*rH;EsOKAZg`a?03rQaTL-Y)+@Qa{d*+_H@xV+^#Y(=&C=^7qUPP8W|8!@--|i6y&JCL zJNGkb@dhYR^!Kdo6DsmiZ+~(2A9z)t6afZ-T85E z!(}4Nd>yQqf*8`$yKtKuz6?VC1pGx0a(5<*(M|pDB5YDGTlQzDbO>NOo{6gq?hZ%6 z8LwTPA6-bRl_jZ^;7d$B%!+Jv~zyJ?{Au7#TyyoCZa}dA=Puq 
z_*vXAZ`2MxsGgY=X8lk!Y_ILx$)z5!zDpbR4RJ2d)u=M`S}5iA78~6Pw(A=9v#@iu zVVbQ2v9y)pkaOa4>7TPE<>mpaF=qDWi8I;AS0&L!P0$qy4A{GpMv7ZL4v%-E7g8A43V@)@E26;#xt}Euu{K28@^sL?5I=9VE(1jifi3 zG7Q}}=FC`mdN>HJaX-bX^cTyO_;&Bmc3&rk$8%|Ii);BB96u>6dbiHd9!883t3Opn zqhBf&ra~-7eUst;pe)euD39 zwl$Gs;kA?!Pk{C)UcV9{iNHT<0O?@oRQKl>>b%a2z0m&|b3l@$kgV!=oFNaXiO4)q zwgX=2EN;v=Q|B&mWFucP9y}l(lo2Mh+WWBnhnV{4!w(A8ZGoBlhGTGlWsY-6AN==r zHlu=g!&WU-ih0B%r8PGuT(2!fx}>O+v3w_7T>O@RaDg z2S#Lni_TljHdm&9t&K@aQtYG->E5Q1Gv6R!M2keI3?atL`otkSy3jt3pa#hsc~o(I zw?Y=Jkaes4qaBeKDZ|krFk1$uZ1I~;-j@GT5`I7ac>G82Mvv9K0A_08&9z-K7p-)z zuna}wO9A#8(IcC(TuvcWZ?`;}v+8BLwu{vw3;YM;rf+>*5@@8*3Q3a6Ut5VyySMnm zbe6~29&!l3iwLu?lj~6VWFRbx2C|?%IY>hAXh=STn?K}C@wBx+9dy`hG8}Mg!VI z0P8P6Ul&2COb~&DIpPIv_s}3QVvQd8M92#5-8|(yjdiX(&Q0d;rKScAbYh9I?H2k zkr2X#(eX8QR?FlGYhTJp2T(Xn3%jVpKg^ox6!C@ zhH&4}VhGP;SQ*XRdCl56gKgHywiFQB^%UCC3o<+Cj)=3$1)8kPDjlrz<-Q{EJU0?Z z@?t2n_343gI}4T)hjP7lQDmNOAw#y5ILlhH8Mcr>ql4GM=yZsqBFRy~3xI$Cm-&A7 z8-VC!fMn6#$msa2hv6{~jd70+A3WquXh!SvR(BcJe91&R+CoUbm2$>YQdR0APr!S= zVuDZQg7F#pcO_M-|I4zIOzq=#N~bnh?JAFZ=Ik|O5d{L?0(SCWbh|YA(5i8SR$O4K z?gAaRJ`_<+-`N4w5_!Boby`bA*SbsBuSkj3VFt#i%D?L0-qDb%-g)>Y}!j?jdWl&opL;r(VFFXLT z9DN!CKo80&iZLW&ZlyJ^(X4g|KM@?mJ4@mh-ZD0V@BvE8a-V5XuOEo_I%(cMxT%*M z_D19`5n(ghpcbcq1QE%|$WuEx$A>+G0eC+e(-Z-5{bF`@l;&OKyBRO?mW-wC$i>X& zZO@+)w(96+0B!DqC(Znw?a69XUOpYD)-DSAe(sB~c+0m)`3vnPDp`{seq2OO#3gaQWwLv>=&-pO7m^p+$p%glJ-b@hN{%5!_j`v`&{UGbnIi2TRX3QcAl14;8nO*7~RwciomggQ2@ zApRCw<!$hU zfn6=kYJ-eweT;Gq&Rq#jc1$w6&c1m4btpYaLKnCC1U~mAv>tpU)`7@8uPo2DUnVOL zzmU^AxP`RS#~CwJPx=4TmZV#gI;4|)F6p?6ui6TU^U^)_HRF~?XhgeDEo2m?*Vgvh zU63L;{^74sACCpwNMH6i1C69+skpH2Z>^->*JYnZqC}p~kopk%uq+DI5))ARIYTQ~ zLl{c zRIn2zu{)(bc0b*q*Nh$&Z{Ss1tXJ9dmmN*`_82E za|!c>b|J~gTZ6z5X?2(f?tPhLp&KVANYw(URR-aerB5aw_^?4dnyTOGDI?F7H_%_p z92nWs)eCsT%K8O4<9iNhYB}5`xKKHXOFVs9ez7p4ME@X=mD?G+)H*M@dX-Lh$|t-T zz`?^dwFb#zO&obC$Mc(lAa?MQZXEDtTzuj>=wd|X<+6|Y7V?Ev54AuK<$kf9=+5t; zuwQPgu2tddZU2F-KbDRJY6K6V}@ z5P#tpt3rb~k29x`--1lviOoAR=+?{KT_%yK zkR3vey&hTshUPvZ?jPxpk7uBu#V#4B7R>!yXK^%JkM9# z#q0a8#3x@l)<1Vpd(cU3bVJ|yIht5)4q(^_Fr{c+!VD_mDRv=Xoun(of9y=V#d^WJ ziNv9{6CN9xeqn2G=m)9(W!oKe$6ae>j7*-qt@86R91T1yGRhsZzz*3_6o$3-r$eWX zTs=8^qaJ%#%{C?d-eH_p{?UNeY_>DYb|YLQLA61OYT%|E<#4j-2vU2p(rZmgROH2KiH`R<^0#Zw=*Z+`8-pvT*Y1rX1+Z>BN0;jK@HAA;O? 
zo}1A_>g{3;XF6tmXlJ8{FfL2J^vy$=$FO!<(BwZ+GrX4eaYx89gT%FqyJT;+aZWo= zD&OULmF~G=&PRs$e%bld6_b4!5C8+a4(wmVp1$^}_@$}-oF3CNz-UM&LN`@B3$pu4w$oE6~ZgX`$;SCN>cq*S2X$Nf=^|ava^dEY6u)sAnPP)s}1_L+*Sv z{g*!q{>d9M;f1Uhx+@JXzx8mNKyC^*?SE4)hsA6-51o28bn1Tmgt=DqPp{2bRKjt( z0l#o_zNfB-hvFOnqbn1uP|41I+`vb`rR`)3tfwu1P%rpSdF0RhvU@tS9_Pl%mhyqH z)*e#+{SIe>?yQFvr7-Ft!wjk?6VhQ(E=`-6Nz*5aTZT5q(N%{<3UeZjWzi-JzG%Pa zh7a%c*5OVTLq-|mj*Q{a(HmzA#2Tb|i(#?>u*AC=>zEe?FZ~pE(9^BVbNiu-tbrS6 z#}4~1hibMD25=a+pkfu_XH5%lrtg}LF5TPGQ|=gA5g08e`#KLWJ4p=of3Qut;^9By z#pGYsE3qf{xHwmOx~&hYBUU1+eswlFmbN2`Mu&3Ozv3E7fA82c&Rf0)(2YMu=QMFNCWtO*-%NET2GUoZQ!8H zUggKs0ZHV~jwpsX|9{dQETxUW2mM239L!}9))=w1gI437#Nqwh52#2IZV{Dky|-D{ z&Ih?kxla8R<8S>0wVv{Ztc!zm-Q=4KFV<+++BSe_Zpy_8-zdeT}>C4Yov+>%)HkyZINb<4fD%b$uQ~1?bNV zL)b$Gic1)}{aY5sjyn|%;C$a1E$D}yWtdxd?zBf;N&!!8*rt78>!+i6&%R9ZWx!2D z@X<-Gi?DdFm#1@2p}yMI&)+NmO}Gu#xxHdoq8GeZ1GgC;$S}?wbeEG?Wgw$|D^mYY zCoaXK5Uy+z-pft&EK<_k|4p_t;Q1CgQRzb>PJ2lU6(HZBjtr(yp}zI)kFMy;rX7PaG;zx>E~Y z+sJc(&}}p4uUS6p<=E_fl>hjR&V>e#B8_MDi+kB;Tq<|oYB;!a@N(i<9mXXw)&AunMo;q^y{mql#2ZEgN|6ipbPOcP) z|9_PNjV4>~vtyNb&yxRFDe&j+_N^oCo^0~(ZoAj$967KKXYGB^^nXf0k07Kagl`f3 zGI_k}jPKcPr{qdOL`zT|dj8j|B9TE*N6ChOYwh#h&{Mivp9_7T7VIj3AWN@xJd-O0 z6&c#gmpyLm_@dlZYilv+y0^3mM^;+eXWdRp+GuNM}kWBDZu|9-5$ML>% zGOWPs8L|7_WBV<#*y_2?p^ct(z3g%AYaeT_6f`nlo^!5UME`U8Tl>{Zt3@7lhz~!j z?FOF@cAJJDr~b2d)$qKUEc^W-zuX7om>g@J5&x>|Wu`B!a+W!{^!KrE;13G;KPCac z;Pv}u-%LYuzdif$;&_sWeoQ_-(TU51eI7ftH;m#J8RhA=bICsz^J&L^RLe*lwxl@M zi?;C^Poi#d?fPtq!I5t}()9XLrcoO9_v}sssdJKj zc70oUQWff*%_?(W<_@`gs<^bMMg`SxNU<_=cv*h`;K8$DGQYGpC0y{AC1ZRLfhL+)}Hfc4S-nTyvQ2 zyZTUl12tw?a*4w9MHjZLL@hf8SfWyKrO$P4PcZi6+{JQ@o^<8Dw+bPA=#BZrbG~Nk zG2DcXaBYE1l-i!9wZ|a|tI)i*3dzRUC8am8clWI5^+5u!#+5?Fbyc{^`-2DzklU3Yl+0)$B?h z8O0^3LQ;8pLk@c`810a4$EC)G^t^MgNK1Ru=8IB`zo$5I5h)dGq75Is78E@)L{GfMb@%o?Uyz)fdM2>hO}^3) z-V_>n)LTR0mJ*w* z^h5q3h8y@<25YklCsJlVdS>>aggyW1Q3iL|reSe3RW+ASE#bEs8rT%?%Lzdx4xTmE z75n`9f>HO(^ERr=K&3EGJ->VY)MUJMIrT$x#I>&@&G<}C1Te{a(>EXIUsapNH?-r@(*6C1RGsLpk@=pd(*^~~zIqR87yTc4^ullVCpV*_od^F=E>z=b zZFHNofHW8sTGbqa&tWRr>AVbZ0ex{IOfM(EOdTRdha2JgyPNvgP7f?IZSof0czYG; z{HAS7jwkKb=szsm{?@J1@7cOZwG@sMEPJ1cjR}3WC&{mFk!^O`<+(=34eaut;XRp8 zT2AGBv8NkM#LkeHPQw`eW_Ta3&u*oSO=ck(+Dbkv7MyWD!}?cytAU3zqb})Ph?GBG zP3Dfz&nRTuo=3kjOH}Er zW4&SJ1YKxzI_fvYM1R|87^??Dg4D|urpyTjs-K6SM9eV{cAWdJ{Y`_WcF92}e>xhtI1g%mM>_p8 zI@SUA(8d-lLVyp>?ep7=h)G+k1X|}AZ_%&!*SARg{XxjHEFm8=oNhJ92j0S!g&oy5 z(9G5w_Azq&RQ&d_S>fD{^H#@g0=^p0rf;V+afh=gRgsA{>5uVS_rFWKE@2U}cSa1S zD|)&bUD23<$P`%Ke)}QjubY@e&-YiyB}cBrXjYjUx*)y)pXHOnD-Imussn{Mn z9c8uWb%Dr#MxE-xRbjIYUy2gd3BS~K<=Sn)XmVOlY@}MA;gC1 z&)#ph-AP7;joWzzs%r1wK%YF&$DmtJ*BNjd42ac&vnbGmPD}63jy=fFiLFeQEyw_}0P{>(1JH7oO-TXu}ZLdhcczLtIQtG>o5bQ)QL^(?p0S_IPq0PQC0&?_k-+`mT)mnf}>f ztT5oOw7cVac~z(9_CLN`Hy%GwU*`H41Jpdmj%}*gwGS1Nn&ZhJN>AiQ8~areyo$b#4pCQkF5JQZmkqNfHT0T5hQ2Fc4 zl;yJ!w`_yQKg!6nPmZmaZ_%Hr{P0dL{#eqQ&z(FoK{MSZ`!!Qw5G>!Ws1pwAuV0yb zz4!R%z1Rb$i7(C`KQJA0YA2SgPGiyIPpBP%?}Vi0ZdWAG?8T;_D)n!P8=p5HTLdzW zcqAw;8|&33MzHB&OWU=k&3lc4HT`f221%0#kJ<zM~lxyGihgxG$OuM`$lC%d;N7advibrVA3NkvC-9D`xpzm9B19%tiAV@Iq; zZxN?LM%YxcQXrE;6QrDY1jot+>x1jg2Tdm#3AfpAgU)5cQDvifW#hcENlqaJ5b5-zNUju2 zJD1Pk)EBRo;Xk1$#I&ucxcJ~=zvTEi0rn{t{gk)kRjRT*&FulV!hnvE0nq#K72`a+ z&>u>xJoJPR`(&{KJ5?!@qOv6Aw|Pk*APV#eeTR>|zgVeC$Gjk-Qx?mf04u0QM!{F0 z9eC{BMXp>;kcUNM`6YVuMxQ*uDH<4TSEcb!?DJskKB*dF(l}Rvo7e(r~jf)6a0Z%)wSUHxY*GAahHnnp{ z8W!`m22AH=9mYQ8WAHzf09uM-l3R>Ca@zyDN6-OANOyi#BI1{IL74;Ins z^=Nrae>xb;y1|1h*UH_B|H%wQMR}TjLLN3mz-ke&kCF9NUG<7p_28uwnaY|5Vo-pY z%13p`|3a}qSO)-ehu`qH9z9AsC0j&4pr8drq);}GU65j)24iN4m>Dj50FSy9tZ+gK 
z&%wj9NLZ@;&;Z1bD8i^qF}_WC8y9|9sIseSXS^I(PMNwFp(1I#X_Be>@c_T0{>oqU1h90bZed5j8BAB}A?A zl(I;Qn&6}4j=~utH1%sa1>a-@L?i@*$uEs_E`|+oywc4wsVui=fcjxc#T|JTQc^KZ z#LRIqPf3jrX$mwvoGVuFf3AX7WC;XbykVjaN*CQQscJ&AqEY>7fhGbL*y_7 z7Ck3Ki{vr|0MR8yzrKN)m!k>*%v~Y(p$OfF$8-?WRDP73bycRhv=8-hU*P?Rh_n-3 z&>k9QK_0m#w#bo(8PcU`l04*$d5TqhKvAGcVTD5UoD|(5gb`n!-SSWg*{sxjQDv8y zSSLiklA;Hs@aSN8w*>P>s`4~dvZTIhjbC}_kdv}-+vi{vV0LyuCBx25PySoEkArFIj2pNpO&oqSDD zI4Xg62Vf}y1?mckl|reKR=JLFH5gtaM864cd`?6)iI7S$$Yc03 zVkx@)7c2$V#Pn6}pebCUGz(ETFVVb?QL*3Vv2u{$k_g6@Dsd@To>b`!QQ@C$*kk@7 za}n?f0ER||?i0#~NsHy${&PO&fdV{5gnq`yJQFGe0ownOHD3+s`k$ilm)rYmQR%N# z@vB6kfqzaWI=692&+s!iRtU=pz9HgQ(j?HcG|cp3=P>|uVE*iuC(zp@OieH>hvt)j zh4KK`45#2U3H%6Ee1wSAD)TB#$973k*iTS%vc(1hc89Wt_1kL)iWMR~i|&Dum7C=C zny^s#DFPNew1_#@i2e;hKM~!!vdD(%-;UHD@ST#U-O>K*N`C7~gNtz6G?kyV2Rymp zSP_hhMJtw`J&J`k$~vChsB8xq*14D+0e zCbyz``Y=}(ae>>0z4O(}8{w(K$xG`>sUkRK5q9h(I8q9uN>Os;CE+4GfQEcZ5=;}3 zk?Z|(;YN-PloJJoi~V7wtJ!YKm>VdV4f^P!!fgs#&Y=v6u0538lS#V(;TT9rMIB!G z{x-}NA?#r-=}m4sT;G=Z9zDlL9l3BaRtla<0}o3uP5o%{Yt$M7a|+9jOPf3&r@mVb zGGJlo4Y1w(bBEI;+9AahfL(|bmL)~S{%SocxpTWrPMRR?`eCTjTMn_94k;{>j|jb} z{E&7Hri?yDLWyKVYzF`}AA{M5syzO|X~hgXO_D?2w_^cT4h8u8vw17-g(HyG*NFKmpwb1 zB`Fh2doGEPzXWrd&6o$mF36|a+e8HS7E0~Xh?u5ug?qXD7WQS6CIFzE^Ogsh!sJ9_ zyX0Dg5H;P7>f@py%IHpitm}qZzkByp$xq7uRLkXqgHole;Qt{s52OMvE#`+V*qQ?F zyLW4#Ty$*`T|)!qCW`KDP)WIkE!d)ZZ>|??1KS-8OI&>AwiT8vdIkC<*YxOapk)Pq3WpG@qr{{V7XE!1Hcn7g8K8n-@8_~EV|%;7B*`A10_pi+-)3f)Rf z0l;rkUk?)zA;HK6$-Q}^LNxasD)w$XO<|5-?P`V%+M;|^5iyOH9MbC=5~9%08(yqq zj|FJ}ga9lSWE~8?P@}blZW9J*kCc?9P)1{eA&w-l2^U;~MfXXNmjHtUi>PWVEYhmK ziCBMEGSfJKO=Bpm2Yv>1{g3w%V{69xp%$_Y7>N{p5YVpv2V^e$*{0{f=3d)Rf0$Ham=Qu zCRAKsYatOz<74K9SK5S7DrpETh9&S3Gr^ZO|5RG}^M<%V`QoWcsuflgd=q2UrTiP! z1DJ2Phb^1>7y$%q;7a#ldubAcjE|@xBI|;Yvf%r2EI2LGaZ096%)1+1dZNRiI2pV7^(S=|a|S=fpRxqJloHhb1B6-)_HhZ6Se`l)0s z|Lwr7xMR=+3VL%II!7jauln}UeoO4O+t}?1e?t>uo&G|Hb)oV$U#BOn6MRG@5m9#Q zihv3|umhU`fU$*WK#1iN_q}(jjeq~BoO9oe+_062P;PYGi#Ggz;Wc+om?(BTvi{9H zQdlq(*l@1S_T{!Z%lHANJm!3Xu0V&z`dQ3h*?U*lr_tncOmEf#d-d=qYag;Up=A9i ztZ42^ypfB~y2C{^@}Bu5a+8WAa`#&q~P4+)~_1lCQn#K)-^?c*>nHYmVd0e_h3?-vjzteJxh5u zr?^+plRL{B9K$1Ji^9X>Z|;0<_@6i%)O9g7-`R~%Zh0=!Ct(mmVn(YUU-K`Pw<8R#Hrp-> zSp7;YPY&;){fvozN%fZso_{J3#!J+#-lHw;nQv>%V*g&=7#lpIU-wB}qYVCtu7~FNqqhPi`q5 zVG{VwQ#^;Hb$FS>bL{1@5i7&%=rNj?Q~bf(Dw#b6&!a3R9Fjlf@KT632C;+-Z+(WJ z8ay$mOXpX?VxcnsHvQ7z`h%?mgO=2L4h~n6>k@Q7`)j8U{~l{B7)bQdj8VCmq8?6> z2mXVIUFVctS{9d}YJY8-!6*Wi0)(Rd2sAPrN0FR1Rt;Y{0VAn}6sfK41;PnJ084Y1 zh%E~}*l|2{!?x%reWwn+`D9XBu!|#R8VEg1b#oc?VXZWtKemOk^8&G#>AcgbIsP3r z2^D>l_pPMJdUQ`?agWQL=Ka_sg*E9-^fy}R79k%>jjJy%6=_y*9JNw|59^mCD{S*o zQf(6$F4(f;c8}fsJ8y7kK1FsOC3-xv<)4M8P|uywXGNI0Bd3;qdEnb@(HD( z7pr_4l->}FnG)d4MV3 z>s5ChXF*iZ@#|pR?Ky9==R~H^LSG>)FJA4ds7&LCMc|8Uj#G6N%K^PR>VaU?s7`Xz z5b$oJ3rCq;*HyNyUH*Wa_tZ}N6Xr9Hy(>-)*=o zkNQsaCgJ`c!tOns>Hm)(|IT9vhB>Q7%H+_T4=Hmdho+)AWzL7FIYyD$*pS0=YC?@7 zO0^scsWwAMHA0k%s8s4b9aXa5KELbwUDxmW{`38Jf4=s79j@o|alhR)b6xctsdkCd zrpWf0{`<+_pziKpwu}ShezYxO6l`=254gKyi}bwPLwWZh91ObFFc zA)HB)sVJA8H+hAvUarF-vhX&*QLz1t% zrB7WoY$$%MWd?oF*1z?erVAF3i%vLrQS+u^lhX`<>7dKda8P%57&wc`>wBzCzxpzK zUJQ4qG%6MG<`gze@)SR4t2AR=B>LgL(k+RtC#C=wt=IwM(YD`k^`8^c&UP^4d998VJ36Sk>8|c_d*O)x=iMVVvjiDP+ zBD7>p+*-<|U!!URT@ultUG9Qs!1{kmC2A1dPcf{Iw;&Jee5kylm~U_?RrU}bWYRAv zc{|n_F<||XsH0j!8P-OIcK2gAe&84f2aVlHuG?ZU5aRNJjQubW9Xp?=I@zGovIg0X zZGdN7GY+;Ly#l{4RX{r;*|I>V*pDQZ=_NH3PZUi!zgmd@-k>qV8M2IB&F>!VlDlvH zIc+%x6V-kR8N{pqd`kj=S+}XBhlm*4NWMim-8yPrYWaveo+xRYQ0FPPd0$%x%I{Ql z4HtSXc9-YfSig>Q(70%qMg{cb-m=FN98T)jo)?}U)pagR#o0^%gIO|XkDM)PcvfT} z%FF^INXszWdnP53%l*n-fPpRk%tBXUqhgVo3nBVj$Bpa;7 
z=dpHYg&{nL1@;>ZI;E4X8Ph{DMERnvl#Zw#>Go84&S7YX=Ak6cI|m99)$~zXmr6}P zydg#grqY!z^76C?{xVaN(#*7nK6-Kvg{NP>nr5`&cm})58t2zaaR?VvURaUAOoryf zu&YtVI!D5yA%|ObZ>zlqKQp`@as3Fu*%4qO<1|x#Vu*OryfaKO&Tw(`g4zHMn_HMy zpcB>1Z<3bLqT>6|+7rJG&qci&i>v?2hX@}Nn-|_z)^?FxZy^a z^x)%-(yK26dak~5t_}kt1MTZhMOT;8BCnu&0Iu8kY_RM8)#nnbBc>6MpH=u~ONZK! zS?a2+3yN-&EfQ|lSmnkLo~ln#p<0b%MvnQB1i>29>Yj<*v5bpa3*r=fVIAbH zzo5K&(pt>ELA}>AL)OpF$Q>hIRB2XoR*40`4-U{(t`qa|kXRzsgn8AoK(@CFp_KAN za_{B+U&Rs5iyW034)RRSfIZ4YW1QC@RU!v;Nz!_Z1YgUU;X&2mli@631L}_0+v9pi zaiSc2ZAk3hvbe128anc~3iH%c@P{n3lQ99k0~7^KGm zSrQ;sL9i9x_4=>6S!1la(Jtq(76FE0;O2l1NE8$PGUG|BfnlOG(*FXu2j-fSn& z=e7QUr`WUy_JDMSeMSV`qbtUZE3o`}6$o0kBx> zYsk2U<V5qLS`6vsdGR^V6~Y^YyWGH_W|TdOffqtYv|aS zon+~#4$lcs(re1FET_P}%#@~bE7Pcz$1<3q_ zyJtmrUmhvkx=c6AkTJ!9=amM}lIpQ3_jZztmpk%uVr%Si=;S*(x_=qI)O=q6GiDej zO*NVl(J2w&3=^nTrcAgrt8IPHN1X3VX7rLbv4kZSGL41ZCjYHL_2zSRDBuG#ut80E zy&d^8=kfz-`F?n4%VMW%3xr54u@K$awuhw4EGyhJT4myCP(bjV%RID=G#wv) zBeKUXaJ3fu7f$3snp5&aT3pJn-f`eSb_78WQK4iRm?TjH=A}j-^UX^pu<|k*R8Ppd z9O&G;dM*PGPirYtI|sMxEn~4+e=M8?X>t%{jc65PQH-u8QM>O3>D0O~cv@cP7X zB@cLTtW+5F`ZN#=W0G0}GE*DuTQ3S-*`%n?*xiy>RcPzUfmA*R;W>1RHE>`ClUe{j zNq}3&86=AHMawFq1Xb2wkTh~v(OpU(*FDw>b|*kx$n=yr`}ehZF=A#cml@xEGZatn z`g=1qwk!=}9$o-FF5}6iQGF_pDjvLNZrLB*+yK z-P213A4}JJOHOSlKAI`8T4N{%Iv+9^vl^z`ujNI~Gm{J8SsYdzT9mbUD;@gSA~IY`Xkv&>QX!y&}m9b0DJ-PZY9_eID* zcq$p5LWR0m_XgsEBC4=|y{4hCwL=rWC5V(bZe9>{mcNysY>lkBW~0!2Y~=iKl`F*69W2 z&?cvR54P{%Nqj*@izaNYN@juY$4bH!26Nh@I7})Pd#3ZrU#*d|%e@lk>udk6Nv}&B zl$peVXT-9oYVdFhSl=RV4=FF{9OSlHkCgkSB+(_~sGWhIq)!^~ZZ^XK2WyzR5#Oyw zeonU_wM@Uv|NW-)zZuuP=(#HL;rH2x4-bw<6+k^{i)I{Z%wMEK#^Mct~5r@O#bb^teJDnfu{>OGOX-(F?W8va43~2tAk}} zGzZSWmK?)bvB6RK!Hn`ZnJv&sBU#DUWoERuOvbX2e>yw`&x#@D2a_EGIr+f_`8#U! z5rt4P0cx26^i^YSn}de5g(9r>Nr9y0=1l zK|uagAX+)R>QyWk>cjU@!GLu!xjAB59xnIVmybDXA8$NHDTwGk>6fgs9khXGr_L~T zud$?PhXJ19Q2>oygB>pj08eTB^OA{4D=^(AUq)tzTEE*%zB!RmU@@QfY^A6EVrSS| zn}&7=gOhK?W&~5Zg1hx7V(8d*Sk`lBr|+xxjs?`};=|q_71Ikzzu~_PSw~x#k;{3< z+hodT`tPI_-p{}8?Ghu&eg%s+J2)Tx_cSCVq zp~#Awuduz|AE#mBX_cUFs373$_x!hh$8d-B37=q3P}*^(86WCj5b^>j%p`aCKkMsj zB@Mp|$}}eA3kV1P{XWus-m^q+-p4045Lk&au%rCu__gz8BMU~q9(lV& zzR$L66m$MtU`k3E%sbDasRi)37L7A?*dJOWZ?9|9+Z)#S@`nzO(D$>#>irE5vl7Wn zwd1luhnWs+s8v~D>FVmaud8B={83ImtB{_9&fTL1^Pxdg#LPdv?9Kx1KUr_mx!Ap% zI**{MLXpYer{_xM3qppW9uzTzs0vww8?V8wsN&5Z?v8aTrT`hNaj**>8cwWTtM}jU zW}bhq()P(W%x!IQebSG|;~s5{&NlGZk02LXdCMa?4OYP&x_1l*vh!t60aa<0Z|ITc zr+aqtEct5zDN!NkYr(SI!-+YMwUSSmuWx6hr)zUZXG(M5J8p?Fm|fmHo1xXiWya$3 zlc{h?Y(`32x2?6?Q4YZI*YK0v_}FzaE0pr>VTtsq3tI)uSJAwW;$WWqHjNmAQj^mx zPHXUYSm1K274I~T{K#V-BHy~|GYUHzTe^ zcE-}Tb3ocN0NHLD^8a-uHuV57?gglsG&jdvhQ#N=99SyZLtPQy7L@MHkpCwtJEyg} z;x030%ETI=_q_nWoKOGpPK{b7131szrv`iUBfU_qERj9=J`fff%Q}9%aNEAmnU{Z^ z*e5es+pM`&IiBrw@I-;FlXZ3Mx)>%urU8#z%cD|R^-jw-yu+6cFIUa1l^m0mj6gFD zw~I1}aGF7C(Ba>4)~6Sb7Yry=5PriBQb9Ic7h=o&KD9eqYLE}(`aa7bPr#X2{xc&A zN5}VpF`i*W*{R#Ae5}UdVfr33zNgw6OU1oFmMgijPv(aF$tO~E!TPTosKAtQDKqH3 zPknFd#|?ew?k}Ntaul~+4|?|H&3)GY_7GKfzPg|Bb=0q-z&qR`@gcVAcX`x)ql3x! z@X!6xFFY!hj(@59`Jd9wggu!)b^(c+y;5GGCg|POko}EzvznnSG($Zhhd>4ZN@>DSj|G82V%iMb!t!}(t%U$c6 zme&$>9Q$u$_xjrV*{qsMIjU`H{x7{f!6p&ms{d}EaodvZT<(F7Q}XsYmTEG*Q-GSt z*NI&)_w^`hwJ5b)|Rqcddz%~VcqgPXYHv6;9e9Z41Jn(3u`l1dS@al1t|e`@4PC!eVktwpsgf=RQe;! 
znaGrAdSLxX3Esks?qgFFndFQ3@`SAR@j2iSHrhN4hT6rY_RA`|6UL!R?8{d&KY8;upVZ1CV%15{PZ(S$ zwXe$OcO)p4+KMC1+x*KsbmwVWX}XGZ$x`a1SywlxUZ-oU9iDHQJA6KHs?xOBdnO@h z(%#Kwu+ro{(FN;w;KSw6)5begE)|FU>v&{NT~{o%O8Ncwku&wKg`5DYX(#9V?`V^p zNp)UFsrl@8w;3O#k@yA>=P=;F8c9=A`ou`^^?*@K zvf5U(uCJHxkctzB%ewf)%WMRZT)AIX%Z5t?V-FDL+Z?dpXRGm#eT4gM7hfNg z@Qr=%h=*#YoV&k47JX!%tc}?}cL&2w7H76C-rqIVm1Uj$=tr5=OX|~OR<8%>BUT@X z7avtBJt`FqV$&`hvHP$VUK({8Xn4-IVR5pC^CBT7F#hpSR)C-OhfalK8=hs#r@xI=ux(d-@M7e& zOK+{rf4Yu@o?@Z;n3MvL?z8r;oWV;#GntLQRF19--Hyq+5u6v$X?Bd0$0zqH0rPGb z3|!ReAwGzhUPVtV>pDMv4g)<5+uj-+Yko<;#PU{dkwlvBm$K(Rs--mgK3ym4Cj?gl zQvnr9U}z+0D_Qjex2E)sYkhw1acbVYG`U!)DcxJty~N8saJNKHi@#uh?{}_j@d2Hs zO81vpEsxWNSZK$#xAyC-!g!7lcf-leQSP$x@fGKy$I1SC&#$sf`@&d9sP9z)NfrLh zt-a0d6zbR*1{(Z};&fY^O4!K8TblFpQw{P1O zpZ?4eY64F`w0{-;^k`*-M%S{d?T*$`ZH4@Br6l*EON#e?^0#XBwiFLLdo=r>^EmaK zT5Z{HE*p+&I`#8~hv9$Q#tl*%szaAUu_P{XFUDIlRT;s$piG#pxODHfOHpURU1%+lUq?7oOQGdoPpI>FmnV46jz_ zuPqN8jUzM`IIjBllHBIsj5aH_>XUg6sG}UP#!H&p!RL(0)!Dt9zR0k4?qJ+J#xAV9 zDsYXX>eyGggvpCIul>|p)vl(4m4h&pcRIVE*fE_nyK`G~yaR)KPc*TE^dDGV%0)Qw z8jz7}29_gLtt;J_fhGyoa80YB^6ml2{@DiSS3OtZwhnR^W#;B}!@Iv#$`?l$4Vlr$ zrGM8q^s$zxEyId^S2q|M(YENaE5Fe3U$l`UJa*0VXHDzd21^pNqAeZ(Plqh&86c-O z{g-!XV)yo~8gIEn*>}hL?wo(mmYv@Ftxp@<6$>tJX*pahTE*zAD_>xGd^}lwPT|`r zu-Y;WZf`~}p%^mMB$!e&?m{u~pOZ$_t9%LF1nqf1II(9o)#H}Q-JF_m1Yo0Il~2uA zWgAx?Qtq-;%$k@pQM>V$yLTiWC~F-VhCH;+Qr{5dDM*b%=?RUqU4qZ;ka^x)FFzZ5 z5iN(Bi;QfkD(tvBqkP;UCL1r2vK?ni3tAY|UAf^U7yHmL2g5NZFpWhE1SR4+;>OSm zSM%+kA9(CSXgtk;9j^wWqwOxv>sGp(ZiE%P?OP1l-c)pU(Z$=f*HJaZpXyOzpu2P> zRj2x$xus6?JFBholr-(%_AANHj(J6>OWa|C3*$1HSqodT0eSe;GsSU{$2Q^4c-<-f zbAFw)WP)_KVYAj(8U<)X{ktl}lmE!a+Bs=F<+^O!vC1;&3&Sw^1B3z~ilW9)7Yz?- zot>x(`F>2Nb;AvPDNL@msP{|3`d5Z5Q@Q$6KPHO(Sb2fUG=l``YykkX-lfkRGd(rn z90pb=wS6A>#zPU;-5exfwEXP|t7U$ZI_K1Oei*9S zb!NBa4b55HkYV2a;`kPVV7f{sm~+{!5r=XOJnv#EE!8_{?x;Yr?#KGi0}25|;HH9p zRSM`+OMp8mZ;AcDx_FI?f*Pn}=FKxyS0}W9^>$SNm zFOS}Orlndx_R_03Z^)-KpdM~$P=;y4^RFoen?&Xs4Jv7g9&8~9t%*Js1O|92ce5dj zDtcbPg3SfubN`mqG$N>Vw!9o zQ3fKl1B6oI!(x5Hc&s1q)o%}QRPhqn6LYsM$y<$;t@HaeXC_|tIjF2Sw7orOn=S$K z>>y_OV5?%GS!Awr@P%^!N-pL{Rh3x&TTPsH|}?g9|EA zWnC!a1z{!f?XjBf8$$OX7Lv`k9}}otk9B>SH zyusIjVDhsNg{m-Ipfgv9?<2~j^6m3zP(vCJ1=t6mIb(>?$&z>Q?>J44J$7`wy44)xVW z*+6U`5#7h%JU>u4|NcV@Ino-IjeEOQ_a z0ye_;tGc_XAqGs+3-^8(;6;uzP(-N>!x`hGZwQEu&)3dlm?wbEhkSgG6YCTem*D`3V;9r zC}PouQ?*q2SO)_KSkYgcrN3}Kx~lR8nh8M7V4!9y9EaPoy`bN49b~iwa<+N+eVDGb zcG2ixy~}glWnOM~4K|tXvNbFBI4n|nflyokj;v%6DYT3Q_oF^&HVNlUoQ&{v|K0;? 
zn=Ac!=d;^i9W4-SD#+EUysXXvE<6%QeG2uK$ZN3LYZLK%&*%1eIIC<3Z%ka~y?L~& zBqr4kqqanEdgW%D3a%kH)mgJi8$vmomzI4#wuCL0r-2tOV~k2JdzR{1nu1SOK`dgy zX3~s`h^|is>CV+@@db*(k`MN=+W{HxIC~-IlAG}>1?DTcXaL=kpmaET%f7kgv>NHt zq8Ug8NGgG;qD#|l^qq!IGB!=pxUT9FKz<#7PNl&}G$1t^Aii++gg&q`E~|3IIkoj# zEZ;sdSFu`fR6M34-uo^%c9R!~`e(SQxz3O;$oIK=ke92KATXps%rY5P93g(iOEvZt zR+`M_@Ns}K52{mrqDy);<8mO~-}Rc}{;Qd#mzQLt=o`W=R%w0L4b#8j| zGTqruXgJv~_gJ1obOZ++)+*njnFXd`hqApG2a62i>y2g=V@-=0)1zb3;Oyqk2-j1& zy9BvPD_cf(NSM*aG{X!h^JRgeR<2H`dvVsAFUxNQUU^oP!ku}o5BIv%R7anzYy(EV z1MpT}ijA)?T~6&2#MNIpXsoRX=*Q5ZcNK}FAez#rBZ_(^58GF&6DO%QFQp32CM7`h zSBT|(H>IK-@!J|^zjVyXowt{y=sI-}Upy-MptYT0giUR)$q?#NcyB*{GxF=#-*OtC znyXUA%bM&6FHV3Ef!K8?tf6!{f%0Bfg(#T$s;7LTN}UkK&c|Pf?~7I&S=mREPU^F+Cyh)eTY^y0%Wd+{Ry2o(uN3Y{2jkXh_&lc#JK^}fuXy>Zt`9o>q&4Z!`c1XI zt5^&4tkW!|fgH)EzlU=T@DNJ3&~h1A)bW% zWi5fS9mBRS9P{Euh{-<6{qJ$_(R=QgUSGke1#XT^6S{jpwfYWJn5AH)P(>a95v) z9((->bWn<7I4Gp%n#W!?7}ZHpomjQ|mF4*>=fJP9*T?Mc#%-FIvL5#MC@WGZu(FMD zvCabHr1xUR&ve@Y&!YFb4}UQuR;AR7l1g{iiCZe)YgSb9~tIqRT-COfSFR`(=Xx%KH#-MY1gjCV=^uZYy>)a>tu21HE?b za(M9PPm!J`)z`Ay$);OqINSzUz@`Jkj8hB;+i z#Th8oOEOq|U0j-lem}OYKghx9!IQ*{z_hrheY|Kx%`s4gDZ`)}f=lqQ$YU6;1=7DD zv^YO<5}bDRuG3ByDcvd)j%2Up^nV|PboO9Y7#x!eI~-K z@W>1e$k@TTL8%|j0_rAe-lEhr62}n`iE7NXV>sJ6dhf(HHh&`{&Rz_?+hKtM8CLHAOG( zB1y!Tuh!7)(B|KI8i($Qk745aJgBPHk$Yu}E!sKGn^FnDi^a`=Jc9wChvCf@128oqfIIl+?Ix2i5bId6~$NQKaf0Yk^}fvsoe-1RMQt$_u#ivSVV)y$QO_PgGHMSO8*sWvO zAxyzL&7-{^-fcO0MeswDQeMci&wpGuHWe{4I5uUqgl8zFO@uW+yMFJ3Ujy}CVRW^_ z(sJQc@^6dE?N97Jd6hcsmCU--QkU+kXa;ww57`}@CQrxs9jZiv9IJ`UJ9`h6J0eB8m&H*A{}2R;em_Xa_48`T;IhWuKv zh(BWVRM~d3&eHLxw6*uShe1z{Ew{cmy7O2iS#IuQ&wa18FM{`pGvbk*uh&1O{M}gp zF?;Mn=#Q6W_wPJ?d&BrilTh zbCJE|$$dWJ?WD>y;{hyfkFoB1lJEBdZoHlDxu^1OlUA7e?CUO?rF*`rw>ls-8!kxXo)y|P|?Iz z_Z8;F_~e43Ti>?xWQx%c`Kl?jTcbV5`s4l^<?h*$E z1uq15zd4-1ZiU;MJ0CC*V9#ds=oe}FIS?0k4_G z)}+EtVcXQ%rTxcO73{WFHaSyADmwyfu)dw+{w}Lk4-SamZJtK^Wn0WNl{`|H|6cwN zGaetR%z5zV)@RXh^uxqoFQruV@wfvoC4E+hv>D%>6nrQD{R(rlCSS-_Ow9dMHYumP z=H@UQzqY_E9BI7u?Z+SC2^*fS!qx7BJ4J4&w;gDY6ST$gv|c( zm^Odn&)N6>@PM+O^W`!_MbOSm;Lk#923Iep&1-~;#-?iS<+j?1GX|^389_UD5JTZUA?j|7Ny<~7g`T| z>_+bVzFHLSZ)umCw-X6VDA0r|I{x=hnDf+;Lu5l-_w3p3bu#I245()ZH&(1|_wC6a z*MRc(%UAuLm@Z*=#Ep!d@l2wAAIqNmI#S?{Dnqt5Iqi9Nn_;N9^<$x`U_(b4dCSx& z{KTgBUJ+hQLJ`}vP{-SJNC9=8WPD;*v{%N(%jFbBKiLUvLUW4(tzq=|4%zMs9L!4wpI^cWcBY~kUy$X42#D?I?jnyG{E& zB6fduhg*5`Qduw1%eg^yOA#Vo`)1wJWa{Pa+NizS_Ua$J)GUY>t_GxCy>A(MRFufSKY<~$s)Z-_$`J)T{zggo7PhqalB|EB`e)Xeg9BFuKa@B_g zI;-9Gm6o*aA&2j1&D*xOOmDTQ`})&oU7NSrc(x>jceUq@apHISa+RmqQ31ugr^n)U zEc?2@&#S$e*0D+OsU}0sFj7*e5`E$1*1dgEx1KdeTKQ7v4okFyW{Q?69 zkWA!iKyH$Qyp-Oahh*C4Y#=J&$@d4M44lQo}Yi+L;_nf^LZlkO^Q~+CjqFg8Lq!T_~EKY6b zpc_qe@(ZoZj;w@CK6qy-^piyU9q~{wUjwHrWQ=7PyZfudVP{d@BT-@7{$^YxBpWTk z$3s48sXkhE-PJq(&1L89#@mO$AX_8)t0T0ZtsOX=cp_6yR&IBC$goTxuI{iqhPWxd zd~(P5wPugTBHCN>f05(-+s_(p(J=cvJ8{jT z^YXuQfGFnFL9pgc~lc`>J}EulX?{`gz{YrB`) z7cy^zi&LIJ>b3PA1fk;J7B}wNy>;Ak=;`GznERE-5bd-f`*S`&9*NeeZ#5svAFW2k zQ15;^_xps?)T23&Vt0;xg0qi%;Uh{=QuM3d-eYt0$GYTW#EA8uD|EPaM;9 zwb4Px#O@ZOXNT-{|3imPIxG-9?B?eFGK{<5Yu1T$f0vIreSNvcl&nF+<#JaXZc(iN_;T)F0(zIh3|fGhu_oxb*QeCiVwkg= z$|O1FZ?;s=r)s}LOK8#@A6lA|ml^;a7-SkPB!v`+*x;N(F4jBP1eOa2TR~9&uGH&yi-S0W#O6 zz1J8dl?`pXd}8>g3qz_Guu%f8Ob`!#fsGsxqXIKdK9Q#A38)b4&H@;t&a6+8K_1$UZ3`po0#boS@O36EfyV$Q%+pP1^P@)d5KG1LC9=H>E;%)tH%- zJ(9Bu9H=y}j(iG03n{WuB6yRu3>_o)f=V05W$)KRPmqvL#K=&buXpDm&*5W3Vw6&Y zOT{m=w9x$)84Wf;zi>m(ish$pNU6X;;YwWyI=&Pl}OMR4A2%n4`%_ z#OQm=r=JqgnRv8x#`Xz7h9rhsJuS+3cT(K}mCiwT4`XilV?#;Ex4bky55+iy{+w}Q zl92frgJcsR0RYtOJcRPigc)3>T!Za#DZ;_Fz5HPZ=CpU${W*Xl_%uuBG8N6XO=Hjh@`$PT5EC&-n}CpJuP@BN?MaB0HTfZ8DLkw+qe3Qvn=JMF 
zd;#!E50n7FWCNfyS{V$F2$1k#AOS4(7qo&}6e|l0r3U%q0A#!qdTJic*Tag;px31a0zt-JaKU8$LKBg8VGZs? zLEM5PsST&DO~<@Rflx)zc3No#50cGpkWdg(rC@X5acLYjnTVPqpiUA?Ic(HFD@CCg z-X*epGLL8!XIiYnq!VZ>>Clsa<4?Ho1C%%yF~|}FYQk`505VSV2+@2-3kht;a z7>SKpf*9ICE3F1VPqG_k0mzW~23?zSKEFg7(2tr&(o`?#D4=p^<&ogib$q20+_Ouq zDHnMqBLJA_z=bRgP%}ZUf`w*~AUC;b6Ewt5uICW}=d0j+C>0X*`_f(2>Z(BWBoE!r zZ_w>&3Lzk;aOj5snG1xAx;N2$icHJLO+X&T`8EbZ#xwztcSwpxCK+gj%hF8t5C;8} z)*6CwcERLqryAaV zl&4(u%si?^B-;Z(^@`V|Ldd| z!=7-_lQi@_p76yk%o-9kdm?Aq{hne2`h9JMEU;E61!f3?XC9uBkdPk?Fnd zmaRQ$Al>4tAOZThLnXRONqrg3SFh?%j(=6ui7pL+l5T@pA)&hn;<+fmRnFNwicT;3_~~0+5%E;r82&txB(>A!g7{ zUV@STf|LKigKHCa?&N{+1eAn`E}1`rzpB&$fJzmeA`E&n>K>I~U@8Rn)PlETkW+x+ zaVi8)JUPE6Yrxg3oQqGO^np8JO*CP%7=d^=fA5K@UzliJOxaoK-`Fkd zJ3fv>_qCv35ag0G9=MSa*Kx>ZQkSasgfb8y;{cG)1>K*Q1K*V6l+2=%CXD{cKS=`1LK<*>3ugEzlJMRTs4&1N+8lOcbO$;T-F+{-0Vo>iPxxdTE=T{m90OW<+ zr~w{JyB5{M??fgQczI^Y-h@yn2)p0%PjLt>2LnrDzk~=L+>L&eh;pHUt`gDS&txsw zmszp0Cy2-I4V@oIgW6-D#GaR9B;h#n%c zMJ#)PFWbnMJraOqEw?~IKb&vph+)k* z#D9BaITC;@2Zu~|pJB-^q8s4Z#PdElRddm0dwdqe0qMp=zIZQqD29_nF;dEFCm$RT zGzZB2{Msz`$rX=p`El|bZ3%Z~?d!3#`d$)iZ;kK!7F|m>?LGxj-@oKuzRml*bItxH z4@(-#XmV|k-I61?^7haEX!YB1ljYYkh0)$_)}j7k6}LvP8?O;SFd$o#Cq!Y>cwGsJ-LQ;%{P&V z%H0C=R%mXxMmlBYqDBn3sqyK`T<;KCIHQs77TvfCmlZhg#w$ zZ(R@Rr9L^;aOHq5QqQ+Nx@X5=Yvb_Nlb>HdxDYDYD2+0w|Av>^Zy%Sz5JvHg5@Vg` z7ZqHl$q}k?yw>eC+zs{kt(HZmP3PjQH4Hdm1z|g`7+;sh)k@|?GF6E@g_d(U&8^l;;8!^YTa%oP&VMI*dr;oXJAQD41HX_Eo)dexi{OC=UK^}LGx zcHw92kKKn_%~dHzE^d*Gu7Qf3wOb(-r?In(r@4HZW3Lr9;O*I*Qvt5rsE=x+_UHvb zgkr31IZVar57|lKU~$sh(^%GyUl)=?Wc6y}@B_?q_VF@0r;E$qwkRE<)ihO;z4YHI z`53lC*t<6Xtr8q(Hzrj*Db{A>Nsv$Ffz z$s@k-3wiVXi_5?Vb@0)x4~l78?zK5{ti1iJ##*%XjStg=OwOEM+pb{aTFQ>KL72+M zM=FLOEokB=L`9zcnAe1(vsF78P|e<(b4^-DHpYq{?Qrt@a4sP;aN(ju8#fy3XWs9l zV1I+xYFAR&wmWPt-(P72srB#X;p~K%+Om4Sh}D+Uw0kjl*mhQ? zHGA9DkldFXC)=N;sae-FiE{UCZ^&SxeIQcJfCRLSJy`ERZWP8H@h0hw2;uKvj}8oW zGtO;qvQHRNZNLtj?64{fN=`DWS>f1{DGaaldd+voo*mlgEE^vh)cMZ6?{y`x=(G@$ zs@a3y_HsqxpV~G}Mmz>{hzv3P>+bbvX;2|iuzCItNZvM3wmeD#(Hx~~!k>)VG|>u6 z%&+gW8a<7Sw<@_zVPsuq@V#tV@;mAbLp8F7J(neHhtK)}43WDU-KcRs`*Fp^^+zj4 zFqorI#v*v0Y3l5%Oq!lUxZ^w$Zt4S=@1X7q5AJ-W4kMQkt#O<)O5Yn}MZ6QKKFwwS z;Wu(D1#`#FJ7RE~n-uFLTePjE4@RIJQ~BbUexGUjw?qnt(B5ij@#KAZ< zZm*pm(@y7ck7!Nju&v_CP$2>u+|&KBSE#Zqkpfn&ZiZm?Tb<6#8q!u^Ex8}4KTWwq z!G^yaQ9EB%el$6wKs(`n?9J|*@e|-8%@A)Vqu||H-Ghaac!E7_`Hf7Gcw znq@mJM3x>C4(c43h?UykTro<k8L(vce9&isIg_U zw>8g7!P4GVtD+k$mqV?y*I+okAb4BwNely(I#kDPu-3w?Ke4vCS35G`{iZ3+54(Qq zZCM3xKGi-QpX(j67+-C$@7nwmI~5(msTGI%m7*ZDoBW=&{2}efWt(0-z3-xiWS+hS zb7~yM=A{hjiGlwuPJ8RsKAf!IzF=j4=Fo~>kFAk@$4c?`T9o^hr<`v)S6<{9v0AFQ zaq1f~>@4Ja(vvZ|Ho3pT*TPVPAyC6Pm(5IeI?~RpS)@NVRhrLitP8ie@7OJ4ihfDttfB4-!P}`>hGVQSuRtZR2zZ%Y0Yh#*NuVKy$yEM!K~<~NCrKyM zwa3=;oo^v^pe{?BVMqbre#58SFdu&Eb~v$HI=k#b*GWF&h|+mK>t($^5Lpu*iSSH! 
zD?JB-NN<5(%(}3-!;%B}u0V;WwRS-DhrlR~@2wV}KO=eUu#6JtlztWQ0$Xng9WF7-;_(@b>T}Dej_I>TgsyvEk{*eQDRW z8U&RH|4|~kZM74$YnZ%@)*2G#t48=^4)8+NyLb~ZLqC3QsWUiA1$kM-yB39vg4H5I zJ(rwsw8vJCx3(Pb*Yc_>N=ZL(H9q3mHan%X9KXG93f}m*k1Hkym$>hF8|-D_I0eTg zOZ!n4jyaO8$i;=Rwy-oVL)ZT{n@}04RQKI$>!Qr9Nzihm=3B%`ms9;QplFRDZmAoX z`HpBdfS*oZ`-f+Rj+T9>;qO#zl{=NcMF_^-9%WgcQbgbU*8ta}!4AX2j>O}$5cwG- zR#U^}%M9nr^zEl{`57X_%o9Ya5LTg7{4-iPL1an(F5K;oM1E!;roa+&%Ch3~)6&^T zA!p3p;A!IK?Jqc@RcN#TUUvL;Dc8JY6-6S_3Y%e|Wdu!(z_q|qsW5vn{h@|HQ^*V< zK>^+26RnHMKdgm(o-84jg{?7bNv9+&+3dVxwq6ejOkqn1gxumgz zEm%1pJ}aK2)xAQnl&iD|((0LV{3qSC%NI@)!BWMrlyaEoQZ)$K;k>HH#k8+P9-+bc-&B2e)cT;Fy3^XF^V7g)VnV?`;ich{YJICB&!0t3w;rId~HmhY^bP2Zx zch|mY-OQ+&Vzdw-jwBcTMlQFxNMlYmM8x*_f%pL_&0H$Z!kg^%Dcs>Sya$cEK98_4 zLLP`|4i~C~@S%xIHHl)c08`Zxq2(?cr1c4QEEb*ITzqtit*Widl|=>@<)5m7_t+F9 z^Vz;Rg(klz#9W`|&hP#F=ctVaXv1PZ&O$Dz}v^e==R)_f;~z*=f& z=Ko>r5HakUOSi;>4aM4qz{hz3w~7d0c-)W zFKn30yLCM1L74bb2o@UST(L-neE_v45O-TEBSD+1I#KXcArpw+EBLmjD{9?X=N#qc zpDHk@0$S>1W~5LhsAXrO_Q1DzO@Gl{>m|BtGem_Xqqz!A2xcFqvhV;c&VdaJWL3mH z#yqe|cdOhCq9XbpynNLdE89@m&9-gvxxEl_#f{ z9{Cd4F(Co8;JsYQxnK>torIN`&RR6B)x-+IXYq%5P^(bZVV;*KNrp7q<3fVE6AGP- zjGKDsKyKHH#|RFeDiP7OxKzw)KB{{v{58rR#MqGoGk;a_&tpW?`FNcxCaJm_IvBx_;x%^H|ja5q8?8&IAlCNJ#hdZ1pQ4iw)M< zF`y}FLVSK`H@%-$r;4CQP}yZyS=y^Iz&UfA2ul1-D}H?j`MQrX%JNnOBr(-K99a_g zX&#l9hjY{<&~#VxVIoBMYlP-sxI8DY;r-LbF5H!1h@YbxDFOeF_5Rb_o*9Qe8z*>x zpYp=^tQ}Zx@F>d_#Bi28D}VKvI|@s!YVBzCGczIu+Opz>q4oa5Tem+cBON%#5F*y| z!^B=uOALt0;AxR%#ClsfVe27u*E0>Kp*1Hs+Nca6rtiC621o&X*ol0t{|ezp`0Sn^ z2vDzykI&w3bC+Ivgqt>s&9JOk63Y@nHxo7HA;1w-*kN9Mo*OiT$21S7N!1Z_OM<4) zF_j3%`=OMHbIL$%p6QBXCZL!kimcbH?0z5CAr3?p`!cUNu*C6Y!@kf<50M|{3WJ60 zqDqrVAA^`LKCgc_bCo=%K;fKLc2Kju_D5MyANA{|$n&odDcoX@g~u{SXJFqtyLC+K zK3`d8-h!zWxQ`z!#b3^AW})PO-z1w{K^l17S-HecB@Ko7S0{=X*N?qIY&3G6AO4;0 z&J;hMtt@+gjqOW0ZQv39{xdtq5is!3pT4A9^1!;YGP9nn*qjhQ0nLzLjLoJmqU6qB zW$q1S1>sn`C4POib+ID6!dO$vXj`5DsxAiAeK}OJ3MwUrO63u6we~qxZB^-8)`KVR z!n^IS>^)GbU73v_TMol~R_V?JhJy&=Bq?>sVJ8ommW$cp0P(iw*?Ds2xaRYC@;D;Y z3jS=SN=(USRfUNGfe&kk$aD91go1g&*IpI>kCxBZ7}wX}iOn%pIn|TR_bP;4IZIk@ zisv#p;?p^03C+wjpRKI(5kVoHhPb9YKr49#67fGD5-Fq`6Uc!^fY=dum~9KYU3oAA zJ9unQ%Zad-K)+6p4&h!d6TLbS7W?7!(iCTLgQ(nJFz|lZl=(c3 z0FU6ovK?c(>k=!P6O0q{BZTZ6@)qeog!}}b>L8@3NBg4sF$h7-l5;LdCMcK$pLHLj zn{#9WR@phbZnq}RQfzFwiyX+S*-Qy69??rA2U-HO!_PO|)i*iM5RI%4?T3=^=b8#l z)rn%T2?6523Qb+Dn8sd;V?$JygOr zY8}T_8zc+eyfU}P*dF#I^{V~({C5>t2&;Z+Y%Sp|H+-F4pM?qeutZ#bEFc1UG28OD zZe|g&EL$*%fF*O`t5Ew0AK2aaJ{k@YKUx(dW|+?!rYR3R`W+U#GF&2xEQ!}_3UTxA z{#GK|e8qi_Qt9^A$x3O4kR3{7wv2*_prr4o(tiBnXnf&xZVK&u!|8nXJ*InkfXBgA3`tUyD{|d|D1%|D4NkiK+(WU0pZ z67BKte3x0-&6^7z=3L$}mGN*Dy|T(mL zzJX#l^I$Vq57-pyU0I^*y`ifTSQ$%0I_U3Z@dWl2lblcNaivXfq48jin~>8(4jm8Z9-6HNVrA%9HO*Ijc;X2Iq;IqT_ZB{@_lJfz6QvG{<~X%IsS zP9ADh?#T%~g6P!q}1A9YO*APC9PHp&xD%PB)2XTY~|TL_yX(d1KmnFt2M zb(KjW3euO9(|l|NR9N!pa0<>Vb(Mkta>dpd`E^s+u@h`y^$wM=K1Y4I4(H^(>i%mM z4>Z)$q2pCsIjL%uzFnEESN0I$Fs7!%hTr8$ZF}HM)Z&k)*?x-vsyqxypO1j+N`I_d zd7@|fV(ul@{eJEfs6hycbNjT3JOdo2{YYMWob~X`mZ^~xo6(0_y?G(hl25-<4xTrU z*hn$e8umh~gkRWK@hffSW*WwB^6zs|m@TDxA?zdg!$Uw!j4rqOo^~21-3&9vzapnI z-zjRFXeIYApx;lNbvW|%^V*TPej4UR$ai*$vdKx}U(I~W3%9eS&$Zi{+No8^TIJA{Vh||P`!A_Oh;)+U#*J#;L*Bd>` z3dvuZD^M5$lQQ6fa}+Ad)b#)N2#mtT2wlI$j*O^=1eXI244?0>T^

(3=_PZ)jK8 zNo9YXz~nMd^@uFmm`jiB9x`#$UwXj)MZ)whr>dC#5ts0+_F)&P{T=Fn(l~bSF4xHy zT_VSnvla5rGwXCD6T~{y~hwb5PL7yP8#qCoUAKqFM-`y1nnh2p&M1-(FJkvvvI2@X-Dp z`ulwq-`@vEH2d{`cw&c!qPY&Q8r@MUFsJt(7~A!VVO!~pKSS9*@upLNBP8>Cv00jj zl~_}n-otK0$Sikg#1-q0Xv9slcScHa)mf1N(fhfN1N7p=+U&8qDIE9sUpsN}uP{+U z;4a!uyBS`?oMVC%a>X<-CTuxA&9TgXYsgb}z}C>}x{N;3yUnk3yFQ0v?6&M??6mCZ z{#Ia|w;Ft3_W)V48}vsLuLpwV9IA(s;s%SA-9a4@hhJt+KXmRlm89rp3)|0{wq0`@ zw;mJRLU*3i;Qs3hSwL-zuZf-{z|w4u?*}tZmO**P?pYzsH6&+t@XnIxPWA#?lAbkP`D(4p5jD=t&2E*CJ+5N9dD9 zp(2#sN|Vp!L#!4%^6o^c_2bw!x%%d>V<1|uNy9*cw|lB~S>p?l<2}$`-oBPh)%?gX zADF}ZyBDI>CT3`Z2=HQ>%;tjvsw<3NmTUJ3PRjI3ZzC4BYR6c<2p#RQAHZY_Wk7$< z7Gt-5pvvbqdC1~P9d|Zu^S-OUxjfYU(GS}eyJ?zo1J%RCA0XNNqf!T)M#bG(n%l$X z)FoJvqrk1$fZFdDPIp6Z(C8L>8Cp-5*bV|$E~qYAqiYHBedAZn!S{;s)N+XSEVnbS zwl0Y$GcDu0cA4RiiI1Y<&7)2Iq|AvZjiFWe<{RlO-*`iZw_HyZF%ef>XGIJbdTre{ zulNtB0lhV5E@pv`2%n~_b;g&QW{zHZTN!sQ0u+TEOyGiEeS(XrBdVuCEDOQmU5GKY zZ6-Fu&RM5xBeorKg@LHV0CsJp(Fd6xxyD{Eil3Gdrybv zA?pQKG8vYbZm9KEGU6~0cp6j{jRhP>ppNeylik=7#ghphbu9%jYY9An79uRJ+V zU);YV++BTtS@BQNJnBHspe^DXCL-C@cA#e;1(=WehyO103jt1^O(#F#E1vzkdC;v$ zgLffg>d?bbH)0!+X^NK0$WKTfb-!^sW3L|*_LjhLUNRk;0PW^)OTo)H9r2nJVB=rX zV6|%BN_PB!`bYa3$%%<2xBfyjtBIa=DoUrlJR%K6=+7}Cj>$#eD@{$i-bd`Y`f}HA zmFR;+PiuA=Iy6fkmCj=aR+dAj5U?u|IShyL@1;5G1}NxHwExxynMbk|(3TK4*Ln#o zKV^ZG@p?48%Ked9+tIE=o&@iEw>5T_xcMvwCr8Nc=i=^Apgt!7t&$bfCFZ8qJL1r! z&`)1<^j6_)oxL>=^4BD@rqxBA0@PMP7X0L;-FT1^9c#G{~je?ay8VhpOwoF^RsX0rfZ`=Eu1KO z0@6nesOE?eM}iB?GVZq3en`e{P5Rw;oZWoDrYS$o=%`Ku{)Z=uzGO$y&??cT8@v6w z`QVs+AkR{2-+g;L!9J@_Wem^0-8}?L0A`WyZqI(y#xL5Ze_!ha=+mvgev=^c*=L>z zb(*}}Jbkog67HL?y;nQ+U+3ik7|mThYVI_>(NH~*Q&^|a8ki9BC;Y{)NnKg?LiFxU zZ&5C55wx2|QT!9?$?=J{S?^N!loh8Z zg`b03NMpv^C{#{P>KWsw?vOWx)q4DwbQsS|tZ(mQt@WPcsYuLtetmkbxq1UV?{fXm z_5Qm`M;u1B9J&XKTR4w%Ud%l3_eRN`hwkd(c$=IfrJ2K902}3-zmVYvgbg`mG?kR^ zE&KL51OFSPTg)>XJyfOfpdY#2!)K{|3ov^>{NK{XuvV{x9lNky<0km6IGJW}ZI^T7 zppie#?crNj+vhGIAt+Z-22m!H8wgsLm&v&RzM%c8s=s2dD&AiPr{apid7wc9F&XYo z*W46I12#r1dx0y)pGob$i2iH#T=wm!)BFLqITlAEm%l+ttvBaa-2?$}X-l;Q#-N!3Db! 
zb2T?;GTJ}z)m9MgA86gP!1=kmb0=UlKBozr|KKa9AAl9bxTd!bnmD?uNvP{8{VoK$ zTRg*|(%qCKqlE)+o(5Y@58#u>viWuR(@_v43>W5VxH@3DPB#ar*gG^UBk{(onS@H% zNEXdpGG{N4(GJf4QD#8=x|d)eE0f{oEOfUR@&WKu`Ivs-Z9C$jfHi}F_S8uZ2Oc`8 z@?Z(F4GwNQ_xzMK7Y-1XX#3k`tg_r+`V8t7Lp9f?UDoMan;B-oQVs3(d56V!5X?K` zqOTs=eJjpgW1WOGcU7yD3HV5NV505jL-mq%>^{@0Wyuc94W2iqRdZ?P#6c}3B=4GQ z(`PSR5M6kuVYeq^YZikTxZruyX!p+sFIm8c;I_>XQcQBy$?8{Ghp42xdH)sT`6Pq| z!wt;1zCo;bQ0qk6e$xzs~Umk}3;GIKm=IL~Uuzn?=E8R^eywbO7GLEE|_VK~CvrNnB zezniF$I^M9Y z2#ePpn*1Uo2V)#*Y;uj??}~OeNq-ldG8E!=8GK=iqVib%nycQ>z!o2f(Md?|rePPd zyU|m(tuYY1r2a^`NuJVl&dH0($q@U`UT&SfcCwI)^}%Nzhx1_XC=VC4Ky<=_TU_iJ z(yI|$z*_-gY}*Ere;v(WFYSu;Iy&vDvu)6k2R7o-O+oJL#6jCvK6Y4!`Chu`9S)~_ z{ub-KZZ6#l|N3kt#5osYKNL1UF=#D#NXuRzRNS}}Cv??myqxgE|JbyvVfX#rB=9Ex z0bL6Gw0(=_*JeQBY(;V3@-t|Doo-4*n#j7_IFPBJwLC)v%ci&fA|7BNF@qR zL16XI#vdhrY13Y?k*%8{+D&J|bW1f&Xrx~M>o8%t#mk%^^K&BG zobWen3>4Nl06MHTJ~Vn*cepe&lgS^HhVklR@}{&IC6T?|K;W+jY*9tHB+ICR)0# zKCcQWMA!W80;1aSPp(wqj*gbZ68(Dt^cZnU=A1uGpn9C z+WxLnTlvUv%a4qQrdE2&Hz!^sEj&116wu>#_J>QG$?k8gOy=fSBEh+)3lqd6pSk4f zrK8|W|BbxpeD0O-_t(vpOZ|#oW66KI$MUxQxc4_BW-W7~<%?U!r#rWk>DsN{Qkmar zJ^vMZA3{wi#`sPf2KbcTD}R;jZ{iO7CO&ab`tZ?n%*eUm%WH?HRDiEdq_F3`DpR?k z#r{C5yX5{w8m_t$=Q{qgX+|*`TBeun@$}4`+p>?eWgKlz?uB zYSJH!kwKu{4i(Q+C{-EOvBa89FO-}na0?Z>rd`Z^;P-Z~lU%iBF1hdB+(-#4x47jI zd9d0k&|B+5ukXkHuOr>JEhbz(+?Nqt2VtzxP~;U=0FRAWw~14Ou6Tz zxgprRhmoJLXqSfrDoc8pGCYJemSVhz*S!T?gHnkib#9p5~tCZg9)PFGT8GThv!AYe{ zz4M^t_kk9wJxnooWK(Fg$}yb;atC4s8*z;EW*Xm&8JJPd^KLe?jDqSYCVQ+|1R9hy zI4OGEGW-3f^j>LDfyod)HZtQ5>&qpprawxy|Mneys`>0l!P;`z7-{*Z`8C;}Jje+r z!N_J0d6O7(Z{f`_?Yv>D;aL~aGjwLy9+Ky*=k@AmZ&m#q9}F_GorhX6K#RP=y%Tv7 zd&;_eAGza6_fJq38oTg`7vUA(&E%nX_arFqb zkCFO2nR|Jf%XrR&d~W5H&m?TahM_m^aw`y+vK;@Et*1m))p>TzS%%(ad$QW$$gp?r z0jbKhcd|VWB{!TorCTd`c=>nA0qvC=U)1)&*N^4vYR#wy7Qe74{sn#=J~(*d%xYN- zguTx5!bB2Z3taV1{*!3vOztkyzo;AJlKxzEQSUFM@xxm!wau?@JI7_HgdacmDX>!g zzx@@TpK$*8sQ2BUDBZQMZuIMeavVX{jB0=EnakeTO0#_*$qE&UzOZ1@J0^lAx2_{aZ6bn2m7`sX8vPVpPK%A(f_&ADP#jnP9WhRY+AKrlS%@s_e8IeoU{g3$s?j}^Y z*lnhXEUPglGK9Q}#8^Pi5T?!YrQ&lcecxDDl9NoYwe+PaKQLGmXn`(0L!5%tp%gt5h%yAlDGs>^~r!At|<4BKtgJwkz*Mcxr;-R$Q&L8&P z3iI+fba2|0;=pp=|Jdw@E0z;A$T#F$l|a@SNo`n5B|6hpucI+mfM`hRy1WHt^tRGD zy2U_HURHAOy~~cV2fC+koNTJRl&5Ly&1$wbTNsZ@{r2CP^CuPMl2w|92UH(2vuGs; zc;~%5i~?eHy@S_U-h>S#bPr}--ClIH!4UdwES1jO?3dHzq@yggPqZ8HyeF%j7TSVwoxzgMphzUF`}&cG?ji2 zP5b@(ju2exrqVo%xuiN3HXn8PhB!-K&b~_X*zr~2bD>!~1_{=@9$KLu^f|=h^{1d( z%C&E3FEaH!hq7C}=4-NRz50~;PDwQPmL{_rbzB_tQLcPQ+W4WM!bT~@?rQmI*^X_p zNDB{af?)EvcR@xgV)3!qv+2@q7nLJ=iEFUS0jZI<^!hLQU!4y6{Ozcb<)sg4+cO_` zwb0S^`4d0As{#TPTgBD>hA26P8@jVTl8_y^+HapTzGIs z^8CT>?-@>;;Kl{ZK?k^h9~%wt&JL=!);xS6%jWEJiw_=r>lu<30kwzQSZu$oLbJ$d z7*ZB#YVv8`>(fH-m+cR$Rm;0CE^r9xlVoM}-;h7OLr=0*zmoT)Bzpg&{N$?qS5jyR z<&EtPujOPFUI;yjODK*CkxRSY?vH(1byk1aY;!z9>C@M}TTTYV9}Vl2P1*A;>!ov9 zeVtzU?fS&lnUNO*Dq$zE#TU*UEu9w^4xrMq7(-(fGHw4{#Bj+5&=3~_vM6SxkwLaUAcg{-@ zRCm3z`1DWzHcCyG_qeD(fy;cqmd{DuulJ|hoc6T~?i+XHI!;$~;lCT+$j`KkswP=w zt47GoK8=F)8?*P;Gy*r=FDU$7HXET--r8`x*Yb;+f0~5F5PfoNjKD~UNTg0FxAR0a}R62`)Ouk%i4*0hvu{dae-xOKfKU``orAz1u0XoQ$ISwiaa>P6qkuT z9wLOv%D`eJhm2RDv9mPRef<7FJ3B1IiezUif!+qX!dOhtDmsTyFeXHkN6`WzQc6Wy zbTjrZ<-d_&rif^Wm0Tu}))c^E`RwUcSSC)CL%ik zD3XZk5}@vJQ8xr~O=P)#0Ar-ek!-f|&nVgxu^?)0xG4MFQUTD$o&FzUDc1zV7Rho_ zs+<6*Is=&GtX#E3A(QgIR&ys2F-PaT<(+VOl5>KH8V6zSfdGyM-YzPelqi%DSmPA5 zRDd1?fSE~w+($eA1Cm|D9L0V-wnU`Qu?#__UyY|ND5U1H^J|F$fHVQHWgn14 z1+lOGBnur|3V@4=Q7t`|54{_lVcn81eHU$YJr#mI+*8nd`#XP~X zbC%H4v*=@MsPy!m;grIs-7t+Z^fxxqoceYX+BI7srl6;|=tm?Bm{#zQ*4Sg^etx{fgVq<5znG3u+U^83ScUvMEQs{1s)DV_Ca4H zvQKaWbyf~t!JSicMGsLbMkwgVrsn}t=w-{vD>fnfe9@C2<|}|$m0~&X*A$*oF)xYe 
zCV;vWA%O04KL`nFIT<%}t_gouNR~?$K-+|vPki|$viwsCnwkJmP{0eLCy9tX66m!b z(2fwGLyPSLq5tziODX41Yp_Hg>_f<~Exqb01W`FfRGo!K|dC7FG~K0Kuz;JfM_oe>TSSTURhPxepc}kfNK!t zU{lOzA1JrKLz}qO=Oxfa9`xRG`~Gw$5U@TWfj$NbyzvM>5X?%Pd%IIM8Q)~2 z5fQCcnc~ED|1UGbr}qy zw~p|ss*hVK6rjK|=nkq}FBhY_6SEm*d?5jQt4K8)U{DI=T#kkBp+KxC9b*#AO-;0X z5_)nLJ>sl0j|>P<9tK}QBH(K z+ZQMT_4idy{U8zZN`NXPq5AkJ0Q(b46z-9MI}H}~NpA}fMklkRWEQ+u$op8;byayW z1lyRCQ6WJ;o<&y?j~(Db2Di$N&thgon10FK+BNhT7HTU91o`DQIr_i*gNzA2U&s|K z`(j>y5^z>w`##AbLyAvedz*?BU-G$663!2C3sP^R)=t!=o~yq>ksIM-{x!49RRl=| z|ARB-sl%dnR6n%^xPM8xsM=7}rzP|&E~bWqjM|31hsUN|Q&RNr0`lGzKD?h=FhN8x z7Bi#*u-UBaB(drZ7JUE&9|y5aq}mXr#7hZ?+(eE?vIsc@x;92e21?*hfc`cY(*c6b zk2fZc!uC**k7lcGgUVtZv2Z`d$KrdC)m002paz0tL(p7a}z-6)?gNn8=(8mF*(cvMJ?^Iw=vw_rpGwh{1u6rIJ#a1hq}VJEqW>7Kdf-}X6m028-2NVVUr49&OEx(eZ@xB zjQ=v?Av$Osz=we%{2e(BXz@0z*~IZ!{JjVubuNK52EwZO?UO{g2nl==SjL90piWv~ zpT=JvB4VZ}wb7#w*gzR4Sh2L{&Jlm?{okx~MbQ2*HE)2`6up&z6gIxdO9a>z^6Vor zr1kfG_lL;$WO*J%F3cC}Uh~2x5H%O{x&x2l5ing`kAk_UTd>o9LiA(o#k;xCI|#sa z2Q4mcc%R&p;&Ys{vydMKPo`K=xUa!0=s*-=9Lt%RMe)t=O`a@l;}+1rpjqKl$}4YF z$}l%~GU_SCut;=)HpYsgSQV}eBB?Cnu^+LEk*Bd=Bnn_l?3Y>j??SYT1Z>8AsgwTX zJw?72kFqMrzp-l$6^L2*i~VkaT|5TOmcXdo`^J`-FzN9OtEnsY0x%hiv0Fp`c7%$k z`9lKP-?@bi+UJx*j}=^D#ge;Hz5_Sdx@XIB6ajoJ{hF)pvd5bb5mcmz`esUa$Cl6= zo74KBpQreyOR^QsFJgBB`-qe_S9_(%w8`3u$&jn5I~7e&Zk5@rlraRZ~+ zE9g39dkoh9VbSS6Egl^}c~{RZGTwm+I-aps47MUcF8AN53YN2-m5sfP4wQh?1nd)( z_D4F$i;?#deKZ?&f$cym5TriNMZb`e&i2hNpZcpnAtEQqwRe0fv^Mt!!gX~5@8^ES zyuxE_N73V3KRqFzNi@USYcT7Meg2@qBBi~59x^sWt>_W--Jr^0))WBpxsZr(37hof zPv}S#RJp^_^a)t{glYR}p-6t|)PLaiukvM!UqA{|f|pKwnJ%jQJACaMyqwWitY*SC(r*8G8A`3w#3KXuEIa0v^^5uAx&UY zl$E_DaE%sJ2dM141Sybyk?We3>*Jy>@=*xQUelU_3v2nglW#N+p8s-n%6CfvL~%|^ z$q#d?oVc*sOq9Esh5$t(u)I9KQ+HPXd*tTE#sE?6)wCBK*{aW5261JPxAU}huADn# z?p2KKnqZl!e~q}cv+IH)V{Hr1E9%}()0f}2jQ4Fh@Oy9KQ2K+aygj|^jH;p+eF@U; z?XPcI9CV2CoZb8TKmV7{5>GVcj%EIcD8ZT46HhGqhnlK5CH7eVF!m|YlqPdBW5+~# zPD#%y*KT>eqYSVxdS11`D^VjQhDTp~i+y;;A{_s4{Ya{v^wukhxN5T5;&v?I!D&ZlS3lU{zA;Q_N=`q#Th{<}-3a{3)H&b8T z3_M~l79VAgl{>8m;6m*Zq+z!=K1{p_ibCfZ7r)0vbrvpL#%WJXMGs_4H&0f?apZH? z?w?~$xPCadJIr?!$Ii4GRsw6BHC780Xf~EQirN>xa{^AVuq zG@YYLL+jVP&n7g5z~pDDIddHCL&R~7_znG$N>`A~LZSBO-yaZ!OzfrVAm_CqCDM_8ufD)V-j#-51?%ZODwu)k((BvCNN)=C3ZiKEPW5Zi6` zfvS7k_bG>tZ2h-7Ro4#En1`tG~GA>kWCYYD_eh;}h;{r#AU+;!$>ftgi^% zkhxOsxtB;HGCKO0}Z^8RifbuG5a^%q$d>;4Fbv8O`bmBifaZ#o+}i+^Zqz55)iiydXNX&dL>YJQ1B z&J{>`mdx6HDfZ3L$T*GY>KZT2qx6X=;r4`&3*Fy5yn<^`rVQl|$)KyaIuKm)0NRW)qE2$Ek)=mrZlBRr{QQhKTzwl5*_Y zhs)BKsskHXtdEkl?4onO*iB=Bvcn#4todT~K5-e(v$hq`Xdl6wfb8-&mA382qJ081 z!JTcx)jIodp^J3(>RdjQH$`YD6$1uD%OfkhGMcX}@*FQ3cX$|qklmLni&NH=Q1 z^#nt$pP?-6S|KA(G^mAL=O7B^{< zrk2RAz<57ciAs2=*_+O?oOV_06u&gTTaAhSEVEnJV2=h#mUGzFSpR*^u-TiXm(rR zIvvPk1YV2kKv%!PSlAV^O@CooOzlXyOpvj=?6V2>ok&b^5Yv?SvouUpuTmS&)K{wK z?DnwI?@GrKdHtQ5j*;qLg$ja69`|ULT?r;5|NVwcsbF$Y`(u3m)|WViJ)iUSzfrxn zZCI7EC!%#0iNmV#0=hbHR^e&0mwjh@dB*5G#=Y4OJzCC)-3w7=bn8py`~~!=qhFj3 z_Im^$y#X~r>%)*^;N%Oml9cpl?dZ>{&U=gevMQrP`h-J=xU!t1GHx~D=>v+@*oP)V zSI_oAEqU6wN4$rDd5LDjUTH!34FVaZ_EDJg%SeUXMuyhw*&6%mb~tg=`~q@Lq*pxE z)l_MMJAs(c0!1Pl+Wd*xa{fBx{*wMTNx;QAq<8}hU8@Gbw(cZnet$s@zrp4117r6Q z&Ujw;vtx*1OZyw_lD_ka8k0z^&HjOMKG@i-K#rUG2DQp1Y|c!>Z>Da>f?((jM<|E(C$8iJs89QCXO%h)21(^n5&dZjriU6#+Wb<+K|dOz=7q{3cY z$hAzJq-{Ly(xZ(rDtnsyQP=Ko`EauEf3ps1hkEEZ5f$cw8zPvyV(q`rVur+3ww9<8 zUz{;&d*tZhQXx7D7kyv`;q8o_z#g%EsVx=8I)!~IUNNn=%@#wRJ|nW?b!oSxR5xY% z*2Eyg5qHYcK;Q%Wf}Ozz`n5iB>S78fNH$vQ6VTRP)K};^8|^ISvyc8KVOeL<#JW*~ z>I;0-vmacqOO7{ngPJ*(h{c3FQ%3IHeoW%F8>CC5DVGIGVMb?z%X{K|8;`(QPK=!) 
z;_FlTu_|s_Otj{NNNnlc-)TUi6`?ig%Ah%WsV6X_GnUiYpxCJBX%jpL&*UUN$zV=Z zMVUrVWRw}_FO--o)o+yCixSlB|LXdH*rcEl{ZD%1_h5_@U#ITO*k zp>fvRHuM#a#7BoT?DEp&A)`vV(^m?$niB z*w>&{i*Qq`CBqL?y0>EN^KEv@D0nJJH|g|APkI=f$39~>XpZN6R_D($bDk3QBgO_) zL1jZ;RDF&6DDPjWz1zOd1Q}yOS`(G&8S~OX?c>5BbMi{DzJtd*jH~tg9UB?L`CXC? zr#oX$^v|ajN8{w@IoUqQJ!G4fb6`J_jaH9y3RM=MF|cMOuc#0pd}o?o8B8+OGF3>x zvv&{+Ehr2^j*J#Cr7S2@C@k#%&3==_@ltm2vT^xt27kIaKZU~%UuC)=7^WgRF$WxA zC}-${Agsdmlfj0L1Kl=m`qyr6S}(U~sqp*=Nu)7dfaYH?W4ljZia=qIVK!c1X1*1h zJZm4#7q~372?_lFb|R#EcYu4F+k- zh6?U7dHu0V%p8gvUrh&6ENO zC0D2TanwWlW#2ioPS^?=40a%1f&8E*Rf$&+wz5~;UtRzkb z9|*~^5^9*?Jn{17oKBnMU{XxEDN90+cGTLe5q>5Q{tQ{vTbo5TM*;S%A0gUNF- zwz+jo$c%Ti6>7pO{=k!iVJEm|!=Kn8JZRJ^D_q1n$ehUIvx0fhP_hd#hqe#Lj==7k z`T^Zs4-IdIUHGU$dBP3_q2A$ibOo|Hn)%B)it1DBG>Ra1-TBmde#Ypyv^55C@WJ?@ z0rp`r)E_Tv1)}FsK#Hqyg;lr`H(KYI_Hf9>ParRiQ!YVNh-BlIBbZ%t=fye+=FBoAtk3 ztr$v)FQu$P!@Ij689SRj^1W+!>i#L81CJN*w8&9zT@JS^cz`dvN!EMfvH$-LnU6Pw-$CU_lxJCdz3{7PI@K z?lL(+QI2#x2o}EHz%Xbv9p2^rTQgM*-8)V<45nc@-qJuKEA#&mcJKd8|9{~BXD71* zlhd5fOvs!Mi7}@lO*s~sQ_hTtqRdX_lqs}ITFxERB!y065>bsPr$k4cDb?zw_IbW9 z-=Dsh&krAefL-kPbl>B4zb?b))@4=Zx4&eJs=w)jAHSyh7v4S%-}8-~&SUR&V(MT) zaPcG3_KMV(kBaHQdVAaFAci{u7CX(>Ic{FQSameKL~lFC1mb+pTu@+Uc^==9M20%G zLuT}E*IGlbWHm4)r@~o?G^a*4JZx%RL-?mDD&ZQ%<3<~^1YP^!_{eBcTK{D^*$f^h z@+6xoVP6`(lT;hZrU;yGcP`m+3$}7nK5ppS`My0Buuu;6;%`P~J;chdSSg7nn*@RqC+pHHTzbK#G%z8b zu|r@FF*Lt|towD5lgr|eZ=Z-#3`qnr(?;zQ=nw)38m3~DUWquBMhM2T(*4>?xW@~| z+1blgIg+VB#M7vXo@1-NuJ$F#`Rq0vES3uIkW92svmHOP6Q?+H@tmmNB~Gth2i7zz z{Mgi%ZU5;#Tj$(0^uM_OM+7Ha7Nia@NaEy#V>~7hHc+_F%bm~<6Nhr<#{R-70Ct3% zaioF$_248iV@EhES$wOgr=60&E1e2}x6ggrq25j#xLZS<7O@saj0buQizKiV;M)Pv z9;ir*hG|7Tv?3mDlT7jJ+?l=t#%ev2k`xNe-7s7BAz{GSgx4OT0@=j z43D0$K^{9jT=zo9?h=rS+W2hbgVBUVXr8bo=EpJLa>Uln*dzqph6zo?Y87>A?~g>c z8BaTX226TblAtPKWzNRnS@v_Mqj3lan|V?0-Ge-Mte|8#sATAyLuETW0|ED6zCIX_ zu=?9HmOH0$nswTf6`7>wAYq8*WSk|ZFUdfarWqYF?ItIg@u$(9-0r(v#^xT#79PWm z2eI`-SS`~{lE5ZOGOrJF-sDy&_#xgMURc~%=@`zQ+$J08IU&7tVey|+(}UjI_TLuR zpynX%ElUt{cROr^^rB&F#gANAaL>_S=^V*(_U|{lOM70dM^!jiBK{plyzr7u<1bLB z8FwtF*@83uxZ0&p(cZzbew@mjW#A7>5Z;^Ah3z~5>lOLUClla9TQoe_93^>ppoGtC zkEF3;Zg@B7;h2f-aBm6SFkgmozIY1_vpX581Y$npv!mK&9E8Y;RHlms=HMZS<)Vir zkzqk#SaM_S@B=1%I%ko#$@v{Yr`rV(;@e@*UmL4GBv+v7I&80Qf3p}PH&rqWPr+{| zE`ztUv*KycmRfkaU{1t?r}Ha`M?xgte9tdc(x;yB>j@QVk?5ityPRQm{4hJSrzv#) z^<2)LLF%~gqn2D8Gl41_Bww=E>`etCvXW?1RgRDkj7-UYGc=_-T8Xgzj}_SSrdT^Z zQfFFOYdV9eK+Ha!2T)ZKPrrbkA^JsFQS4E37Ij2OS30i(7mQ5{w6_S{;k^d;S zJzmIOZ%Jee5`P)aqcg&e{!BJ+Vc(c;bEmN)aGWh1I5TjJsZ;Tz1Ey9yKgf(ED_;8j zxh0$8RjyOkmhlkT02`mZ^ zdhTPXF6C{DK#`O$L*&DP#SFc08BKyU0nge!&59g`xt6iQxv=2&4gUE|Hw4s&!ek<# z7pz&^g|IyWFjkzp=>VeNWK}c_QWMe#8(P6-C*yC@T_nsXq03S3`x45l?HJW76JPND zC2AvS@2p|}O#+8uPrC3snFn^45IzIyb};2OKOG^=-c72zmVkwo*WKV;d>THOzWKTD zzub7&AL49oyi{!l65Z4JC9Wc|+xS%mBI970%W&Ed>ailClU364u)n5I$uBouO7Zu5+tXZ#okUWR-`Vxz72X8wQ%=Kt`L48 zCNrWs8R1R1*O?MI5dOHwQ}#Sjk9vh2h`-GHjkW3hIdyriB%`2_o}F(0`Nr93KY!Uo zAv7nC(|9E;WzqQill#_H{R6nUOPaF8!M-iw@VF_r0fgk`fwDFZ$1#`k2T`K$eDOkB z{)Ywsjt>!KU-$nLd+9|IGffw&}6=s9GGmAK5p>&Jv$^#6maW=u(5> zU!jquwZv<;P4`Q^foxt!>u}jWP1%fmb~puUg-@(Re7d`@LSZHM;@QoFo`F_HR)QFw zOAVuBaYBw>6}aP}*{*8Vtd|#lrk(mhvPyZ6oeh6p7bT(ZT!g!A`W@CXk^Do9H^rus-DoPm9Rs(yqkL z+rbj+*LUlViGt*Reg5`_I^WuF08KoHyQ$;6ep=X{^Zto6uuJvxANx)qt|&aC@GpEbAU+~o5g zj2>8;X3dWM`|p6=vitt`TB#Olmm|%RG^}@D`fJtfC%pxIX!9oN`)H(L zcyX(4ufyUwn@W<$L0Hx1&*+dE7c@qBYL~IYlzgy(k8z~k$XFNIbp>#N34`b^vd!b= z`((Sv>$d*dhh7g<*op%TF~`Q&4WC9C9<)7bL85hvRU--k<6$sAPMb6z|71E&`o;-5 z;b|FiV8%&4om62=jg+~kx^I2I?1q*os)w(VMNV;p#&)WVZ2sjTGGD2X|4tTTx1yjH z&t1}OuHNQi7I-7sj-}IxJFXZKbZ&VrGa7RP<1iCmQfZcbD~P@Su{Cr^a{`nU- 
zYt+hq(<_w4)iKX=+Z2=06NrWrkZU6-|kInicrE<@42T2b+8 zH0O}60Q*VaW{ji!zwdf!C%W780$)jL#ytCjca5qB-s+UqJ3bg&VK?vOGey>5If;od zMZ5IwWsrApNbo(sTGuJ4Qk9F&g*f?*1?cNTx}8MBhn_y$UX>Xf2W8*WkRD#o(zjcC zi{HL`IUc9(A|eJ2MMtH{3<0e#16Lr;X~OWNGJbFU>O6u29Uk=<+}Ef}F@mdfSGSr1 zC(>qvWGeoV*i$v94m->edjC95Ph0=~OrzdHDX?v4$dA+1_uRg{QC+mop&IF>ze5_i z)bAlX8$!Pa*iZY}R^r`@I`66e_p`dg_Myi20GqR*Z@#+Ru?>N);l{PfWe}MkUra}_ zAwfk>G0;u%)s?R>4x{T`Xj^IUtA_!%rd}&D40D@z`7XQKJ|KwyflG~d(PKz3GzL_x z6ooc;U@@Na>Zq%Yk4h}3VJA$>brs=QwZ%$Dp}~2|ixFsdw`b=AQ)xD6Xh|0fq zAgD4kxM@Sk2k<-}i|WQDZko@R^VWK47c2Rs8x=mRMyE0(*1l{$xH{w@ymk%r9%}HG zDC=0)S>_Y3k9=4x(sUVsPDo1Fg!jiWPg+IV-^kRoJ$`7lop7~Ay8bHpMKJBv(Hv@1 z@Y5L|%Ri=D`nSBpP;g#O8-5CYrws+qNrp_X61(jM#h4ItZ^t{GI_YGF=Ea@?%{34r zKuE`pw6neOT}rV4cs;k`)2AGZOj5q&uJt}NZ6M)h7QaceQ;%&M&{>f+{n|vzLj@`~ z>*akeF`SSL=)|5w?Zz1(q~V?jRV?Txnh!Ps6haqkv^0ZfsK4P)NfifJT?XKIQre(V zACaM(->h(`ooUsek4eIk)Vd^4BaO}yv-LP+6bI&*vGYtu07PlaNAShg754?uMA8zT z^HAsNvBVsiVzsph#gr9>VGmui(qGMy`>WdfI#Yka&&Lwo&&CIgq#s!FRM>0)b2h$q zPQIgZYF_osu0virA-|Ex(Vc4PUY%$PZ^Ok4KiL)m16pS`zHoW4dgfGNoC<{t(Ha|O z1H&BUxs{!}#jG;(fXiqG&&%|mBI=O(E8RX~KUj4Ugk%t@05n9j&M_)DYkX)31`@wtuiD*%~7ELjtZk zKCdLA0H4!@(qz~$`_Nn2p0hFvxm1RK%6oj`D0agBRt&11?B`NrP?Lpj@)2S`yHi%x z9SU>w(+I4+LpF68J+au!(=6>XiWuY7K#eh;w`}Bbn0-oI-az6{0oR8zpxjNdE}S9>chK}vnT(NWrvjMh;eD-|3W7tEQkYgReFOgwbtBcUQ)7>+$-bBg$v+CaxZR?B0?57Blv|sH0 zaff)~uwC=#*ypU^Ud2)2kYmlj!)xJfsuW>Y{zdp13qCkDJcxtm6Qj{rE2(J3Ol@-3 zt}nq&8nY|j7KQrdK@`t~vSpEbyDGhC%^n01T;!^OX+>-2EM6>n$rFl4GJ|*ls{dqN zDiq^o1;D(wOUg8lT&;>Bq~@VUJhg`lpuh$>SrY*Wu0@E0Srq6)al)hJXTBD^!feXWcZvORqI}rbIWT^i7#Ym-4TxF91&^0U1Ta*NUmG( zj=@cc-cDD8D%}@;x_`62Z&y)VY7-fQTC_`QxM!a1{dfqypJOLxG&m3_2Dm|jkIg>) zGF0^g%c3XFbF&A4d5Jv!BY&u^WA1vGXt(kV&No*uzAqNnD8Fw~7yUx-3DhdS)*<7j z4}vg9tuR0aE)F`{)P%J5dsY9n^R+>ssP&(6WmJH%^*#u|;}*>Py`FV9;gw`-M`guh zMSGHzf;;0=Gq9m99I?^o+-p|NcEgMUQot`kB{Sc7pgr8P&NS~Wu3pP{c3E-)_YFhg z&?{VXYwuYr^FDizs4ZirgS;(u#wXgy`j`U;Wg(UXcEUcwT)>7b&jk zVRmLgaQR0M<=!Av96+%2zJP{Qhr(eI4>ej;_+d zssIn{<;C@Coj@j>U&_8$DWiI9+`Ym>J>=o{keDAWe4N|vmGKk$IKUnz5!U-PgvGXY zijGDiRH&Bvb{T6S#D(d%na40Llv(mRH@h-Z6ONfYubpGmkcOj~<$x8Y&nQ(4Y43dZy!#X1{76A3&| zL#)WLdE5eD@0+CE0@WpeU%3xrt>s^t>KNwKE~#jrsa)@GGbSmei){Q#W^?*&Efhn0 z09RQit2%b&{Px`6_wQQLl$iBO@iGT{2A>F-Mxp%Ah~dM23=uz_WUNEhm1Stx=$q}Tpv+!AQy9av@qrdumd5kLcNiX=yh z$bmr8h8~@|**DHHtE_TnQd&d??phd zjL(EWaHDF4{e*myLmI=j!svY`MHN5b)XoUb7q!Wpy;r$(G>C{L_N#dHD@r_FJ~c=A z!AWHH4sc6&pi#IDQ?*WU2@5xZDvCzZo&zQoX6i|^t8_CB=V9z?HDD*(f`(B@T zx7Jj+cEoN)AJq^78jM?O+LJA^9d1#Uk5dh%d&V1j?c6^3qXh>BWy=$xw`fX44`np4 zypQWksEl7$iu?Ckc)NAxnn^mfO}Ve~O>RHFhQS1w?KPPlW|7LTK^R`WSp}&t2coA8 zaIJ40sm4K9TMv9`6W`mX4gy>=PhYyS?6Q~%eY*2U;(rg@m*19JXE69RQ-^V?GO z+Q4KXN~>Q}G8|~I17@Ibt5D=L9CzSbV)~_>GM=YAMxwPZHB3;zKvwxitKah$O|FN& zuaM-h-mf_M4ss6cSl7U6Y_Entzpy4P?!rY868bmsVf2mo_n&9x#PpJ8H3IK>&5>7L z8$d9NV&hK(Hu$X(`H-zND5q?z58lr?fDxzjKCOK@@sqeYSwH92tY(0R<)?EYS-?%+ zQ+_Q(O$t$bkj1AY`!yM5Eg!d9u;ZQATL#WIw^GobJ+9i78{;E4aii@f0r%q6RJ_Ng ze80^#A|r`@zJn~VVXIx{C21TN^A`eD=F6=R6?6JECp{kUJS|>>tyko20~qGRr%eMl z2DT@8Mx1b4IFB7y(<~&Nzd*7O$E!JcU@W}Qe3}J+5O{j(5It;?e4GnCm6@^C8s>f9 z$ml7V33ZW8{8P0_@b9fskq&M2d+RCXsagzJst|846ji8BmPxzs+tMxNect zVNOtOfROT9g*!a>K*I7|6xv*At;79`>r@BH=$Sq-Xd9h{T{STr z4ORP)PVxy5RqQL~HwxWXK$vy#jRU0qaZmdkkrS1USx=7M^zq4c%QOENIud&2I^^LW zh4VQid-Omw^OV?O>f3B{hboPA~+Nb4==)SF1!$CzrIBn-DtBm-7Tmj(% zJnZz?Bo}Fk`SQRQil@hsV)bEOG7To1Epw2709b!kKnCEj@+>Tn5oz>4un5JxP;G_wEhgfddC(-$qR zHi*1neQnd@3OeZ&a$}B+R5l#CwE(@-E2wmzUXY-R47y^?V@ibx&$>;4+*^L6-#g_m)hrn5?uVX`(V`A)nxtD$2b7sqGohy?Q`K^8VAE^A(;-qoDrao* z3ep~|acsJ5Zr$B3%&SZ%xYiu08o;>;APqRamT5BV#!Rq3@G*STBj^Y<0qtiq2_!U9{Qy 
[GIT binary patch data omitted]