Merge branch 'feat/relative_umi' into feat/openarm_relative_ee

This commit is contained in:
Pepijn
2026-01-08 10:21:36 +01:00
5 changed files with 601 additions and 0 deletions
+321
View File
@@ -0,0 +1,321 @@
#!/usr/bin/env python
"""
OpenArms Policy Evaluation with Relative Actions
Two modes supported (based on training config):
Mode 1: Relative actions only (use_relative_state=False)
- Policy outputs relative action deltas
- State input is absolute
Mode 2: Relative actions + state (use_relative_state=True)
- Policy outputs relative action deltas
- State input is also converted to relative
Example usage:
python examples/openarms/evaluate_relative.py
"""
import time
from pathlib import Path
import torch
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.configs.policies import PreTrainedConfig
from lerobot.configs.train import TrainPipelineConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.utils import build_dataset_frame, combine_feature_dicts
from lerobot.policies.factory import make_policy, make_pre_post_processors
from lerobot.policies.utils import predict_action
from lerobot.processor import make_default_processors
from lerobot.processor.core import RobotAction
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.utils.constants import ACTION, OBS_STR
from lerobot.utils.control_utils import init_keyboard_listener, precise_sleep
from lerobot.utils.device_utils import get_safe_torch_device
from lerobot.utils.relative_actions import (
convert_from_relative_actions_dict,
convert_state_to_relative,
PerTimestepNormalizer,
)
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun, log_rerun_data
# Configuration
HF_MODEL_ID = "your-org/your-relative-policy"
HF_EVAL_DATASET_ID = "your-org/your-eval-dataset"
TASK_DESCRIPTION = "your task description"
NUM_EPISODES = 1
FPS = 30
EPISODE_TIME_SEC = 300
FOLLOWER_LEFT_PORT = "can0"
FOLLOWER_RIGHT_PORT = "can1"
CAMERA_CONFIG = {
"left_wrist": OpenCVCameraConfig(index_or_path="/dev/video5", width=640, height=480, fps=FPS),
"right_wrist": OpenCVCameraConfig(index_or_path="/dev/video1", width=640, height=480, fps=FPS),
"base": OpenCVCameraConfig(index_or_path="/dev/video3", width=640, height=480, fps=FPS),
}
def make_robot_action(action_values: dict, features: dict) -> RobotAction:
    """Build a robot action dict from predicted values, keeping only keys
    that appear as "action.*" features and have a predicted value."""
    prefix = ACTION + "."
    action_names = (key.removeprefix(prefix) for key in features if key.startswith(prefix))
    return {name: action_values[name] for name in action_names if name in action_values}
def load_relative_config(model_path: Path | str) -> tuple[PerTimestepNormalizer | None, bool]:
    """Load normalizer and relative_state setting from checkpoint.

    Args:
        model_path: Local checkpoint directory, or a Hub repo id if no such
            local path exists.

    Returns:
        Tuple of (PerTimestepNormalizer or None, use_relative_state flag).
        Missing artifacts degrade gracefully to (None, False).
    """
    model_path = Path(model_path) if isinstance(model_path, str) else model_path
    normalizer = None
    use_relative_state = False
    # Try local path first
    if model_path.exists():
        stats_path = model_path / "relative_stats.pt"
        if stats_path.exists():
            normalizer = PerTimestepNormalizer.load(stats_path)
            print(f"Loaded per-timestep stats from: {stats_path}")
        config_path = model_path / "train_config.json"
        if config_path.exists():
            cfg = TrainPipelineConfig.from_pretrained(model_path)
            use_relative_state = getattr(cfg, "use_relative_state", False)
    else:
        # Try hub. Fetch each artifact independently so a missing
        # relative_stats.pt no longer prevents reading train_config.json
        # (previously one try-block covered both, silently dropping
        # use_relative_state when the stats download failed).
        try:
            from huggingface_hub import hf_hub_download
        except ImportError as e:
            print(f"Warning: Could not load relative config: {e}")
            return normalizer, use_relative_state
        try:
            stats_file = hf_hub_download(repo_id=str(model_path), filename="relative_stats.pt")
            normalizer = PerTimestepNormalizer.load(stats_file)
            print("Loaded per-timestep stats from hub")
        except Exception as e:
            print(f"Warning: Could not load relative stats: {e}")
        try:
            config_file = hf_hub_download(repo_id=str(model_path), filename="train_config.json")
            cfg = TrainPipelineConfig.from_pretrained(Path(config_file).parent)
            use_relative_state = getattr(cfg, "use_relative_state", False)
        except Exception as e:
            print(f"Warning: Could not load relative config: {e}")
    return normalizer, use_relative_state
def inference_loop_relative(
    robot,
    policy,
    preprocessor,
    postprocessor,
    dataset,
    events,
    fps: int,
    control_time_s: float,
    single_task: str,
    display_data: bool = True,
    state_key: str = "observation.state",
    relative_normalizer: PerTimestepNormalizer | None = None,
    use_relative_state: bool = False,
):
    """
    Inference loop for relative action policies.

    Runs the policy at `fps` for up to `control_time_s` seconds. Each tick:
    read an observation, run the policy, unnormalize the predicted relative
    deltas, add the current joint positions to get absolute targets, send
    them to the robot, and record the frame into `dataset`.

    If use_relative_state=True, also converts observation state to relative.

    Args:
        robot: Connected robot exposing get_observation()/send_action().
        policy: Loaded policy; its config supplies device and AMP settings.
        preprocessor: Processor applied to observations before inference.
        postprocessor: Processor applied to policy outputs.
        dataset: Dataset whose features define frame layout and that records frames.
        events: Keyboard-event dict; "exit_early"/"stop_recording" end the loop.
        fps: Target control frequency in Hz.
        control_time_s: Maximum episode duration in seconds.
        single_task: Task string stored with each recorded frame.
        display_data: If True, stream observation/action pairs to rerun.
        state_key: Key of the proprioceptive state in the observation frame.
        relative_normalizer: Optional per-timestep normalizer used to map the
            policy's normalized outputs back to raw relative deltas.
        use_relative_state: If True, convert the state input to relative too.
    """
    device = get_safe_torch_device(policy.config.device)
    timestamp = 0
    start_t = time.perf_counter()
    while timestamp < control_time_s:
        loop_start = time.perf_counter()
        if events["exit_early"] or events["stop_recording"]:
            break
        obs = robot.get_observation()
        observation_frame = build_dataset_frame(dataset.features, obs, prefix=OBS_STR)
        # Current joint positions: the reference for relative -> absolute conversion.
        current_pos = {k: v for k, v in obs.items() if k.endswith(".pos")}
        # Convert state to relative if using full UMI mode
        if use_relative_state and state_key in observation_frame:
            state_tensor = observation_frame[state_key]
            if isinstance(state_tensor, torch.Tensor):
                observation_frame[state_key] = convert_state_to_relative(state_tensor)
        # Policy inference (outputs normalized relative actions)
        action_values = predict_action(
            observation=observation_frame,
            policy=policy,
            device=device,
            preprocessor=preprocessor,
            postprocessor=postprocessor,
            use_amp=policy.config.use_amp,
            task=single_task,
            robot_type=robot.robot_type,
        )
        # Unnormalize actions
        if relative_normalizer is not None:
            action_keys = [k for k in action_values.keys() if not k.startswith("task")]
            # Pack the scalar predictions into a (1, 1, D) tensor.
            action_tensor = torch.tensor([[action_values[k] for k in action_keys]])
            action_tensor = action_tensor.unsqueeze(1)
            # NOTE(review): with a (1, 1, D) input the normalizer's (T, D)
            # stats broadcast over the time axis and only index [0, 0]
            # (timestep-0 stats) is read back — confirm this matches the
            # training-time layout.
            action_unnorm = relative_normalizer.unnormalize(action_tensor)
            for i, k in enumerate(action_keys):
                action_values[k] = action_unnorm[0, 0, i].item()
        # Convert to absolute
        relative_action = make_robot_action(action_values, dataset.features)
        absolute_action = convert_from_relative_actions_dict(relative_action, current_pos)
        robot.send_action(absolute_action)
        # NOTE(review): dataset.features is already dereferenced above, so this
        # None check only guards the recording step — confirm intent.
        if dataset is not None:
            action_frame = build_dataset_frame(dataset.features, absolute_action, prefix=ACTION)
            frame = {**observation_frame, **action_frame, "task": single_task}
            dataset.add_frame(frame)
        if display_data:
            log_rerun_data(observation=obs, action=absolute_action)
        # Sleep off the remainder of the control period to hold the target fps.
        dt = time.perf_counter() - loop_start
        precise_sleep(1 / fps - dt)
        timestamp = time.perf_counter() - start_t
def main():
    """Entry point: load the relative-action policy and its normalizer,
    connect the OpenArms follower, record NUM_EPISODES evaluation episodes
    into a fresh local dataset, then finalize and push it to the Hub."""
    print("=" * 60)
    print(" OpenArms Evaluation - Relative Actions")
    print("=" * 60)
    print(f"\nModel: {HF_MODEL_ID}")
    print(f"Dataset: {HF_EVAL_DATASET_ID}")
    print(f"Episodes: {NUM_EPISODES}, Duration: {EPISODE_TIME_SEC}s")
    # Load relative action config
    relative_normalizer, use_relative_state = load_relative_config(HF_MODEL_ID)
    mode = "actions + state" if use_relative_state else "actions only"
    print(f"Mode: relative {mode}")
    # Setup robot
    follower_config = OpenArmsFollowerConfig(
        port_left=FOLLOWER_LEFT_PORT,
        port_right=FOLLOWER_RIGHT_PORT,
        can_interface="socketcan",
        id="openarms_follower",
        disable_torque_on_disconnect=True,
        max_relative_target=10.0,
        cameras=CAMERA_CONFIG,
    )
    follower = OpenArmsFollower(follower_config)
    follower.connect(calibrate=False)
    if not follower.is_connected:
        raise RuntimeError("Robot failed to connect!")
    teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors()
    # Record only joint-position action features.
    action_features_hw = {k: v for k, v in follower.action_features.items() if k.endswith(".pos")}
    dataset_features = combine_feature_dicts(
        aggregate_pipeline_dataset_features(
            pipeline=teleop_action_processor,
            initial_features=create_initial_features(action=action_features_hw),
            use_videos=True,
        ),
        aggregate_pipeline_dataset_features(
            pipeline=robot_observation_processor,
            initial_features=create_initial_features(observation=follower.observation_features),
            use_videos=True,
        ),
    )
    # Ask before reusing a dataset already cached on disk.
    dataset_path = Path.home() / ".cache" / "huggingface" / "lerobot" / HF_EVAL_DATASET_ID
    if dataset_path.exists():
        print(f"\nDataset exists at: {dataset_path}")
        if input("Continue? (y/n): ").strip().lower() != 'y':
            follower.disconnect()
            return
    dataset = LeRobotDataset.create(
        repo_id=HF_EVAL_DATASET_ID,
        fps=FPS,
        features=dataset_features,
        robot_type=follower.name,
        use_videos=True,
        image_writer_processes=0,
        image_writer_threads=12,
    )
    policy_config = PreTrainedConfig.from_pretrained(HF_MODEL_ID)
    policy_config.pretrained_path = HF_MODEL_ID
    policy = make_policy(policy_config, ds_meta=dataset.meta)
    preprocessor, postprocessor = make_pre_post_processors(
        policy_cfg=policy.config,
        pretrained_path=HF_MODEL_ID,
        dataset_stats=dataset.meta.stats,
        preprocessor_overrides={"device_processor": {"device": str(policy.config.device)}},
    )
    listener, events = init_keyboard_listener()
    init_rerun(session_name="openarms_eval_relative")
    episode_idx = 0
    print("\nControls: ESC=stop, →=next episode, ←=rerecord")
    try:
        while episode_idx < NUM_EPISODES and not events["stop_recording"]:
            log_say(f"Episode {episode_idx + 1} of {NUM_EPISODES}")
            inference_loop_relative(
                robot=follower,
                policy=policy,
                preprocessor=preprocessor,
                postprocessor=postprocessor,
                dataset=dataset,
                events=events,
                fps=FPS,
                control_time_s=EPISODE_TIME_SEC,
                single_task=TASK_DESCRIPTION,
                display_data=True,
                relative_normalizer=relative_normalizer,
                use_relative_state=use_relative_state,
            )
            # Discard the buffered episode and redo it on the rerecord key.
            if events.get("rerecord_episode", False):
                log_say("Re-recording")
                events["rerecord_episode"] = False
                events["exit_early"] = False
                dataset.clear_episode_buffer()
                continue
            # Persist the episode only if frames were actually recorded.
            if dataset.episode_buffer is not None and dataset.episode_buffer.get("size", 0) > 0:
                print(f"Saving episode {episode_idx + 1}...")
                dataset.save_episode()
                episode_idx += 1
            events["exit_early"] = False
            if not events["stop_recording"] and episode_idx < NUM_EPISODES:
                input("Press ENTER for next episode...")
        print(f"\nDone! {episode_idx} episodes recorded")
        log_say("Complete", blocking=True)
    except KeyboardInterrupt:
        print("\n\nInterrupted")
    finally:
        # Always release hardware, then finalize and upload whatever was recorded.
        follower.disconnect()
        if listener is not None:
            listener.stop()
        dataset.finalize()
        print("Uploading to Hub...")
        dataset.push_to_hub(private=True)


if __name__ == "__main__":
    main()
+59
View File
@@ -0,0 +1,59 @@
#!/usr/bin/env python
"""Unify all tasks in a dataset to a single task."""
import argparse
import json
from pathlib import Path
import pandas as pd
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.utils import write_tasks
def unify_tasks(repo_id: str, new_task: str):
    """Rewrite a dataset so that every episode references one single task."""
    print(f"Loading dataset: {repo_id}")
    ds = LeRobotDataset(repo_id)
    dataset_root = ds.root
    existing = list(ds.meta.tasks["task"]) if ds.meta.tasks is not None else []
    print(f"Current tasks: {existing}")
    # Step 1: replace the task table with a single-row table.
    write_tasks(pd.DataFrame({"task": [new_task]}), dataset_root)
    print(f"Set single task: '{new_task}'")
    # Step 2: point every frame of every data chunk at task_index 0.
    for chunk_path in sorted((dataset_root / "data").glob("*/*.parquet")):
        frame = pd.read_parquet(chunk_path)
        frame["task_index"] = 0
        frame.to_parquet(chunk_path)
        print(f"Updated: {chunk_path.relative_to(dataset_root)}")
    # Step 3: keep info.json's task count in sync.
    info_path = dataset_root / "info.json"
    info = json.loads(info_path.read_text())
    info["total_tasks"] = 1
    info_path.write_text(json.dumps(info, indent=2))
    print(f"\nDone! All {ds.meta.total_episodes} episodes now use task: '{new_task}'")
    print(f"\nTo push: huggingface-cli upload {repo_id} {dataset_root} --repo-type dataset")
def main():
    """CLI entry point: parse --repo_id/--task and run the unification."""
    parser = argparse.ArgumentParser(description="Unify all tasks in a dataset to a single task")
    parser.add_argument("--repo_id", type=str, required=True, help="Dataset repo_id")
    parser.add_argument("--task", type=str, required=True, help="New task description")
    opts = parser.parse_args()
    unify_tasks(opts.repo_id, opts.task)


if __name__ == "__main__":
    main()
+11
View File
@@ -66,6 +66,17 @@ class TrainPipelineConfig(HubMixin):
eval: EvalConfig = field(default_factory=EvalConfig)
wandb: WandBConfig = field(default_factory=WandBConfig)
# UMI-style relative actions with per-timestep normalization
# Mode 1: use_relative_actions=True, use_relative_state=False
# - Actions: relative to current position + per-timestep normalized
# - State: absolute (unchanged)
# Mode 2: use_relative_actions=True, use_relative_state=True (full UMI)
# - Actions: relative to current position + per-timestep normalized
# - State: relative to current position (provides velocity info)
# Stats are computed automatically from first 1000 batches at training start
use_relative_actions: bool = False
use_relative_state: bool = False
# RA-BC (Reward-Aligned Behavior Cloning) parameters
use_rabc: bool = False # Enable reward-weighted training
rabc_progress_path: str | None = None # Path to precomputed SARM progress parquet file
+51
View File
@@ -13,6 +13,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import logging
import time
from contextlib import nullcontext
@@ -46,6 +47,11 @@ from lerobot.utils.train_utils import (
save_checkpoint,
update_last_checkpoint,
)
from lerobot.utils.relative_actions import (
convert_to_relative_actions,
compute_relative_action_stats,
PerTimestepNormalizer,
)
from lerobot.utils.utils import (
format_big_number,
has_method,
@@ -298,6 +304,41 @@ def train(cfg: TrainPipelineConfig, accelerator: Accelerator | None = None):
device=device,
)
# Compute per-timestep normalizer for relative actions
relative_normalizer = None
if cfg.use_relative_actions:
mode = "actions + state" if cfg.use_relative_state else "actions only"
cfg.output_dir.mkdir(parents=True, exist_ok=True)
stats_path = cfg.output_dir / "relative_stats.pt"
if is_main_process:
logging.info(colored(f"Relative mode: {mode}", "cyan", attrs=["bold"]))
if stats_path.exists():
logging.info(f"Loading pre-computed stats from: {stats_path}")
else:
logging.info("Computing per-timestep stats (first 1000 batches)...")
logging.info("Using fresh dataset to avoid video decoder state issues...")
# Create separate dataset instance to avoid corrupting main dataset's video decoders
stats_dataset = make_dataset(cfg)
temp_loader = torch.utils.data.DataLoader(
stats_dataset, batch_size=cfg.batch_size, shuffle=False, num_workers=0
)
mean, std = compute_relative_action_stats(temp_loader, num_batches=1000)
del temp_loader, stats_dataset
gc.collect()
torch.save({"mean": mean, "std": std}, stats_path)
logging.info(f"Saved stats to: {stats_path}")
# Poll for stats file instead of using NCCL barrier (avoids timeout during long computation)
if not is_main_process:
while not stats_path.exists():
time.sleep(5)
data = torch.load(stats_path, weights_only=True, map_location="cpu")
relative_normalizer = PerTimestepNormalizer(data["mean"], data["std"])
accelerator.wait_for_everyone() # Sync after everyone has loaded
step = 0 # number of policy updates (forward + backward + optim)
if cfg.resume:
@@ -385,6 +426,13 @@ def train(cfg: TrainPipelineConfig, accelerator: Accelerator | None = None):
start_time = time.perf_counter()
batch = next(dl_iter)
batch = preprocessor(batch)
# Convert to relative actions (and optionally state) if enabled
if cfg.use_relative_actions:
batch = convert_to_relative_actions(batch, convert_state=cfg.use_relative_state)
if relative_normalizer is not None:
batch["action"] = relative_normalizer.normalize(batch["action"])
train_tracker.dataloading_s = time.perf_counter() - start_time
train_tracker, output_dict = update_policy(
@@ -439,6 +487,9 @@ def train(cfg: TrainPipelineConfig, accelerator: Accelerator | None = None):
preprocessor=preprocessor,
postprocessor=postprocessor,
)
# Save relative action stats with checkpoint
if relative_normalizer is not None:
relative_normalizer.save(checkpoint_dir / "relative_stats.pt")
update_last_checkpoint(checkpoint_dir)
if wandb_logger:
wandb_logger.log_policy(checkpoint_dir)
+159
View File
@@ -0,0 +1,159 @@
"""
UMI-style relative actions with per-timestep normalization.
Two modes supported:
Mode 1: Relative actions only (use_relative_state=False)
- Actions converted to relative, state stays absolute
Mode 2: Relative actions + state (use_relative_state=True, full UMI)
- Both actions and state converted to relative
Per-timestep normalization (TRI LBM / BEHAVIOR style):
Training: action_norm[t] = (action_rel[t] - mean[t]) / std[t]
Inference: action_rel[t] = action_norm[t] * std[t] + mean[t]
"""
import torch
from pathlib import Path
class PerTimestepNormalizer:
"""Per-timestep normalization using precomputed dataset statistics."""
def __init__(self, mean: torch.Tensor, std: torch.Tensor, eps: float = 1e-8):
self.mean = mean
self.std = std
self.eps = eps
self._cache = {} # Cache for device/dtype converted tensors
def _get_stats(self, device, dtype):
"""Get cached stats for device/dtype, or create and cache them."""
key = (device, dtype)
if key not in self._cache:
self._cache[key] = (
self.mean.to(device, dtype),
self.std.to(device, dtype),
)
return self._cache[key]
def normalize(self, x: torch.Tensor) -> torch.Tensor:
mean, std = self._get_stats(x.device, x.dtype)
if x.dim() == 3 and mean.dim() == 2:
mean, std = mean.unsqueeze(0), std.unsqueeze(0)
return (x - mean) / (std + self.eps)
def unnormalize(self, x: torch.Tensor) -> torch.Tensor:
mean, std = self._get_stats(x.device, x.dtype)
if x.dim() == 3 and mean.dim() == 2:
mean, std = mean.unsqueeze(0), std.unsqueeze(0)
return x * (std + self.eps) + mean
def save(self, path: Path | str):
path = Path(path)
path.parent.mkdir(parents=True, exist_ok=True)
torch.save({"mean": self.mean.cpu(), "std": self.std.cpu(), "eps": self.eps}, path)
@classmethod
def load(cls, path: Path | str) -> "PerTimestepNormalizer":
data = torch.load(path, weights_only=True, map_location="cpu")
return cls(data["mean"], data["std"], data.get("eps", 1e-8))
def compute_relative_action_stats(
dataloader,
state_key: str = "observation.state",
num_batches: int | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""Compute per-timestep mean/std from relative actions."""
all_rel = []
for i, batch in enumerate(dataloader):
if num_batches is not None and i >= num_batches:
break
action, state = batch["action"], batch[state_key]
current_pos = state[:, -1, :] if state.dim() == 3 else state
min_dim = min(action.shape[-1], current_pos.shape[-1])
rel = action.clone()
rel[..., :min_dim] -= current_pos[:, None, :min_dim]
all_rel.append(rel)
all_rel = torch.cat(all_rel, dim=0)
return all_rel.mean(dim=0), all_rel.std(dim=0).clamp(min=1e-6)
def convert_to_relative(
    batch: dict,
    state_key: str = "observation.state",
    convert_state: bool = True,
) -> dict:
    """
    Convert a batch's actions (and optionally state) to relative form.

    Args:
        batch: Training batch with "action" and state_key.
        state_key: Key for observation state.
        convert_state: If True, also convert state to relative (full UMI mode).

    Returns:
        A shallow copy of the batch holding the converted tensors; the input
        batch is returned unchanged when either required key is missing.
    """
    if "action" not in batch or state_key not in batch:
        return batch

    action = batch["action"]
    state = batch[state_key]
    out = batch.copy()

    # The reference pose is the latest state frame.
    reference = state[:, -1, :] if state.dim() == 3 else state

    if convert_state:
        # Stacked history becomes offsets from the latest frame; a single
        # frame relative to itself is all zeros.
        out[state_key] = state - reference[:, None, :] if state.dim() == 3 else torch.zeros_like(state)

    shared = min(action.shape[-1], reference.shape[-1])
    relative = action.clone()
    relative[..., :shared] -= reference[:, None, :shared]
    out["action"] = relative
    return out


# Backward compatibility alias
convert_to_relative_actions = convert_to_relative
def convert_state_to_relative(state: torch.Tensor) -> torch.Tensor:
    """Convert an absolute state tensor to offsets from its latest frame.

    Handles a single unbatched frame (1-D, becomes zeros), an unbatched
    time-stacked history (2-D), and a batched history (3-D and above).
    """
    if state.dim() == 1:
        # A lone frame relative to itself is zero everywhere.
        return torch.zeros_like(state)
    if state.dim() == 2:
        return state - state[-1, :][None, :]
    return state - state[:, -1, :][:, None, :]
def convert_from_relative_actions(
    relative_actions: torch.Tensor,
    current_pos: torch.Tensor,
) -> torch.Tensor:
    """Shift relative actions back to absolute coordinates for robot execution.

    Only the leading dimensions shared by both tensors are offset; extra
    trailing action dimensions are left untouched.
    """
    current_pos = current_pos.to(relative_actions.device, relative_actions.dtype)
    shared = min(relative_actions.shape[-1], current_pos.shape[-1])
    absolute = relative_actions.clone()
    # A 3-D (B, T, D) chunk needs the reference broadcast over batch and time;
    # any other rank broadcasts directly along the trailing dimension.
    if relative_actions.dim() == 3:
        offset = current_pos[None, None, :shared]
    else:
        offset = current_pos[:shared]
    absolute[..., :shared] += offset
    return absolute
def convert_from_relative_actions_dict(
    relative_actions: dict[str, float],
    current_pos: dict[str, float],
) -> dict[str, float]:
    """Convert relative actions back to absolute (dict version for inference).

    Keys absent from `current_pos` are treated as already absolute (offset 0).
    """
    absolute = {}
    for joint, delta in relative_actions.items():
        absolute[joint] = delta + current_pos.get(joint, 0.0)
    return absolute