add pid ramp

This commit is contained in:
croissant
2025-11-03 19:23:24 +01:00
committed by Michel Aractingi
parent fff719cb4f
commit 6808a42455
11 changed files with 1190 additions and 133 deletions
@@ -0,0 +1,395 @@
"""
OpenArms Dataset Recording with Gravity + Friction Compensation
Records a dataset using OpenArms follower robot with leader teleoperator.
Leader arms have gravity and friction compensation for weightless, easy movement.
Includes 3 cameras: left wrist, right wrist, and base camera.
Uses the same compensation approach as teleop_with_compensation.py
"""
import shutil
import time
from pathlib import Path
import numpy as np
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.utils import build_dataset_frame, hw_to_dataset_features
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.teleoperators.openarms.config_openarms_leader import OpenArmsLeaderConfig
from lerobot.teleoperators.openarms.openarms_leader import OpenArmsLeader
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun, log_rerun_data
# Recording parameters
NUM_EPISODES = 1  # Number of episodes to record in this session
FPS = 30  # Control-loop and camera rate (Hz); also the dataset fps
EPISODE_TIME_SEC = 600  # Maximum duration of a single episode (seconds)
RESET_TIME_SEC = 120  # Time allotted to reset the scene between episodes (not recorded)
TASK_DESCRIPTION = "OpenArms task description"  # Stored with every frame as the task label
# Friction compensation scale factor (1.0 = full, 0.3 = 30% for stability)
FRICTION_SCALE = 1.0
def _extract_arm_state(leader_action, motors, prefix):
    """Pull one arm's per-motor positions (deg) and velocities (deg/s) from a leader action dict.

    Keys in the returned dicts are fully qualified (``f"{prefix}_{motor}"``);
    motors whose ``.pos``/``.vel`` entries are absent from ``leader_action``
    are simply omitted rather than defaulted.
    """
    positions_deg = {}
    velocities_deg_per_sec = {}
    for motor in motors:
        full_name = f"{prefix}_{motor}"
        pos_key = f"{full_name}.pos"
        vel_key = f"{full_name}.vel"
        if pos_key in leader_action:
            positions_deg[full_name] = leader_action[pos_key]
        if vel_key in leader_action:
            velocities_deg_per_sec[full_name] = leader_action[vel_key]
    return positions_deg, velocities_deg_per_sec


def _apply_arm_compensation(leader, bus, prefix, positions_deg, torques_nm):
    """Send feed-forward torque commands to every motor on one arm bus.

    Uses MIT control with kp=0 (no position stiffness) so the arm feels
    weightless, keeping only a per-motor damping gain kd for stability.
    Missing positions/torques default to 0.0, matching the original behavior.
    """
    for motor in bus.motors:
        full_name = f"{prefix}_{motor}"
        bus._mit_control(
            motor=motor,
            kp=0.0,
            kd=leader.get_damping_kd(motor),  # damping only, for stability
            position_degrees=positions_deg.get(full_name, 0.0),
            velocity_deg_per_sec=0.0,
            torque=torques_nm.get(full_name, 0.0),
        )


def record_loop_with_compensation(
    robot,
    leader,
    events,
    fps,
    dataset,
    dataset_features,
    control_time_s,
    single_task,
    display_data=True,
):
    """Teleoperation/recording loop with gravity + friction compensation on the leader.

    Each iteration: reads the leader state, applies zero-stiffness MIT torque
    compensation to both leader arms, mirrors the leader joint positions onto
    the follower, grabs an observation (including cameras), and optionally
    appends a frame to ``dataset``. Runs until ``control_time_s`` elapses or
    the keyboard events request an early exit / stop.

    Args:
        robot: Follower robot (provides ``send_action`` / ``get_observation``).
        leader: Leader teleoperator exposing ``bus_right``/``bus_left`` and the
            ``_gravity_from_q`` / ``_friction_from_velocity`` helpers.
        events: Keyboard-listener dict with ``exit_early`` / ``stop_recording``.
        fps: Target loop rate in Hz.
        dataset: LeRobotDataset to append frames to, or None to skip recording.
        dataset_features: Feature spec used by ``build_dataset_frame``.
        control_time_s: Maximum loop duration in seconds.
        single_task: Task label stored in every frame.
        display_data: When True, stream observation/action to rerun.
    """
    dt = 1 / fps
    episode_start_time = time.perf_counter()

    # Fully-qualified joint names for both arms, right arm first (order matches original).
    all_joints = [f"right_{motor}" for motor in leader.bus_right.motors]
    all_joints += [f"left_{motor}" for motor in leader.bus_left.motors]

    while True:
        loop_start = time.perf_counter()
        elapsed = loop_start - episode_start_time
        # Exit on timeout or on keyboard request.
        if elapsed >= control_time_s or events["exit_early"] or events["stop_recording"]:
            break

        # Get leader state (positions/velocities for both arms).
        leader_action = leader.get_action()
        right_pos, right_vel = _extract_arm_state(leader_action, leader.bus_right.motors, "right")
        left_pos, left_vel = _extract_arm_state(leader_action, leader.bus_left.motors, "left")
        leader_positions_deg = {**right_pos, **left_pos}
        leader_velocities_deg_per_sec = {**right_vel, **left_vel}

        # Gravity torques from joint positions (leader helpers expect radians).
        leader_positions_rad = {k: np.deg2rad(v) for k, v in leader_positions_deg.items()}
        gravity_torques_nm = leader._gravity_from_q(leader_positions_rad)

        # Friction torques from joint velocities (also radians).
        leader_velocities_rad = {k: np.deg2rad(v) for k, v in leader_velocities_deg_per_sec.items()}
        friction_torques_nm = leader._friction_from_velocity(
            leader_velocities_rad,
            friction_scale=FRICTION_SCALE,
        )

        # Combine gravity + friction. NOTE(review): iteration is over gravity
        # keys, so any key present only in the friction dict is dropped —
        # confirm both helpers return the same key set.
        total_torques_nm = {
            name: gravity_torques_nm.get(name, 0.0) + friction_torques_nm.get(name, 0.0)
            for name in gravity_torques_nm
        }

        # Apply compensation to both leader arms (all joints including gripper).
        _apply_arm_compensation(leader, leader.bus_right, "right", leader_positions_deg, total_torques_nm)
        _apply_arm_compensation(leader, leader.bus_left, "left", leader_positions_deg, total_torques_nm)

        # Mirror leader joint positions onto the follower (both arms).
        follower_action = {
            f"{joint}.pos": leader_action[f"{joint}.pos"]
            for joint in all_joints
            if f"{joint}.pos" in leader_action
        }
        if follower_action:
            robot.send_action(follower_action)

        # Observation includes the camera images.
        observation = robot.get_observation()

        if dataset is not None:
            # Build properly formatted observation/action frames; the ".pos"
            # suffix is kept because it matches the feature names.
            obs_frame = build_dataset_frame(dataset_features, observation, prefix="observation")
            action_frame = build_dataset_frame(dataset_features, follower_action, prefix="action")
            frame = {**obs_frame, **action_frame}
            frame["task"] = single_task  # required; timestamp is auto-calculated by add_frame
            dataset.add_frame(frame)

        if display_data:
            log_rerun_data(observation=observation, action=follower_action)

        # Maintain the requested loop rate.
        sleep_time = dt - (time.perf_counter() - loop_start)
        if sleep_time > 0:
            time.sleep(sleep_time)
def main():
    """Record OpenArms episodes with leader gravity + friction compensation.

    Workflow: configure cameras and both robots, connect/calibrate, build the
    dataset feature spec (positions only for actions), create the dataset
    (prompting if one already exists), then run the episode/reset loop with
    compensation active. Torque is disabled and devices disconnected on exit,
    after which the dataset is pushed to the Hugging Face Hub.
    """
    print("=" * 70)
    print("OpenArms Dataset Recording with Compensation")
    print("=" * 70)
    # Create camera configurations (3 cameras: left wrist, right wrist, base)
    # Using actual device paths found by lerobot-find-cameras opencv
    camera_config = {
        "left_wrist": OpenCVCameraConfig(index_or_path="/dev/video0", width=640, height=480, fps=FPS),
        "right_wrist": OpenCVCameraConfig(index_or_path="/dev/video1", width=640, height=480, fps=FPS),
        "base": OpenCVCameraConfig(index_or_path="/dev/video7", width=640, height=480, fps=FPS),
    }
    # Configure follower robot with cameras
    follower_config = OpenArmsFollowerConfig(
        port_left="can2",
        port_right="can3",
        can_interface="socketcan",
        id="openarms_follower",
        disable_torque_on_disconnect=True,
        max_relative_target=10.0,  # safety limit on per-step motion
        cameras=camera_config,
    )
    # Configure leader teleoperator (no cameras needed)
    leader_config = OpenArmsLeaderConfig(
        port_left="can0",
        port_right="can1",
        can_interface="socketcan",
        id="openarms_leader",
        manual_control=False,  # Enable torque control for gravity compensation
    )
    # Initialize robot and teleoperator
    print("\nInitializing devices...")
    follower = OpenArmsFollower(follower_config)
    leader = OpenArmsLeader(leader_config)
    # Connect devices
    print("Connecting and calibrating...")
    follower.connect(calibrate=True)
    leader.connect(calibrate=True)
    # Verify URDF is loaded for gravity compensation
    if leader.pin_robot is None:
        raise RuntimeError("URDF model not loaded on leader. Gravity compensation not available.")
    # Configure the dataset features.
    # For actions, we only want to record positions (not velocity or torque).
    action_features_hw = {}
    for key, value in follower.action_features.items():
        if key.endswith(".pos"):
            action_features_hw[key] = value
    action_features = hw_to_dataset_features(action_features_hw, "action")
    obs_features = hw_to_dataset_features(follower.observation_features, "observation")
    dataset_features = {**action_features, **obs_features}
    # Create the dataset
    print("\nCreating dataset...")
    repo_id = "<hf_username>/<dataset_repo_id>"  # TODO: Replace with your Hugging Face repo
    # Check if dataset already exists and prompt user. Loops until the chosen
    # path is free (overwrite), a fresh repo_id is given, or the user aborts.
    dataset_path = Path.home() / ".cache" / "huggingface" / "lerobot" / repo_id
    while dataset_path.exists():
        print(f"\nDataset already exists at: {dataset_path}")
        print("\nOptions:")
        print(" 1. Overwrite existing dataset")
        print(" 2. Use a different name")
        print(" 3. Abort")
        choice = input("\nEnter your choice (1/2/3): ").strip()
        if choice == '1':
            print(f"Removing existing dataset...")
            shutil.rmtree(dataset_path)
            print("✓ Existing dataset removed")
            break
        elif choice == '2':
            print("\nCurrent repo_id:", repo_id)
            new_repo_id = input("Enter new repo_id (format: <username>/<dataset_name>): ").strip()
            if new_repo_id and '/' in new_repo_id:
                repo_id = new_repo_id
                dataset_path = Path.home() / ".cache" / "huggingface" / "lerobot" / repo_id
                print(f"✓ Using new repo_id: {repo_id}")
                # Loop will continue if this new path also exists
            else:
                print("Invalid repo_id format. Please use format: <username>/<dataset_name>")
        elif choice == '3':
            print("Aborting. Please remove the existing dataset manually or restart with a different repo_id.")
            # Disconnect cleanly before bailing out.
            follower.disconnect()
            leader.disconnect()
            return
        else:
            print("Invalid choice. Please enter 1, 2, or 3.")
    dataset = LeRobotDataset.create(
        repo_id=repo_id,
        fps=FPS,
        features=dataset_features,
        robot_type=follower.name,
        use_videos=True,
        image_writer_threads=4,
    )
    # Initialize keyboard listener and visualization
    _, events = init_keyboard_listener()
    init_rerun(session_name="openarms_recording")
    # Enable motors on both leader arms for gravity compensation
    leader.bus_right.enable_torque()
    leader.bus_left.enable_torque()
    time.sleep(0.1)  # brief settle time after enabling torque
    print("\n" + "=" * 70)
    print(f"Recording {NUM_EPISODES} episodes")
    print(f"Task: {TASK_DESCRIPTION}")
    print("=" * 70)
    print("\nLeader BOTH arms: Gravity + Friction comp | Follower BOTH arms: Teleop")
    print("\nKeyboard controls:")
    print(" - Press 'q' to stop recording")
    print(" - Press 'r' to re-record current episode")
    print("=" * 70)
    episode_idx = 0
    try:
        while episode_idx < NUM_EPISODES and not events["stop_recording"]:
            log_say(f"Recording episode {episode_idx + 1} of {NUM_EPISODES}")
            # Record episode with compensation active
            record_loop_with_compensation(
                robot=follower,
                leader=leader,
                events=events,
                fps=FPS,
                dataset=dataset,
                dataset_features=dataset_features,
                control_time_s=EPISODE_TIME_SEC,
                single_task=TASK_DESCRIPTION,
                display_data=True,
            )
            # Reset the environment if not stopping or re-recording (skipped
            # after the final episode unless a re-record was requested).
            if not events["stop_recording"] and (episode_idx < NUM_EPISODES - 1 or events["rerecord_episode"]):
                log_say("Reset the environment")
                record_loop_with_compensation(
                    robot=follower,
                    leader=leader,
                    events=events,
                    fps=FPS,
                    dataset=None,  # Don't save reset period
                    dataset_features=dataset_features,
                    control_time_s=RESET_TIME_SEC,
                    single_task=TASK_DESCRIPTION,
                    display_data=True,
                )
            # Handle re-recording: drop the buffered episode and loop again
            # without advancing episode_idx.
            if events["rerecord_episode"]:
                log_say("Re-recording episode")
                events["rerecord_episode"] = False
                events["exit_early"] = False
                dataset.clear_episode_buffer()
                continue
            # Only save episode if frames were recorded
            if dataset.episode_buffer is not None and dataset.episode_buffer["size"] > 0:
                dataset.save_episode()
                episode_idx += 1
            else:
                log_say("No frames recorded, skipping episode save")
                # Clear the empty buffer
                dataset.episode_buffer = None
    except KeyboardInterrupt:
        print("\n\nStopping recording...")
    finally:
        # Clean up: always disable torque and disconnect, even on error.
        log_say("Stop recording")
        try:
            leader.bus_right.disable_torque()
            leader.bus_left.disable_torque()
            time.sleep(0.1)
            leader.disconnect()
            follower.disconnect()
            print("✓ Shutdown complete")
        except Exception as e:
            print(f"Shutdown error: {e}")
    # Upload dataset. NOTE(review): this also runs after Ctrl+C, since the
    # KeyboardInterrupt is swallowed above — confirm that is intended.
    print("\nUploading dataset to Hugging Face Hub...")
    try:
        dataset.push_to_hub()
        print("✓ Dataset uploaded successfully")
    except Exception as e:
        print(f"Warning: Failed to upload dataset: {e}")
        print("You can manually upload later using: dataset.push_to_hub()")
    print("✓ Recording complete!")


if __name__ == "__main__":
    main()
+166
View File
@@ -0,0 +1,166 @@
#!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenArms Dataset Replay Example
Replays position actions from a recorded dataset on an OpenArms follower robot.
Only position commands (ending with .pos) are replayed, not velocity or torque.
Example usage:
python examples/openarms/replay.py
"""
import time
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.robots.openarms.config_openarms_follower import OpenArmsFollowerConfig
from lerobot.robots.openarms.openarms_follower import OpenArmsFollower
from lerobot.utils.constants import ACTION
from lerobot.utils.robot_utils import busy_wait
from lerobot.utils.utils import log_say
# Configuration
EPISODE_IDX = 0  # Index of the episode to replay
DATASET_REPO_ID = "lerobot-data-collection/replay-this-2025-11-02-17-58"  # TODO: Replace with your dataset
DATASET_ROOT = None  # Use default cache location, or specify custom path
# Robot configuration - adjust these to match your setup
ROBOT_CONFIG = OpenArmsFollowerConfig(
    port_left="can2",  # CAN interface for left arm
    port_right="can3",  # CAN interface for right arm
    can_interface="socketcan",
    id="openarms_follower",
    disable_torque_on_disconnect=True,
    max_relative_target=10.0,  # Safety limit: max degrees to move per step
)
def main():
    """Replay recorded position actions from a dataset episode on the robot.

    Loads episode ``EPISODE_IDX`` from ``DATASET_REPO_ID``, keeps only the
    position channels of the action vector (names ending in ``.pos``) and
    streams them to the follower at the dataset's recorded fps. Velocity and
    torque channels are ignored. Ctrl+C stops the replay early; the robot is
    always disconnected on exit.
    """
    print("=" * 70)
    print("OpenArms Dataset Replay")
    print("=" * 70)
    print(f"\nDataset: {DATASET_REPO_ID}")
    print(f"Episode: {EPISODE_IDX}")
    print(f"Robot: {ROBOT_CONFIG.id}")
    print(f" Left arm: {ROBOT_CONFIG.port_left}")
    print(f" Right arm: {ROBOT_CONFIG.port_right}")
    print("\n" + "=" * 70)

    # Initialize the robot
    print("\n[1/3] Initializing robot...")
    robot = OpenArmsFollower(ROBOT_CONFIG)

    # Load only the requested episode.
    print(f"\n[2/3] Loading dataset '{DATASET_REPO_ID}'...")
    dataset = LeRobotDataset(
        DATASET_REPO_ID,
        root=DATASET_ROOT,
        episodes=[EPISODE_IDX]
    )
    # Filter dataset to only include frames from the specified episode
    # (required for dataset V3.0 where episodes are chunked)
    episode_frames = dataset.hf_dataset.filter(
        lambda x: x["episode_index"] == EPISODE_IDX
    )
    num_frames = len(episode_frames)  # hoisted: reused several times below
    if num_frames == 0:
        raise ValueError(
            f"No frames found for episode {EPISODE_IDX} in dataset {DATASET_REPO_ID}"
        )
    print(f" Found {num_frames} frames in episode {EPISODE_IDX}")

    # Extract action features from dataset and keep only position channels.
    action_features = dataset.features.get(ACTION, {})
    action_names = action_features.get("names", [])
    position_action_names = [name for name in action_names if name.endswith(".pos")]
    if not position_action_names:
        raise ValueError(
            f"No position actions found in dataset. Action names: {action_names}"
        )
    print(f" Found {len(position_action_names)} position actions to replay")
    print(f" Actions: {', '.join(position_action_names[:5])}{'...' if len(position_action_names) > 5 else ''}")

    # Select only the action column so replay doesn't decode images.
    actions = episode_frames.select_columns(ACTION)

    # Connect to the robot
    print("\n[3/3] Connecting to robot...")
    robot.connect(calibrate=False)  # Skip calibration for replay
    if not robot.is_connected:
        raise RuntimeError("Robot failed to connect!")

    print("\n" + "=" * 70)
    print("Ready to replay!")
    print("=" * 70)
    print("\nThe robot will replay the recorded positions.")
    print("Press Ctrl+C to stop at any time.\n")
    input("Press ENTER to start replaying...")

    # Replay loop
    log_say(f"Replaying episode {EPISODE_IDX}", blocking=True)
    try:
        frame_period_s = 1.0 / dataset.fps  # constant per dataset; hoisted out of the loop
        for idx in range(num_frames):
            loop_start = time.perf_counter()

            # Rebuild the named action dict, keeping position actions only.
            action_array = actions[idx][ACTION]
            action = {
                name: float(action_array[i])
                for i, name in enumerate(action_names)
                if name.endswith(".pos")
            }

            # Send action to robot
            robot.send_action(action)

            # Maintain the dataset frame rate; clamp at zero so a slow
            # iteration never passes a negative wait to busy_wait.
            dt_s = frame_period_s - (time.perf_counter() - loop_start)
            busy_wait(max(0.0, dt_s))

            # Progress indicator every 100 frames
            if (idx + 1) % 100 == 0:
                progress = (idx + 1) / num_frames * 100
                print(f"Progress: {idx + 1}/{num_frames} frames ({progress:.1f}%)")

        print(f"\n✓ Successfully replayed {num_frames} frames")
        log_say("Replay complete", blocking=True)
    except KeyboardInterrupt:
        print("\n\nReplay interrupted by user")
    finally:
        # Always release the hardware, even on interrupt.
        print("\nDisconnecting robot...")
        robot.disconnect()
        print("✓ Replay complete!")


if __name__ == "__main__":
    main()
+7 -42
View File
@@ -14,8 +14,8 @@ from lerobot.teleoperators.openarms.config_openarms_leader import OpenArmsLeader
follower_config = OpenArmsFollowerConfig(
port_left="can0", # CAN interface for follower left arm
port_right="can1", # CAN interface for follower right arm
port_left="can2", # CAN interface for follower left arm
port_right="can3", # CAN interface for follower right arm
can_interface="socketcan", # Linux SocketCAN
id="openarms_follower",
disable_torque_on_disconnect=True,
@@ -24,8 +24,8 @@ follower_config = OpenArmsFollowerConfig(
leader_config = OpenArmsLeaderConfig(
port_left="can2", # CAN interface for leader left arm
port_right="can3", # CAN interface for leader right arm
port_left="can0", # CAN interface for leader left arm
port_right="can1", # CAN interface for leader right arm
can_interface="socketcan", # Linux SocketCAN
id="openarms_leader",
manual_control=True, # Enable manual control (torque disabled)
@@ -89,39 +89,23 @@ loop_times = []
start_time = time.perf_counter()
last_print_time = start_time
# Detailed timing accumulators
timing_stats = {
"leader_read": [],
"filter_data": [],
"follower_write": [],
}
try:
while True:
loop_start = time.perf_counter()
# Get action from leader
t0 = time.perf_counter()
leader_action = leader.get_action()
t1 = time.perf_counter()
timing_stats["leader_read"].append((t1 - t0) * 1000)
# Filter to only position data for all joints (both arms)
t2 = time.perf_counter()
joint_action = {}
for joint in all_joints:
pos_key = f"{joint}.pos"
if pos_key in leader_action:
joint_action[pos_key] = leader_action[pos_key]
t3 = time.perf_counter()
timing_stats["filter_data"].append((t3 - t2) * 1000)
# Send action to follower (both arms)
t4 = time.perf_counter()
if joint_action:
follower.send_action(joint_action)
t5 = time.perf_counter()
timing_stats["follower_write"].append((t5 - t4) * 1000)
# Measure loop time
loop_end = time.perf_counter()
@@ -138,31 +122,12 @@ try:
max_hz = 1.0 / min_time if min_time > 0 else 0
min_hz = 1.0 / max_time if max_time > 0 else 0
# Calculate average timing for each operation
timing_avgs = {
k: (sum(v) / len(v) if v else 0.0)
for k, v in timing_stats.items()
}
# Print detailed timing breakdown
print(f"\n{'='*70}")
print(f"Teleoperation - {current_hz:.1f} Hz | Total: {avg_time*1000:.1f} ms")
print(f"{'='*70}")
print(f" Leader Read: {timing_avgs['leader_read']:6.2f} ms")
print(f" Filter Data: {timing_avgs['filter_data']:6.2f} ms")
print(f" Follower Write: {timing_avgs['follower_write']:6.2f} ms")
print(f" ───────────────────────────────")
total_accounted = sum(timing_avgs.values())
print(f" Accounted: {total_accounted:6.2f} ms")
print(f" Unaccounted: {avg_time*1000 - total_accounted:6.2f} ms")
print(f" ───────────────────────────────")
print(f" Hz Range: {min_hz:.1f} - {max_hz:.1f} Hz")
print(f"{'='*70}\n")
print(f"[Hz Stats] Avg: {current_hz:.1f} Hz | "
f"Range: {min_hz:.1f}-{max_hz:.1f} Hz | "
f"Avg loop time: {avg_time*1000:.1f} ms")
# Reset for next measurement window
loop_times = []
for key in timing_stats:
timing_stats[key] = []
last_print_time = loop_end
except KeyboardInterrupt:
@@ -28,8 +28,8 @@ def main():
# Configuration
follower_config = OpenArmsFollowerConfig(
port_left="can0",
port_right="can1",
port_left="can2",
port_right="can3",
can_interface="socketcan",
id="openarms_follower",
disable_torque_on_disconnect=True,
@@ -37,8 +37,8 @@ def main():
)
leader_config = OpenArmsLeaderConfig(
port_left="can2",
port_right="can3",
port_left="can0",
port_right="can1",
can_interface="socketcan",
id="openarms_leader",
manual_control=False, # Enable torque control for gravity compensation
@@ -49,8 +49,8 @@ def main():
follower = OpenArmsFollower(follower_config)
leader = OpenArmsLeader(leader_config)
follower.connect(calibrate=True)
leader.connect(calibrate=True)
follower.connect()
leader.connect()
# URDF is automatically loaded in the leader constructor
if leader.pin_robot is None:
+200 -8
View File
@@ -39,13 +39,35 @@ h3 {
}
.container {
max-width: 1600px;
max-width: 1920px;
margin: 0 auto;
display: grid;
grid-template-columns: minmax(500px, 600px) 1fr;
gap: 2rem;
align-items: start;
}
/* Left column container */
.left-column {
display: flex;
flex-direction: column;
gap: 1.5rem;
}
/* Right column container */
.right-column {
display: flex;
flex-direction: column;
gap: 1.5rem;
}
/* Responsive: Stack on smaller screens */
@media (max-width: 1200px) {
.container {
grid-template-columns: 1fr;
}
}
.panel {
background: white;
border-radius: 8px;
@@ -129,6 +151,60 @@ h3 {
cursor: not-allowed;
}
.btn-zero {
background: #8b5cf6;
color: white;
border: none;
padding: 0.5rem 1rem;
border-radius: 4px;
font-size: 0.875rem;
font-weight: 500;
cursor: pointer;
transition: background 0.2s;
}
.btn-zero:hover:not(:disabled) {
background: #7c3aed;
}
.btn-zero:disabled {
background: #d1d5db;
cursor: not-allowed;
}
.zero-position-section {
margin-top: 1rem;
padding-top: 1rem;
border-top: 1px solid #e5e7eb;
}
.btn-zero-large {
width: 100%;
background: #8b5cf6;
color: white;
border: none;
padding: 0.875rem 1.5rem;
border-radius: 8px;
font-size: 1rem;
font-weight: 600;
cursor: pointer;
transition: all 0.2s;
box-shadow: 0 2px 4px rgba(139, 92, 246, 0.2);
}
.btn-zero-large:hover:not(:disabled) {
background: #7c3aed;
box-shadow: 0 4px 8px rgba(139, 92, 246, 0.3);
transform: translateY(-1px);
}
.btn-zero-large:disabled {
background: #d1d5db;
cursor: not-allowed;
box-shadow: none;
transform: none;
}
.btn-disconnect {
background: #ef4444;
color: white;
@@ -225,10 +301,9 @@ h3 {
}
.control-horizontal {
display: grid;
grid-template-columns: 2fr 1fr;
gap: 2rem;
align-items: start;
display: flex;
flex-direction: column;
gap: 1.5rem;
}
.control-left {
@@ -325,6 +400,108 @@ button {
color: #991b1b;
}
.status.recording.recording-active {
display: flex;
flex-direction: column;
gap: 1rem;
background: #dc2626;
color: white;
padding: 1.5rem;
border: 4px solid #991b1b;
box-shadow: 0 4px 12px rgba(220, 38, 38, 0.4);
font-weight: 700;
font-size: 1rem;
}
.status.recording.recording-active .indicator {
width: 20px;
height: 20px;
background: #fef2f2;
animation: pulse-strong 1s ease-in-out infinite;
}
@keyframes pulse-strong {
0%, 100% {
opacity: 1;
transform: scale(1);
}
50% {
opacity: 0.7;
transform: scale(1.1);
}
}
.status.recording.recording-active .time-display {
display: flex;
align-items: center;
gap: 0.75rem;
font-size: 1.5rem;
font-weight: 700;
color: white;
}
.status.recording.recording-active .btn-stop {
align-self: stretch;
}
.ramp-up-countdown {
display: flex;
justify-content: center;
margin-bottom: 1rem;
}
.countdown-box {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
padding: 2rem 3rem;
background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%);
border: 4px solid #f59e0b;
border-radius: 16px;
box-shadow: 0 6px 20px rgba(245, 158, 11, 0.4);
min-width: 280px;
animation: pulse-warm 1.5s ease-in-out infinite;
}
@keyframes pulse-warm {
0%, 100% {
box-shadow: 0 6px 20px rgba(245, 158, 11, 0.4);
}
50% {
box-shadow: 0 6px 25px rgba(245, 158, 11, 0.6);
}
}
.countdown-label {
font-size: 1rem;
color: #92400e;
text-transform: uppercase;
letter-spacing: 1.5px;
font-weight: 800;
margin-bottom: 1rem;
text-align: center;
}
.countdown-value {
font-size: 4.5rem;
font-weight: 900;
color: #d97706;
font-family: 'Courier New', monospace;
line-height: 1;
text-shadow: 2px 2px 6px rgba(0, 0, 0, 0.15);
margin-bottom: 0.5rem;
}
.countdown-subtitle {
font-size: 0.875rem;
color: #78350f;
font-weight: 600;
font-style: italic;
text-align: center;
margin-top: 0.5rem;
}
.status.idle {
background: #f3f4f6;
color: #374151;
@@ -421,10 +598,25 @@ select:disabled {
cursor: not-allowed;
}
.camera-grid {
/* Camera Layout */
.camera-layout {
display: flex;
flex-direction: column;
gap: 1.5rem;
}
.camera-base {
width: 100%;
}
.camera-wrist-container {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
gap: 1rem;
grid-template-columns: repeat(2, 1fr);
gap: 1.5rem;
}
.camera-wrist {
width: 100%;
}
.camera {
+71 -18
View File
@@ -17,6 +17,8 @@ function App() {
const [error, setError] = useState(null);
const [statusMessage, setStatusMessage] = useState('Ready');
const [uploadStatus, setUploadStatus] = useState(null);
const [rampUpRemaining, setRampUpRemaining] = useState(0);
const [movingToZero, setMovingToZero] = useState(false);
const [configExpanded, setConfigExpanded] = useState(false);
// Configuration
@@ -73,6 +75,8 @@ function App() {
setError(data.error);
setStatusMessage(data.status_message || 'Ready');
setUploadStatus(data.upload_status);
setRampUpRemaining(data.ramp_up_remaining || 0);
setMovingToZero(data.moving_to_zero || false);
if (data.config) {
// Only merge server config if we don't have a saved config (first load)
@@ -228,6 +232,21 @@ function App() {
}
};
// Move robot to zero position
const moveToZero = async () => {
setError(null);
try {
const response = await fetch(`${API_BASE}/robots/move-to-zero`, { method: 'POST' });
if (!response.ok) {
const data = await response.json();
throw new Error(data.detail || 'Failed to move to zero position');
}
await response.json();
} catch (e) {
setError(`Move to zero failed: ${e.message}`);
}
};
// Format time as MM:SS
const formatTime = (seconds) => {
const mins = Math.floor(seconds / 60);
@@ -270,8 +289,10 @@ function App() {
</header>
<div className="container">
{/* Configuration Panel */}
<section className="panel config-panel">
{/* Left Column: Configuration and Recording Control */}
<div className="left-column">
{/* Configuration Panel */}
<section className="panel config-panel">
<div
className="config-header"
onClick={() => setConfigExpanded(!configExpanded)}
@@ -491,9 +512,20 @@ function App() {
</button>
</div>
{/* Recording Status */}
{isRecording && (
<div className="status recording">
{/* Ramp-up Countdown */}
{isRecording && rampUpRemaining > 0 && (
<div className="ramp-up-countdown">
<div className="countdown-box">
<div className="countdown-label"> WARMING UP - PID RAMP-UP</div>
<div className="countdown-value">{rampUpRemaining.toFixed(1)}s</div>
<div className="countdown-subtitle">Recording will start automatically...</div>
</div>
</div>
)}
{/* Recording Status - Only show after ramp-up */}
{isRecording && rampUpRemaining <= 0 && (
<div className="status recording recording-active">
<div className="indicator"></div>
<span className="time-display">
{formatTime(elapsedTime)} @ {currentFps.toFixed(1)} FPS
@@ -517,6 +549,20 @@ function App() {
</div>
</div>
{/* Move to Zero Button */}
{robotsReady && !isRecording && !isInitializing && (
<div className="zero-position-section">
<button
onClick={moveToZero}
disabled={movingToZero}
className="btn-zero-large"
title="Move follower robot to zero position (2s with 60% gains)"
>
{movingToZero ? '⏳ Moving to Zero Position...' : '🎯 Move to Zero Position'}
</button>
</div>
)}
{/* Error Display */}
{error && (
<div className="error-box">
@@ -524,25 +570,31 @@ function App() {
</div>
)}
</section>
</div>
{/* Camera Feeds */}
<section className="panel cameras">
{/* Right Column: Camera Feeds */}
<div className="right-column">
<section className="panel cameras">
<h2>📹 Camera Views</h2>
{robotsReady || isRecording || isInitializing ? (
<div className="camera-grid">
<div className="camera">
<h3>Left Wrist</h3>
<img src={`${API_BASE}/camera/stream/left_wrist`} alt="Left Wrist Camera" />
</div>
<div className="camera">
<h3>Base</h3>
<div className="camera-layout">
{/* Base camera - full width */}
<div className="camera camera-base">
<h3>Base Camera</h3>
<img src={`${API_BASE}/camera/stream/base`} alt="Base Camera" />
</div>
<div className="camera">
<h3>Right Wrist</h3>
<img src={`${API_BASE}/camera/stream/right_wrist`} alt="Right Wrist Camera" />
{/* Wrist cameras - side by side */}
<div className="camera-wrist-container">
<div className="camera camera-wrist">
<h3>Left Wrist</h3>
<img src={`${API_BASE}/camera/stream/left_wrist`} alt="Left Wrist Camera" />
</div>
<div className="camera camera-wrist">
<h3>Right Wrist</h3>
<img src={`${API_BASE}/camera/stream/right_wrist`} alt="Right Wrist Camera" />
</div>
</div>
</div>
) : (
@@ -552,6 +604,7 @@ function App() {
</div>
)}
</section>
</div>
</div>
</main>
+18 -1
View File
@@ -71,7 +71,24 @@ echo ""
# 1. Start FastAPI backend (Rerun will start when recording begins)
echo -e "${BLUE}[1/2]${NC} Starting FastAPI backend on port 8000..."
cd "$SCRIPT_DIR"
python web_record_server.py > /tmp/openarms_backend.log 2>&1 &
# Use Python from current environment (if lerobot env is active, it will use that)
# Otherwise, check if we need to use conda run
if [[ "$CONDA_DEFAULT_ENV" == "lerobot" ]]; then
# Already in lerobot environment
echo -e "${GREEN}✓ Using active lerobot environment${NC}"
PYTHON_CMD="python"
elif command -v conda >/dev/null 2>&1 && conda env list | grep -q "^lerobot "; then
# lerobot env exists but not active - use conda run
echo -e "${YELLOW}Using conda run with lerobot environment...${NC}"
PYTHON_CMD="conda run -n lerobot --no-capture-output python"
else
# Fall back to system python
echo -e "${YELLOW}⚠ Warning: lerobot environment not found, using system python${NC}"
PYTHON_CMD="python"
fi
$PYTHON_CMD web_record_server.py > /tmp/openarms_backend.log 2>&1 &
BACKEND_PID=$!
sleep 3
@@ -56,6 +56,9 @@ recording_state = {
"status_message": "Ready",
"upload_status": None,
"current_fps": 0.0,
"ramp_up_remaining": 0.0, # Remaining seconds for PID ramp-up
"recording_started_time": None, # Time when actual recording starts (after ramp-up)
"moving_to_zero": False, # Whether robot is moving to zero position
"config": {
"leader_left": "can0",
"leader_right": "can1",
@@ -76,18 +79,26 @@ robot_instances = {
}
# Camera frames from recording loop (for streaming during recording)
camera_frames = {
# Store pre-encoded JPEG bytes with timestamps to track freshness and discard stale frames
camera_frames_jpeg = {
"left_wrist": None,
"right_wrist": None,
"base": None,
}
camera_frames_timestamp = {
"left_wrist": 0.0,
"right_wrist": 0.0,
"base": 0.0,
}
camera_frames_lock = threading.Lock()
# Maximum age for frames (in seconds) - discard frames older than this
MAX_FRAME_AGE = 0.2
# Recording control
recording_thread = None
stop_recording_flag = threading.Event()
FPS = 30
FRICTION_SCALE = 1.0
RAMP_UP_DURATION = 3.0 # Seconds to ramp up PID/torque from 0 to full
class RecordingConfig(BaseModel):
@@ -358,12 +369,13 @@ def record_loop_with_compensation():
print(f"[Recording] Error: Missing components")
return
print(f"[Recording] Starting recording loop...")
print(f"[Recording] Starting recording loop with ramp-up...")
dt = 1 / FPS
episode_start_time = time.perf_counter()
loop_start_time = time.perf_counter()
episode_start_time = None # Will be set after ramp-up completes
frame_count = 0
last_fps_update = episode_start_time
last_fps_update = loop_start_time
fps_frame_count = 0
# All joints (both arms)
@@ -376,15 +388,39 @@ def record_loop_with_compensation():
try:
while not stop_recording_flag.is_set():
loop_start = time.perf_counter()
elapsed = loop_start - episode_start_time
elapsed_total = loop_start - loop_start_time
# Calculate actual FPS every second
fps_frame_count += 1
if elapsed - (last_fps_update - episode_start_time) >= 1.0:
actual_fps = fps_frame_count / (elapsed - (last_fps_update - episode_start_time))
recording_state["current_fps"] = round(actual_fps, 1)
fps_frame_count = 0
last_fps_update = loop_start
# PID ramp-up: calculate ramp factor and completion status
ramp_up_remaining = max(0.0, RAMP_UP_DURATION - elapsed_total)
ramp_factor = max(0.0, min(1.0, 1.0 - (ramp_up_remaining / RAMP_UP_DURATION)))
is_ramp_up_complete = ramp_up_remaining <= 0.0
# Start episode timer after ramp-up completes
if is_ramp_up_complete and episode_start_time is None:
episode_start_time = loop_start
recording_state["recording_started_time"] = time.time() # Set timestamp for UI
print(f"[Recording] Ramp-up complete, starting data recording")
# Update ramp-up status for UI
recording_state["ramp_up_remaining"] = round(ramp_up_remaining, 2)
# Track if we're falling behind - if so, skip encoding to maintain FPS
# Check if previous loop took too long (indicating we're falling behind)
if hasattr(record_loop_with_compensation, '_last_loop_duration'):
# If previous loop took longer than frame time, we're falling behind - skip encoding
skip_encoding = record_loop_with_compensation._last_loop_duration > dt * 1.1
else:
skip_encoding = False
# Calculate actual FPS every second (only after ramp-up)
if is_ramp_up_complete:
elapsed = loop_start - episode_start_time
fps_frame_count += 1
if elapsed - (last_fps_update - episode_start_time) >= 1.0:
actual_fps = fps_frame_count / (elapsed - (last_fps_update - episode_start_time))
recording_state["current_fps"] = round(actual_fps, 1)
fps_frame_count = 0
last_fps_update = loop_start
# Get leader state
leader_action = leader.get_action()
@@ -426,11 +462,13 @@ def record_loop_with_compensation():
friction = leader_friction_torques_nm.get(motor_name, 0.0)
leader_total_torques_nm[motor_name] = gravity + friction
# Apply compensation to RIGHT arm
# Apply compensation to RIGHT arm with ramp-up scaling
for motor in leader.bus_right.motors:
full_name = f"right_{motor}"
position = leader_positions_deg.get(full_name, 0.0)
torque = leader_total_torques_nm.get(full_name, 0.0)
torque_raw = leader_total_torques_nm.get(full_name, 0.0)
# Scale torque by ramp factor during ramp-up phase
torque = torque_raw * ramp_factor
kd = leader.get_damping_kd(motor)
leader.bus_right._mit_control(
@@ -440,11 +478,13 @@ def record_loop_with_compensation():
torque=torque,
)
# Apply compensation to LEFT arm
# Apply compensation to LEFT arm with ramp-up scaling
for motor in leader.bus_left.motors:
full_name = f"left_{motor}"
position = leader_positions_deg.get(full_name, 0.0)
torque = leader_total_torques_nm.get(full_name, 0.0)
torque_raw = leader_total_torques_nm.get(full_name, 0.0)
# Scale torque by ramp factor during ramp-up phase
torque = torque_raw * ramp_factor
kd = leader.get_damping_kd(motor)
leader.bus_left._mit_control(
@@ -454,7 +494,7 @@ def record_loop_with_compensation():
torque=torque,
)
# Send positions to follower
# Send positions to follower with ramped PID gains using send_action
follower_action = {}
for joint in all_joints:
pos_key = f"{joint}.pos"
@@ -462,35 +502,94 @@ def record_loop_with_compensation():
follower_action[pos_key] = leader_action[pos_key]
if follower_action:
follower.send_action(follower_action)
# Build custom ramped PID gains for all motors
motor_index = {
"joint_1": 0, "joint_2": 1, "joint_3": 2, "joint_4": 3,
"joint_5": 4, "joint_6": 5, "joint_7": 6, "gripper": 7,
}
custom_kp = {}
custom_kd = {}
# Calculate ramped gains for all motors in both arms
for motor in follower.bus_right.motors:
full_name = f"right_{motor}"
idx = motor_index.get(motor, 0)
kp_base = follower.config.position_kp[idx] if isinstance(follower.config.position_kp, list) else follower.config.position_kp
kd_base = follower.config.position_kd[idx] if isinstance(follower.config.position_kd, list) else follower.config.position_kd
custom_kp[full_name] = kp_base * ramp_factor
custom_kd[full_name] = kd_base * ramp_factor
for motor in follower.bus_left.motors:
full_name = f"left_{motor}"
idx = motor_index.get(motor, 0)
kp_base = follower.config.position_kp[idx] if isinstance(follower.config.position_kp, list) else follower.config.position_kp
kd_base = follower.config.position_kd[idx] if isinstance(follower.config.position_kd, list) else follower.config.position_kd
custom_kp[full_name] = kp_base * ramp_factor
custom_kd[full_name] = kd_base * ramp_factor
# Send action with custom ramped gains (includes safety checks)
follower.send_action(follower_action, custom_kp=custom_kp, custom_kd=custom_kd)
# Get observation
observation = follower.get_observation()
# Store camera frames for streaming (no extra camera reads needed!)
with camera_frames_lock:
# Get current timestamp for frame freshness tracking
frame_timestamp = time.perf_counter()
# Pre-encode camera frames as JPEG for streaming (minimizes lock time and encoding overhead)
# This avoids encoding the same frame multiple times for multiple stream endpoints
# OpenCV cameras configured with ColorMode.RGB return RGB frames (default config)
# Skip encoding if we're falling behind to maintain 30 FPS
encoded_frames = {}
# Only encode if we're not falling behind
if not skip_encoding:
for cam_name in ["left_wrist", "right_wrist", "base"]:
if cam_name in observation:
camera_frames[cam_name] = observation[cam_name].copy()
frame = observation[cam_name]
if frame is not None and len(frame.shape) == 3:
# Frame is already RGB from OpenCVCamera (ColorMode.RGB by default)
# Encode once, use many times - eliminates per-stream encoding overhead
# Quick encode - use lower quality if we're tight on time
encode_start = time.perf_counter()
success, jpeg_bytes = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 70])
encode_duration = time.perf_counter() - encode_start
# Only use encoded frame if encoding was fast enough (< 10ms)
if success and encode_duration < 0.01:
encoded_frames[cam_name] = jpeg_bytes.tobytes()
# If encoding took too long, skip this camera for this frame
# Add to dataset
try:
obs_frame = build_dataset_frame(dataset_features, observation, prefix="observation")
action_frame = build_dataset_frame(dataset_features, follower_action, prefix="action")
frame = {**obs_frame, **action_frame}
frame["task"] = task
dataset.add_frame(frame)
frame_count += 1
# Log progress every 5 seconds (150 frames @ 30 FPS)
if frame_count % 150 == 0:
print(f"[Recording] {frame_count} frames @ {recording_state['current_fps']} FPS")
except Exception as frame_error:
print(f"[Recording] Frame error: {frame_error}")
# Continue recording even if one frame fails
# Store encoded frames with timestamps with minimal lock time
with camera_frames_lock:
for cam_name, jpeg_bytes in encoded_frames.items():
camera_frames_jpeg[cam_name] = jpeg_bytes
camera_frames_timestamp[cam_name] = frame_timestamp
# Add to dataset ONLY after ramp-up completes
if is_ramp_up_complete:
try:
obs_frame = build_dataset_frame(dataset_features, observation, prefix="observation")
action_frame = build_dataset_frame(dataset_features, follower_action, prefix="action")
frame = {**obs_frame, **action_frame}
frame["task"] = task
dataset.add_frame(frame)
frame_count += 1
# Log progress every 5 seconds (150 frames @ 30 FPS)
if frame_count % 150 == 0:
print(f"[Recording] {frame_count} frames @ {recording_state['current_fps']} FPS")
except Exception as frame_error:
print(f"[Recording] Frame error: {frame_error}")
# Continue recording even if one frame fails
# Maintain loop rate - don't wait if behind, just continue
loop_duration = time.perf_counter() - loop_start
# Track loop duration for next iteration's encoding skip decision
record_loop_with_compensation._last_loop_duration = loop_duration
sleep_time = dt - loop_duration
# Only sleep if we're ahead of schedule (more than 2ms remaining)
@@ -551,6 +650,8 @@ async def start_recording(config: RecordingConfig):
recording_state["is_initializing"] = False
recording_state["is_recording"] = True
recording_state["start_time"] = time.time()
recording_state["recording_started_time"] = None # Will be set after ramp-up
recording_state["ramp_up_remaining"] = RAMP_UP_DURATION # Initialize ramp-up
stop_recording_flag.clear() # Clear the stop flag
# Start recording in background thread
@@ -586,6 +687,8 @@ async def stop_recording():
recording_state["is_recording"] = False
recording_state["status_message"] = "Stopping recording..."
recording_state["ramp_up_remaining"] = 0.0 # Reset ramp-up
recording_state["recording_started_time"] = None # Reset recording start time
# Stop the recording thread
stop_recording_flag.set()
@@ -749,12 +852,97 @@ async def disconnect_robots():
return {"status": "disconnected"}
@app.post("/api/robots/move-to-zero")
async def move_to_zero():
    """Drive the follower robot to its zero position with softened gains.

    Streams zero-position commands for 2 seconds at 30 Hz using 5% of the
    configured proportional gain (so the motion is compliant) while keeping
    the full derivative gain for damping.

    Returns:
        dict: ``{"status": "success", "message": ...}`` on completion.

    Raises:
        HTTPException: 400 if a recording is active/initializing, the robots
            are not set up, or a move is already in progress; 500 on any
            runtime failure during the move.
    """
    import asyncio  # local import: needed only for the non-blocking sleep below

    global recording_state

    if recording_state["is_recording"] or recording_state["is_initializing"]:
        raise HTTPException(status_code=400, detail="Cannot move to zero while recording")

    if not robot_instances.get("follower"):
        raise HTTPException(status_code=400, detail="Robots not initialized. Please setup robots first.")

    if recording_state["moving_to_zero"]:
        raise HTTPException(status_code=400, detail="Already moving to zero position")

    try:
        recording_state["moving_to_zero"] = True
        recording_state["status_message"] = "Moving to zero position..."

        follower = robot_instances["follower"]

        # Motor name -> index into the per-joint gain lists in the config.
        motor_index = {
            "joint_1": 0, "joint_2": 1, "joint_3": 2, "joint_4": 3,
            "joint_5": 4, "joint_6": 5, "joint_7": 6, "gripper": 7,
        }

        def _base_gain(gains, motor):
            # Config gains may be a per-joint list or a single scalar value.
            return gains[motor_index.get(motor, 0)] if isinstance(gains, list) else gains

        # Build zero-position action with reduced kp (5% of configured) and
        # unscaled kd so both arms stay damped while moving compliantly.
        zero_action = {}
        custom_kp = {}
        custom_kd = {}
        for prefix, bus in (("right", follower.bus_right), ("left", follower.bus_left)):
            for motor in bus.motors:
                full_name = f"{prefix}_{motor}"
                zero_action[f"{full_name}.pos"] = 0.0
                custom_kp[full_name] = _base_gain(follower.config.position_kp, motor) * 0.05
                custom_kd[full_name] = _base_gain(follower.config.position_kd, motor)

        # Stream the zero-position command for 2 seconds at 30 Hz.
        duration = 2.0
        fps = 30
        dt = 1 / fps

        start_time = time.perf_counter()
        while time.perf_counter() - start_time < duration:
            loop_start = time.perf_counter()

            # Send action with reduced gains (send_action includes the
            # follower's safety checks).
            follower.send_action(zero_action, custom_kp=custom_kp, custom_kd=custom_kd)

            # Maintain the loop rate WITHOUT blocking the event loop:
            # time.sleep() here would freeze every other endpoint
            # (e.g. /api/status, camera streams) for the full 2 seconds.
            loop_duration = time.perf_counter() - loop_start
            sleep_time = dt - loop_duration
            if sleep_time > 0:
                await asyncio.sleep(sleep_time)

        recording_state["status_message"] = "Moved to zero position" if recording_state["robots_ready"] else "Ready"
        return {"status": "success", "message": "Robot moved to zero position"}

    except Exception as e:
        recording_state["status_message"] = f"Error: {str(e)}"
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # Always release the busy flag, even if the move failed mid-way.
        recording_state["moving_to_zero"] = False
@app.get("/api/status")
async def get_status():
"""Get current recording status."""
elapsed = 0
if recording_state["is_recording"] and recording_state["start_time"]:
elapsed = time.time() - recording_state["start_time"]
# Only show elapsed time after ramp-up completes
if recording_state["is_recording"] and recording_state["recording_started_time"]:
elapsed = time.time() - recording_state["recording_started_time"]
return {
"is_recording": recording_state["is_recording"],
@@ -769,6 +957,8 @@ async def get_status():
"error": recording_state["error"],
"status_message": recording_state["status_message"],
"upload_status": recording_state["upload_status"],
"ramp_up_remaining": recording_state["ramp_up_remaining"],
"moving_to_zero": recording_state["moving_to_zero"],
"config": recording_state["config"]
}
@@ -799,20 +989,30 @@ async def stream_camera(camera_name: str):
while True:
frame = None
# If recording, use frames from recording loop (zero contention!)
# If recording, use pre-encoded JPEG frames from recording loop (zero encoding, minimal lock time!)
if recording_state["is_recording"]:
with camera_frames_lock:
frame = camera_frames.get(camera_name)
# Acquire lock only to read the reference and timestamp, not to encode
jpeg_bytes = None
frame_timestamp = 0.0
current_time = time.perf_counter()
if frame is not None:
# Frame already in RGB from recording loop
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
_, buffer = cv2.imencode('.jpg', frame_rgb, [cv2.IMWRITE_JPEG_QUALITY, 70])
with camera_frames_lock:
jpeg_bytes = camera_frames_jpeg.get(camera_name)
frame_timestamp = camera_frames_timestamp.get(camera_name, 0.0)
# Only serve frames that are fresh (not stale)
frame_age = current_time - frame_timestamp
if jpeg_bytes is not None and frame_age <= MAX_FRAME_AGE:
# Frame is fresh and available - yield it
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
time.sleep(0.033) # ~30 FPS (matches recording)
b'Content-Type: image/jpeg\r\n\r\n' + jpeg_bytes + b'\r\n')
time.sleep(0.1) # ~10 FPS for streaming (independent of recording FPS)
elif jpeg_bytes is not None and frame_age > MAX_FRAME_AGE:
# Frame is stale - skip it, wait a bit and try next frame
time.sleep(0.01)
continue
else:
# Wait a bit if frame not available yet
# No frame available yet - wait a bit
time.sleep(0.01)
continue
else: