remove unnecessary code

This commit is contained in:
Jade Choghari
2025-09-11 14:24:06 +02:00
parent 9784123463
commit 8567ab60d8
3 changed files with 2 additions and 55 deletions
+2 -1
View File
@@ -471,7 +471,8 @@ class LeRobotDataset(torch.utils.data.Dataset):
if self.episodes is not None and self.meta._version >= packaging.version.parse("v2.1"):
episodes_stats = [self.meta.episodes_stats[ep_idx] for ep_idx in self.episodes]
self.stats = aggregate_stats(episodes_stats)
# Load actual data
try:
if force_cache_sync:
raise FileNotFoundError
-53
View File
@@ -80,59 +80,6 @@ def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Ten
return return_observations
def preprocess_observation1(
    observations: dict[str, np.ndarray], cfg: Any = None
) -> dict[str, Tensor]:
    # TODO(aliberts, rcadene): refactor this to use features from the environment (no hardcoding)
    """Convert environment observation to LeRobot format observation.

    Args:
        observations: Dictionary of observation batches from a Gym vector environment.
            Recognized entries: "pixels" (batched uint8 channel-last images, either a
            single array or a dict of named arrays), optional "environment_state",
            "agent_pos" (required), and optional "task" (passed through unchanged).
        cfg: Optional config object, read via attribute access
            (``cfg.image_features`` and ``cfg.robot_state_feature_key``) to pick the
            output keys. When ``None``, the default keys ``"observation.image"`` and
            ``"observation.state"`` are used.

    Returns:
        Dictionary of observation batches with keys renamed to LeRobot format and
        values as tensors; images become float32, channel-first, in range [0, 1].
    """
    # Map to the input keys the policy expects.
    return_observations: dict[str, Tensor] = {}
    # next(iter(...)) takes the first configured image key without building a list.
    image_key = next(iter(cfg.image_features)) if cfg else "observation.image"
    state_key = cfg.robot_state_feature_key if cfg else "observation.state"

    if "pixels" in observations:
        if isinstance(observations["pixels"], dict):
            # Keys are assumed to already be in LeRobot format
            # (e.g. OBS_IMAGE, OBS_IMAGE_2, OBS_IMAGE_3) — TODO confirm upstream.
            imgs = observations["pixels"]
        else:
            imgs = {image_key: observations["pixels"]}

        for imgkey, img in imgs.items():
            # TODO(aliberts, rcadene): use transforms.ToTensor()?
            img = torch.from_numpy(img)

            # sanity check that images are channel last
            _, h, w, c = img.shape
            assert c < h and c < w, f"expect channel last images, but instead got {img.shape=}"

            # sanity check that images are uint8
            assert img.dtype == torch.uint8, f"expect torch.uint8, but instead {img.dtype=}"

            # convert to channel first of type float32 in range [0,1]
            # (plain permute+contiguous replaces the einops.rearrange call — identical result)
            img = img.permute(0, 3, 1, 2).contiguous()
            img = img.type(torch.float32)
            img /= 255

            return_observations[imgkey] = img

    if "environment_state" in observations:
        return_observations["observation.environment_state"] = torch.from_numpy(
            observations["environment_state"]
        ).float()

    # TODO(rcadene): enable pixels only baseline with `obs_type="pixels"` in environment by removing
    # requirement for "agent_pos"
    return_observations[state_key] = torch.from_numpy(observations["agent_pos"]).float()

    if "task" in observations:
        return_observations["task"] = observations["task"]

    return return_observations
def env_to_policy_features(env_cfg: EnvConfig) -> dict[str, PolicyFeature]:
# TODO(aliberts, rcadene): remove this hardcoding of keys and just use the nested keys as is
# (need to also refactor preprocess_observation and externalize normalization from policies)
-1
View File
@@ -155,7 +155,6 @@ def rollout(
while not np.all(done) and step < max_steps:
# Numpy array to tensor and changing dictionary keys to LeRobot policy format.
observation = preprocess_observation(observation)
# observation = preprocess_observation1(observation)
if return_observations:
all_observations.append(deepcopy(observation))