diff --git a/docs/source/using_dataset_tools.mdx b/docs/source/using_dataset_tools.mdx index 22c28828c..29e16ea0a 100644 --- a/docs/source/using_dataset_tools.mdx +++ b/docs/source/using_dataset_tools.mdx @@ -163,3 +163,41 @@ lerobot-edit-dataset \ ``` There is also a tool for adding features to a dataset that is not yet covered in `lerobot-edit-dataset`. + +# Dataset Visualization + +## Online Visualization + +When you record a dataset using `lerobot`, it is automatically uploaded to the Hugging Face Hub unless you specify otherwise. To view the dataset online, use our **LeRobot Dataset Visualizer**, available at: +https://huggingface.co/spaces/lerobot/visualize_dataset + +## Local Visualization + +You can also visualize episodes from a dataset locally using our command-line tool. + +**From the Hugging Face Hub:** + +```bash +lerobot-dataset-viz \ --repo-id lerobot/pusht \ --episode-index 0 +``` + +**From a local folder:** +Add the `--root` option and set `--mode local`. For example, to search in `./my_local_data_dir/lerobot/pusht`: + +```bash +lerobot-dataset-viz \ --repo-id lerobot/pusht \ --root ./my_local_data_dir \ --mode local \ --episode-index 0 +``` + +Once executed, the tool opens a `rerun.io` viewer that displays the camera streams, robot states, and actions for the selected episode.
+ +For advanced usage—including visualizing datasets stored on a remote server—run: + +```bash +lerobot-dataset-viz --help +``` diff --git a/src/lerobot/policies/smolvla/modeling_smolvla.py b/src/lerobot/policies/smolvla/modeling_smolvla.py index cce41def8..485d3e4e5 100644 --- a/src/lerobot/policies/smolvla/modeling_smolvla.py +++ b/src/lerobot/policies/smolvla/modeling_smolvla.py @@ -527,6 +527,7 @@ class VLAFlowMatching(nn.Module): num_vlm_layers=self.config.num_vlm_layers, self_attn_every_n_layers=self.config.self_attn_every_n_layers, expert_width_multiplier=self.config.expert_width_multiplier, + device=self.config.device if self.config.device is not None else "auto", ) self.state_proj = nn.Linear( self.config.max_state_dim, self.vlm_with_expert.config.text_config.hidden_size diff --git a/src/lerobot/scripts/lerobot_info.py b/src/lerobot/scripts/lerobot_info.py index 9b49cad18..879d392be 100644 --- a/src/lerobot/scripts/lerobot_info.py +++ b/src/lerobot/scripts/lerobot_info.py @@ -27,6 +27,25 @@ lerobot-info import importlib import platform +import shutil +import subprocess +from importlib.metadata import PackageNotFoundError, distribution + +PACKAGE_NAME = "lerobot" + + +def get_ffmpeg_version() -> str: + """Get the ffmpeg version if installed, otherwise return 'N/A'.""" + command_path = shutil.which("ffmpeg") + if command_path is None: + return "N/A" + try: + result = subprocess.run([command_path, "-version"], capture_output=True, text=True, check=True) + first_line = result.stdout.splitlines()[0] + version_info = first_line.split(" ")[2] + return version_info + except (subprocess.SubprocessError, IndexError): + return "Installed (version parsing failed)" def get_package_version(package_name: str) -> str: @@ -38,16 +57,17 @@ def get_package_version(package_name: str) -> str: return "N/A" -def get_sys_info() -> dict: +def get_sys_info() -> dict[str, str]: """Run this to get basic system info to help for tracking issues & bugs.""" # General package versions 
info = { - "lerobot version": get_package_version("lerobot"), + "LeRobot version": get_package_version(PACKAGE_NAME), "Platform": platform.platform(), "Python version": platform.python_version(), "Huggingface Hub version": get_package_version("huggingface_hub"), "Datasets version": get_package_version("datasets"), "Numpy version": get_package_version("numpy"), + "FFmpeg version": get_ffmpeg_version(), } # PyTorch and GPU specific information @@ -58,10 +78,10 @@ def get_sys_info() -> dict: try: import torch - torch_version = torch.__version__ + torch_version = str(torch.__version__) torch_cuda_available = torch.cuda.is_available() if torch_cuda_available: - cuda_version = torch.version.cuda + cuda_version = str(torch.version.cuda) # Gets the name of the first available GPU gpu_model = torch.cuda.get_device_name(0) except ImportError: @@ -71,24 +91,34 @@ def get_sys_info() -> dict: info.update( { "PyTorch version": torch_version, - "Is PyTorch built with CUDA support?": torch_cuda_available, + "Is PyTorch built with CUDA support?": str(torch_cuda_available), "Cuda version": cuda_version, "GPU model": gpu_model, "Using GPU in script?": "", } ) + scripts = "N/A" + try: + dist = distribution(PACKAGE_NAME) + scripts = [ep.name for ep in dist.entry_points if ep.group == "console_scripts"] + except PackageNotFoundError: + pass + + info.update({f"{PACKAGE_NAME} scripts": str(scripts)}) return info -def format_dict_for_markdown(d: dict) -> str: +def format_dict_for_markdown(d: dict[str, str]) -> str: """Formats a dictionary into a markdown-friendly bulleted list.""" return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) def main(): + """ + Main function to print system info in markdown format. + """ system_info = get_sys_info() - print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the last point.\n") print(format_dict_for_markdown(system_info))