feat(DeviceProcessor): Enhance tensor processing with device detection and float dtype conversion

- Improved the _process_tensor method to preserve GPU placement for tensors already on a GPU, facilitating multi-GPU training scenarios.
- Introduced a new _detect_device method in TokenizerProcessor to ensure tokenized tensors match the device of existing tensors in transitions.
- Added comprehensive unit tests to validate the functionality of device detection and float dtype conversion across various scenarios.
This commit is contained in:
AdilZouitine
2025-08-08 19:33:24 +02:00
parent 8bde9d0ab7
commit 5ca3920611
4 changed files with 468 additions and 4 deletions
+137
View File
@@ -820,6 +820,143 @@ def test_complementary_data_none():
assert TransitionKey.COMPLEMENTARY_DATA not in result
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_preserves_gpu_placement():
    """Test that DeviceProcessor preserves GPU placement when tensor is already on GPU."""
    processor = DeviceProcessor(device="cuda:0")
    # Create tensors already on GPU
    observation = {
        "observation.state": torch.randn(10).cuda(),  # Already on GPU
        "observation.image": torch.randn(3, 224, 224).cuda(),  # Already on GPU
    }
    action = torch.randn(5).cuda()  # Already on GPU
    transition = create_transition(observation=observation, action=action)
    result = processor(transition)
    # Check that tensors remain on their original GPU
    assert result[TransitionKey.OBSERVATION]["observation.state"].device.type == "cuda"
    assert result[TransitionKey.OBSERVATION]["observation.image"].device.type == "cuda"
    assert result[TransitionKey.ACTION].device.type == "cuda"
    # Verify no unnecessary copies were made. Comparing data_ptr() proves the
    # output shares the input's storage; torch.equal() would only prove equal
    # values and would pass even if a copy had been made.
    assert (
        result[TransitionKey.OBSERVATION]["observation.state"].data_ptr()
        == observation["observation.state"].data_ptr()
    )
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_multi_gpu_preservation():
    """Test that DeviceProcessor preserves placement on different GPUs in multi-GPU setup."""
    # Case 1: a processor configured for cuda:0 must not move tensors that
    # already live on cuda:1 (simulates Accelerate having placed the batch).
    gpu_processor = DeviceProcessor(device="cuda:0")
    second_gpu = torch.device("cuda:1")
    obs = {
        "observation.state": torch.randn(10).to(second_gpu),
        "observation.image": torch.randn(3, 224, 224).to(second_gpu),
    }
    act = torch.randn(5).to(second_gpu)
    out = gpu_processor(create_transition(observation=obs, action=act))
    out_obs = out[TransitionKey.OBSERVATION]
    assert out_obs["observation.state"].device == second_gpu
    assert out_obs["observation.image"].device == second_gpu
    assert out[TransitionKey.ACTION].device == second_gpu
    # Case 2: a CPU-configured processor must still move GPU tensors to CPU
    # (GPU preservation does not apply when the target is CPU).
    cpu_processor = DeviceProcessor(device="cpu")
    gpu_transition = create_transition(
        observation={"observation.state": torch.randn(10).cuda()}, action=torch.randn(5).cuda()
    )
    cpu_out = cpu_processor(gpu_transition)
    assert cpu_out[TransitionKey.OBSERVATION]["observation.state"].device.type == "cpu"
    assert cpu_out[TransitionKey.ACTION].device.type == "cpu"
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_multi_gpu_with_cpu_tensors():
    """Test that CPU tensors are moved to configured device even in multi-GPU context."""
    processor = DeviceProcessor(device="cuda:1")
    # Mix CPU tensors (should be moved) with tensors already on a GPU (should stay put).
    obs = {
        "observation.cpu": torch.randn(10),
        "observation.gpu0": torch.randn(10).cuda(0),
        "observation.gpu1": torch.randn(10).cuda(1),
    }
    out = processor(create_transition(observation=obs, action=torch.randn(5)))
    out_obs = out[TransitionKey.OBSERVATION]
    # CPU inputs land on the configured device (cuda:1).
    for moved in (out_obs["observation.cpu"], out[TransitionKey.ACTION]):
        assert moved.device.type == "cuda"
        assert moved.device.index == 1
    # GPU inputs keep their original placement.
    assert out_obs["observation.gpu0"].device.index == 0
    assert out_obs["observation.gpu1"].device.index == 1
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_multi_gpu_with_float_dtype():
    """Test float dtype conversion works correctly with multi-GPU preservation."""
    processor = DeviceProcessor(device="cuda:0", float_dtype="float16")
    # Float32 tensors spread across cuda:0, cuda:1 and CPU.
    obs = {
        "observation.gpu0": torch.randn(5, dtype=torch.float32).cuda(0),
        "observation.gpu1": torch.randn(5, dtype=torch.float32).cuda(1),
        "observation.cpu": torch.randn(5, dtype=torch.float32),
    }
    out_obs = processor(create_transition(observation=obs))[TransitionKey.OBSERVATION]
    # Placement: GPU tensors stay where they were; the CPU tensor moves to cuda:0.
    expected_index = {"observation.gpu0": 0, "observation.gpu1": 1, "observation.cpu": 0}
    for key, index in expected_index.items():
        assert out_obs[key].device.index == index
        # Dtype conversion applies regardless of the original device.
        assert out_obs[key].dtype == torch.float16
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_simulated_accelerate_scenario():
    """Test a scenario simulating how Accelerate would use the processor."""
    # Each simulated "process" owns a processor configured for cuda:0, while
    # its data arrives pre-placed on that process's own GPU.
    for gpu_id in range(min(torch.cuda.device_count(), 2)):
        processor = DeviceProcessor(device="cuda:0")
        target = torch.device(f"cuda:{gpu_id}")
        transition = create_transition(
            observation={"observation.state": torch.randn(1, 10).to(target)},
            action=torch.randn(1, 5).to(target),
        )
        out = processor(transition)
        # Data must remain on the GPU where "Accelerate" placed it.
        assert out[TransitionKey.OBSERVATION]["observation.state"].device == target
        assert out[TransitionKey.ACTION].device == target
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_policy_processor_integration():
"""Test integration with policy processors - input on GPU, output on CPU."""
+261
View File
@@ -725,3 +725,264 @@ def test_custom_padding_side(mock_auto_tokenizer):
processor_right(transition)
assert tracking_tokenizer.padding_side_calls[-1] == "right"
@require_package("transformers")
def test_device_detection_cpu():
    """Test that tokenized tensors stay on CPU when other tensors are on CPU."""
    processor = TokenizerProcessor(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    # Every existing tensor lives on CPU, so tokenization output should too.
    transition = create_transition(
        observation={"observation.state": torch.randn(10)},
        action=torch.randn(5),
        complementary_data={"task": "test task"},
    )
    out_obs = processor(transition)[TransitionKey.OBSERVATION]
    for key in (f"{OBS_LANGUAGE}.tokens", f"{OBS_LANGUAGE}.attention_mask"):
        assert out_obs[key].device.type == "cpu"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
@require_package("transformers")
def test_device_detection_cuda():
    """Test that tokenized tensors are moved to CUDA when other tensors are on CUDA."""
    processor = TokenizerProcessor(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    # Existing tensors are on the default CUDA device.
    transition = create_transition(
        observation={"observation.state": torch.randn(10).cuda()},
        action=torch.randn(5).cuda(),
        complementary_data={"task": "test task"},
    )
    out_obs = processor(transition)[TransitionKey.OBSERVATION]
    tokens = out_obs[f"{OBS_LANGUAGE}.tokens"]
    mask = out_obs[f"{OBS_LANGUAGE}.attention_mask"]
    # Tokenized tensors follow the device of the existing tensors.
    assert tokens.device.type == "cuda"
    assert mask.device.type == "cuda"
    assert tokens.device.index == 0  # Same device index as the inputs
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
@require_package("transformers")
def test_device_detection_multi_gpu():
    """Test that tokenized tensors match device in multi-GPU setup."""
    processor = TokenizerProcessor(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    # Place all existing tensors on the second GPU.
    second_gpu = torch.device("cuda:1")
    transition = create_transition(
        observation={"observation.state": torch.randn(10).to(second_gpu)},
        action=torch.randn(5).to(second_gpu),
        complementary_data={"task": "multi gpu test"},
    )
    out_obs = processor(transition)[TransitionKey.OBSERVATION]
    # Tokenized output must land on cuda:1, matching the existing tensors.
    assert out_obs[f"{OBS_LANGUAGE}.tokens"].device == second_gpu
    assert out_obs[f"{OBS_LANGUAGE}.attention_mask"].device == second_gpu
@require_package("transformers")
def test_device_detection_no_tensors():
    """Test that tokenized tensors stay on CPU when no other tensors exist."""
    processor = TokenizerProcessor(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    # No tensors anywhere in the transition -> detection falls back to CPU.
    transition = create_transition(
        observation={"metadata": {"key": "value"}},
        complementary_data={"task": "no tensor test"},
    )
    out_obs = processor(transition)[TransitionKey.OBSERVATION]
    for key in (f"{OBS_LANGUAGE}.tokens", f"{OBS_LANGUAGE}.attention_mask"):
        assert out_obs[key].device.type == "cpu"
@require_package("transformers")
def test_device_detection_mixed_devices():
    """Test device detection when tensors are on different devices (uses first found)."""
    # A mixed-device transition cannot be built without CUDA. Skip explicitly
    # instead of wrapping the body in `if torch.cuda.is_available():`, which
    # silently reported success on CPU-only machines without testing anything.
    if not torch.cuda.is_available():
        pytest.skip("CUDA not available")
    mock_tokenizer = MockTokenizer(vocab_size=100)
    processor = TokenizerProcessor(tokenizer=mock_tokenizer, max_length=10)
    # Create transition with mixed devices
    observation = {
        "observation.cpu": torch.randn(10),  # CPU
        "observation.cuda": torch.randn(10).cuda(),  # CUDA
    }
    transition = create_transition(
        observation=observation, complementary_data={"task": "mixed device test"}
    )
    result = processor(transition)
    # The device detection should use the first tensor found
    # (iteration order depends on dict, but result should be consistent)
    tokens = result[TransitionKey.OBSERVATION][f"{OBS_LANGUAGE}.tokens"]
    attention_mask = result[TransitionKey.OBSERVATION][f"{OBS_LANGUAGE}.attention_mask"]
    # Both should be on the same device
    assert tokens.device == attention_mask.device
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
@require_package("transformers")
def test_device_detection_from_action():
    """Test that device is detected from action tensor when no observation tensors exist."""
    processor = TokenizerProcessor(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    # Only the action carries a tensor; the observation holds none.
    transition = create_transition(
        observation={"metadata": {"key": "value"}},
        action=torch.randn(5).cuda(),
        complementary_data={"task": "action device test"},
    )
    out_obs = processor(transition)[TransitionKey.OBSERVATION]
    # Tokenized tensors should follow the action's device.
    for key in (f"{OBS_LANGUAGE}.tokens", f"{OBS_LANGUAGE}.attention_mask"):
        assert out_obs[key].device.type == "cuda"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
@require_package("transformers")
def test_device_detection_from_complementary_data():
    """Test that device is detected from tensors in complementary_data."""
    processor = TokenizerProcessor(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    # The only tensor in the transition lives inside complementary_data, on CUDA.
    transition = create_transition(
        observation={"metadata": {"key": "value"}},
        complementary_data={
            "task": "comp data test",
            "index": torch.tensor([42]).cuda(),
        },
    )
    out_obs = processor(transition)[TransitionKey.OBSERVATION]
    # Tokenized tensors should follow the complementary_data tensor's device.
    for key in (f"{OBS_LANGUAGE}.tokens", f"{OBS_LANGUAGE}.attention_mask"):
        assert out_obs[key].device.type == "cuda"
@require_package("transformers")
def test_device_detection_preserves_dtype():
    """Test that device detection doesn't affect dtype of tokenized tensors."""
    processor = TokenizerProcessor(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    # A float16 observation tensor must not leak its dtype into the token tensors.
    transition = create_transition(
        observation={"observation.state": torch.randn(10, dtype=torch.float16)},
        complementary_data={"task": "dtype test"},
    )
    out_obs = processor(transition)[TransitionKey.OBSERVATION]
    assert out_obs[f"{OBS_LANGUAGE}.tokens"].dtype == torch.long  # Token ids stay integral
    assert out_obs[f"{OBS_LANGUAGE}.attention_mask"].dtype == torch.bool  # Mask stays bool
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_integration_with_device_processor(mock_auto_tokenizer):
    """Test that TokenizerProcessor works correctly with DeviceProcessor in pipeline."""
    from lerobot.processor import DeviceProcessor

    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    # Pipeline order matters here: tokenize first, then move everything to the GPU.
    pipeline = RobotProcessor(
        [
            TokenizerProcessor(tokenizer_name="test-tokenizer", max_length=6),
            DeviceProcessor(device="cuda:0"),
        ]
    )
    transition = create_transition(
        observation={"observation.state": torch.randn(10)},  # CPU
        action=torch.randn(5),  # CPU
        complementary_data={"task": "pipeline test"},
    )
    result = pipeline(transition)
    out_obs = result[TransitionKey.OBSERVATION]
    # DeviceProcessor moves every tensor — including the tokenized ones — to CUDA.
    for key in (
        "observation.state",
        f"{OBS_LANGUAGE}.tokens",
        f"{OBS_LANGUAGE}.attention_mask",
    ):
        assert out_obs[key].device.type == "cuda"
    assert result[TransitionKey.ACTION].device.type == "cuda"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
@require_package("transformers")
def test_simulated_accelerate_scenario():
    """Test scenario simulating Accelerate with data already on GPU."""
    processor = TokenizerProcessor(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    # Simulate an Accelerate-prepared batch: everything already batched and on GPU.
    gpu = torch.device("cuda:0")
    transition = create_transition(
        observation={
            "observation.state": torch.randn(1, 10).to(gpu),  # Batched, on GPU
            "observation.image": torch.randn(1, 3, 224, 224).to(gpu),  # Batched, on GPU
        },
        action=torch.randn(1, 5).to(gpu),  # Batched, on GPU
        complementary_data={"task": ["accelerate test"]},  # List for batched task
    )
    out_obs = processor(transition)[TransitionKey.OBSERVATION]
    tokens = out_obs[f"{OBS_LANGUAGE}.tokens"]
    mask = out_obs[f"{OBS_LANGUAGE}.attention_mask"]
    # Tokenized output follows the batch onto the GPU.
    assert tokens.device == gpu
    assert mask.device == gpu
    # MockTokenizer squeezes single-item batches, so shape is (max_length,) not (1, max_length)
    assert tokens.shape == (10,)
    assert mask.shape == (10,)