fix(annotate): use device_map='auto' for transformers backend

Without device_map, transformers stages the full FP8 checkpoint in CPU
RAM before any GPU placement, OOMing the host on 27B+ models even when
the GPU has enough VRAM. device_map='auto' streams shards directly to
GPU memory.
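
For reference, the loading pattern this change adopts looks roughly like the
sketch below. The model id and auto class are illustrative stand-ins (the real
code resolves auto_cls and config.model_id from VlmConfig):

    from transformers import AutoModelForImageTextToText, AutoProcessor

    model_id = "google/gemma-3-27b-it"  # hypothetical 27B VL checkpoint
    processor = AutoProcessor.from_pretrained(model_id)
    model = AutoModelForImageTextToText.from_pretrained(
        model_id,
        torch_dtype="auto",  # keep the checkpoint dtype instead of upcasting to FP32
        device_map="auto",   # dispatch shards to GPU(s) as they load; no full CPU staging
    )
    model.eval()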

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
Author: Pepijn
Date:   2026-04-27 23:05:14 +02:00
Parent: de104936bf
Commit: f8c00d9ca5
@@ -181,7 +181,14 @@ def _make_transformers_client(config: VlmConfig) -> VlmClient:
"for VL models."
)
processor = AutoProcessor.from_pretrained(config.model_id)
model = auto_cls.from_pretrained(config.model_id, torch_dtype="auto")
# device_map='auto' loads weights directly to GPU(s) and shards when
# needed; without it, transformers stages the full checkpoint in CPU
# memory first which OOMs the host on FP8/large models.
model = auto_cls.from_pretrained(
config.model_id,
torch_dtype="auto",
device_map="auto",
)
model.eval()
def _gen(batch: Sequence[Sequence[dict[str, Any]]], max_tok: int, temp: float) -> list[str]:
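
A quick sanity check after loading with device_map='auto': accelerate records
the final placement on the model as hf_device_map, so CPU/disk offload is easy
to spot. The snippet below is illustrative, not part of this commit:

    # Illustrative check: fail loudly if any module was offloaded to CPU or
    # disk instead of landing on a GPU.
    placements = set(model.hf_device_map.values())
    assert not placements & {"cpu", "disk"}, f"offloaded modules: {model.hf_device_map}"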