mirror of
https://github.com/huggingface/lerobot.git
synced 2026-05-15 08:39:49 +00:00
fix(annotate): drop guided_decoding=dict (api differs across vllm)
vllm 0.10.2 expects guided_decoding to be a GuidedDecodingParams object, not a dict. Different vllm versions differ here. The parser already has a one-retry JSON-recovery path, so drop guided decoding entirely for portability. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -169,11 +169,10 @@ def _make_vllm_client(config: VlmConfig) -> VlmClient:
|
||||
llm = LLM(**llm_kwargs)
|
||||
|
||||
def _gen(batch: Sequence[Sequence[dict[str, Any]]], max_tok: int, temp: float) -> list[str]:
    """Run one vllm generation pass over a batch of chat-message lists.

    Args:
        batch: One chat per element; each chat is a sequence of message dicts
            (the shape ``_messages_to_prompt`` expects).
        max_tok: Maximum number of tokens to generate per completion.
        temp: Sampling temperature.

    Returns:
        The generated text of the first candidate for each prompt, in batch order.
    """
    # ``guided_decoding`` would speed up parsing but its API differs across
    # vllm releases (dict vs GuidedDecodingParams). The _GenericTextClient
    # wrapper already has a one-retry JSON-recovery path, so we skip it.
    params = SamplingParams(max_tokens=max_tok, temperature=temp)
    prompts = [_messages_to_prompt(m) for m in batch]
    outputs = llm.generate(prompts, params)
    # Each RequestOutput carries its candidates in ``outputs``; take the top one.
    return [o.outputs[0].text for o in outputs]
|
||||
|
||||
Reference in New Issue
Block a user