[CPU] Fix torch version in x86 CPU backend (#19258)

Signed-off-by: jiang1.li <jiang1.li@intel.com>
Li, Jiang
2025-06-26 18:34:47 +08:00
committed by GitHub
parent d188913d99
commit 0567c8249f
8 changed files with 52 additions and 20 deletions


@@ -107,6 +107,8 @@ VLM_TEST_SETTINGS = {
             ),
             limit_mm_per_prompt={"image": 4},
         )],
+        # TODO: Revert to "auto" when CPU backend can use torch > 2.6
+        dtype="bfloat16" if current_platform.is_cpu() else "auto",
         marks=[pytest.mark.core_model, pytest.mark.cpu_model],
     ),
     "paligemma": VLMTestInfo(


@@ -203,6 +203,9 @@ def build_embedding_inputs_from_test_info(
     images = [asset.pil_image for asset in image_assets]
     embeds = test_info.convert_assets_to_embeddings(image_assets)
+    if test_info.dtype != "auto":
+        dtype = getattr(torch, test_info.dtype)  # type: ignore
+        embeds = [e.to(dtype=dtype) for e in embeds]
     assert len(images) == len(model_prompts)
     inputs = build_single_image_inputs(images, model_prompts, size_wrapper)
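Note: because the image embeddings are precomputed outside the model, the added lines cast them to the dtype the test requested. A minimal, self-contained sketch of the string-to-torch.dtype resolution and cast; only getattr(torch, ...) and Tensor.to(dtype=...) are taken from the diff, the function name and tensor shapes are made up:

import torch

def cast_embeds(embeds: list[torch.Tensor], dtype_str: str) -> list[torch.Tensor]:
    if dtype_str == "auto":
        return embeds  # leave precomputed embeddings in their original dtype
    dtype = getattr(torch, dtype_str)  # e.g. "bfloat16" -> torch.bfloat16
    return [e.to(dtype=dtype) for e in embeds]

embeds = [torch.randn(2, 8), torch.randn(3, 8)]
assert all(e.dtype == torch.bfloat16 for e in cast_embeds(embeds, "bfloat16"))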