fp16 intermediates don't work for some text enc models. (#13056)

This commit is contained in:
comfyanonymous
2026-03-18 16:42:28 -07:00
committed by GitHub
parent 56ff88f951
commit f6b869d7d3

View File

@ -46,7 +46,7 @@ class ClipTokenWeightEncoder:
out, pooled = o[:2]
if pooled is not None:
first_pooled = pooled[0:1].to(device=model_management.intermediate_device(), dtype=model_management.intermediate_dtype())
first_pooled = pooled[0:1].to(device=model_management.intermediate_device())
else:
first_pooled = pooled
@ -63,9 +63,9 @@ class ClipTokenWeightEncoder:
output.append(z)
if (len(output) == 0):
r = (out[-1:].to(device=model_management.intermediate_device(), dtype=model_management.intermediate_dtype()), first_pooled)
r = (out[-1:].to(device=model_management.intermediate_device()), first_pooled)
else:
r = (torch.cat(output, dim=-2).to(device=model_management.intermediate_device(), dtype=model_management.intermediate_dtype()), first_pooled)
r = (torch.cat(output, dim=-2).to(device=model_management.intermediate_device()), first_pooled)
if len(o) > 2:
extra = {}