[Misc] rename torch_dtype to dtype (#26695)

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Author: wangxiyuan
Date: 2025-10-15 20:11:48 +08:00
Committed by: GitHub
Parent: f93e348010
Commit: 8f4b313c37
30 changed files with 52 additions and 55 deletions
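
The rename is mechanical: reads of the Hugging Face config's torch_dtype attribute and torch_dtype= keyword arguments become dtype, as the hunks below show. A minimal sketch of the call-site pattern, using a hypothetical stand-in config class rather than the actual vLLM/transformers classes:

    import torch

    class ModelConfig:
        # Hypothetical stand-in for an HF-style config after the rename.
        def __init__(self, dtype: torch.dtype = torch.bfloat16):
            self.dtype = dtype  # previously exposed as torch_dtype

    config = ModelConfig(dtype=torch.bfloat16)

    # Call sites cast tensors via the renamed attribute.
    pixel_values = torch.zeros(1, 3, 224, 224)
    pixel_values = pixel_values.to(config.dtype)
    assert pixel_values.dtype == torch.bfloat16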

@@ -999,7 +999,7 @@ class ChameleonForConditionalGeneration(
             return []
         assert self.model.vqmodel is not None
         image_tokens = self.model.get_image_tokens(
-            image_input["data"].to(self.config.torch_dtype)
+            image_input["data"].to(self.config.dtype)
         )
         vision_embeddings = self.model.get_input_embeddings(image_tokens)
         return vision_embeddings

@@ -1089,7 +1089,7 @@ class Ernie4_5VLMultiModalProcessor(BaseMultiModalProcessor[Ernie4_5_VLProcessin
         pixel_values = (
             rescale_factor * pixel_values.to(torch.float32) - image_mean_tensor
         ) / image_std_tensor
-        pixel_values = pixel_values.to(hf_config.torch_dtype)
+        pixel_values = pixel_values.to(hf_config.dtype)
         return pixel_values
 
     def _call_hf_processor(

@@ -615,7 +615,7 @@ class GLM4VForCausalLM(
         return None
 
     def _process_image_input(self, image_input: GLMVImagePixelInputs) -> torch.Tensor:
-        pixel_values = image_input["data"].to(dtype=self.config.torch_dtype)
+        pixel_values = image_input["data"].to(dtype=self.config.dtype)
         return self.transformer.vision(pixel_values)

@@ -114,7 +114,7 @@ class FlashConfig(PretrainedConfig):
         attention_dropout=0.0,
         mla_scale_q_lora=False,
         mla_scale_kv_lora=False,
-        torch_dtype="bfloat16",
+        dtype="bfloat16",
         params_dtype="bfloat16",
         router_dtype="float32",
         router_bias=False,
@@ -130,7 +130,7 @@ class FlashConfig(PretrainedConfig):
             bos_token_id=bos_token_id,
             eos_token_id=eos_token_id,
             tie_word_embeddings=tie_word_embeddings,
-            torch_dtype=torch_dtype,
+            dtype=dtype,
             params_dtype=params_dtype,
             router_dtype=router_dtype,
             topk_method=topk_method,

@@ -987,7 +987,7 @@ class NemotronH_Nano_VL_V2(
             prefix=maybe_prefix(prefix, "language_model"),
         )
         self.vision_model = self.get_vit_model_from_radio_config(config).to(
-            self.language_model.config.torch_dtype
+            self.language_model.config.dtype
         )
 
         # Construct the vision projection.
@@ -1008,7 +1008,7 @@ class NemotronH_Nano_VL_V2(
             ReLUSquaredActivation(),
             nn.Linear(vision_projection_hidden_size, llm_hidden_size, bias=False),
         )
-        self.mlp1 = self.mlp1.to(self.language_model.config.torch_dtype)
+        self.mlp1 = self.mlp1.to(self.language_model.config.dtype)
 
         self.config = config
         self.model_config = vllm_config.model_config

@@ -338,7 +338,7 @@ class Qwen3NextGatedDeltaNet(nn.Module, MambaBase):
             group_size=None,
             norm_before_gate=True,
             device=current_platform.current_device(),
-            dtype=config.torch_dtype,
+            dtype=config.dtype,
         )
 
         self.out_proj = RowParallelLinear(
@@ -847,7 +847,7 @@ class Qwen3NextDecoderLayer(nn.Module):
                 1,
                 1,
                 config.hidden_size,
-                dtype=config.torch_dtype,
+                dtype=config.dtype,
             ),
         )
         self.ffn_layer_scale = torch.nn.Parameter(
@@ -855,7 +855,7 @@ class Qwen3NextDecoderLayer(nn.Module):
                 1,
                 1,
                 config.hidden_size,
-                dtype=config.torch_dtype,
+                dtype=config.dtype,
             ),
         )

@@ -530,7 +530,7 @@ class TransformersBase(nn.Module, SupportsQuant, SupportsLoRA, SupportsPP):
         with init_on_device_without_buffers("meta"):
             self.model: PreTrainedModel = AutoModel.from_config(
                 self.config,
-                torch_dtype=self.model_config.dtype,
+                dtype=self.model_config.dtype,
                 trust_remote_code=self.model_config.trust_remote_code,
             )

@@ -157,7 +157,7 @@ class TransformersForSequenceClassification(TransformersPoolingBase):
         with torch.device("meta"):
             seq_cls_model = AutoModelForSequenceClassification.from_config(
                 self.config,
-                torch_dtype=self.model_config.dtype,
+                dtype=self.model_config.dtype,
                 trust_remote_code=self.model_config.trust_remote_code,
             )