[Bugfix] Missing quant_config in deepseek embedding layer (#12836)

This commit is contained in:
Szymon Ożóg
2025-02-07 06:35:09 +01:00
committed by GitHub
parent 433c4a4923
commit aa375dca9f

View File

@@ -581,7 +581,8 @@ class DeepseekV2Model(nn.Module):
             self.embed_tokens = VocabParallelEmbedding(
                 config.vocab_size,
                 config.hidden_size,
-            )
+                quant_config=quant_config,
+                prefix=f"{prefix}.embed_tokens")
         else:
             self.embed_tokens = PPMissingLayer()