Support mnnvl all2allv from Flashinfer (#21003)

Signed-off-by: Shu Wang <shuw@nvidia.com>
Signed-off-by: Shu Wang. <shuw@nvidia.com>
Signed-off-by: Tyler Michael Smith <tyler@neuralmagic.com>
Signed-off-by: Tyler Michael Smith <tlrmchlsmth@gmail.com>
Co-authored-by: Tyler Michael Smith <tyler@neuralmagic.com>
Co-authored-by: Tyler Michael Smith <tlrmchlsmth@gmail.com>
Signed-off-by: yewentao256 <zhyanwentao@126.com>
This commit is contained in:
Shu Wang
2025-09-24 13:38:16 -05:00
committed by yewentao256
parent 7d9f44ad2a
commit 72a5101c7a
10 changed files with 410 additions and 40 deletions

View File

@@ -222,7 +222,8 @@ if (has_flashinfer_cutlass_fused_moe()
from vllm.model_executor.layers.fused_moe.flashinfer_cutlass_moe import ( # noqa: E501
FlashInferExperts)
from vllm.model_executor.layers.fused_moe.flashinfer_cutlass_prepare_finalize import ( # noqa: E501
FlashInferCutlassMoEPrepareAndFinalize)
FlashInferCutlassMoEPrepareAndFinalize,
create_flashinfer_prepare_finalize)
register_prepare_and_finalize(
FlashInferCutlassMoEPrepareAndFinalize,
@@ -373,7 +374,7 @@ def make_prepare_finalize(
assert prepare_finalize is not None
return prepare_finalize
elif prepare_finalize_type == FlashInferCutlassMoEPrepareAndFinalize:
return FlashInferCutlassMoEPrepareAndFinalize(
return create_flashinfer_prepare_finalize(
use_dp=moe.moe_parallel_config.dp_size > 1)
else:
return MoEPrepareAndFinalizeNoEP()