Enable mxfp8-mxfp4 group GEMM on CUTLASS
Signed-off-by: Faraz Khoubsirat <58580514+farazkh80@users.noreply.github.com>
Committed by: Haicheng Wu
Parent: dc4817921e
Commit: 6aa1894093
@@ -566,6 +566,8 @@ check_input_datatypes() {
       ((SfVectorSizeA == 32 && cute::is_same_v<KernelScheduleAuto, BuilderScheduleTag>)
        || (SfVectorSizeA == 32 && cute::is_same_v<KernelTmaWarpSpecializedPingpong, BuilderScheduleTag>)
        || (SfVectorSizeA == 32 && cute::is_same_v<KernelTmaWarpSpecializedCooperative, BuilderScheduleTag>)
+       || (SfVectorSizeA == 32 && cute::is_same_v<KernelPtrArrayTmaWarpSpecializedPingpong, BuilderScheduleTag>)
+       || (SfVectorSizeA == 32 && cute::is_same_v<KernelPtrArrayTmaWarpSpecializedCooperative, BuilderScheduleTag>)
        || (SfVectorSizeA == 32 && cute::is_base_of_v<KernelScheduleBlockScaledGemmSm100, BuilderScheduleTag>)
        || (SfVectorSizeA == 32 && cute::is_base_of_v<KernelSchedulePtrArrayBlockScaledGemmSm100, BuilderScheduleTag>)
        || (SfVectorSizeA == 64 && cute::is_base_of_v<KernelScheduleBlockScaledSparseGemmSm100, BuilderScheduleTag>)
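The condition above gates the block-scaled collective builder on the combination of scale-factor vector size and kernel schedule tag; the two KernelPtrArrayTmaWarpSpecialized lines extend it to the grouped (ptr-array) schedules. Below is a minimal standalone sketch of this tag-dispatch pattern using plain std type traits and made-up tag types; it is an illustration of the technique, not the actual CUTLASS builder code.

// Standalone illustration of compile-time schedule-tag dispatch.
// Tag types here are simplified stand-ins, not CUTLASS types.
#include <type_traits>

struct KernelScheduleAuto {};
struct KernelScheduleBlockScaledGemm {};                              // base tag for block-scaled schedules
struct KernelPtrArrayTmaWarpSpecializedPingpong {};                   // grouped (ptr-array) schedule
struct KernelSchedulePtrArrayBlockScaledGemm : KernelScheduleBlockScaledGemm {};

template <int SfVectorSizeA, class BuilderScheduleTag>
constexpr bool is_supported_schedule() {
  return (SfVectorSizeA == 32 && std::is_same_v<KernelScheduleAuto, BuilderScheduleTag>)
      || (SfVectorSizeA == 32 && std::is_same_v<KernelPtrArrayTmaWarpSpecializedPingpong, BuilderScheduleTag>)
      || (SfVectorSizeA == 32 && std::is_base_of_v<KernelScheduleBlockScaledGemm, BuilderScheduleTag>);
}

static_assert(is_supported_schedule<32, KernelPtrArrayTmaWarpSpecializedPingpong>());
static_assert(is_supported_schedule<32, KernelSchedulePtrArrayBlockScaledGemm>());
static_assert(!is_supported_schedule<64, KernelScheduleAuto>());

int main() { return 0; }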
@@ -2256,12 +2256,16 @@ bool TestSmall(double alpha = 1.0, double beta = 1.0,
   using ElementA = typename Gemm::GemmKernel::ElementA;
   using ElementB = typename Gemm::GemmKernel::ElementB;
   using TiledMma = typename Gemm::GemmKernel::TiledMma;
-  int alignment_bits = 128;

   static constexpr bool IsF8F6F4 = cutlass::gemm::collective::detail::is_sm100_mma_f8f6f4<TiledMma, ElementA, ElementB>();
-  alignment_bits = cutlass::detail::get_input_alignment_bits<ElementA, IsF8F6F4>();
-  // For fp4 and fp6 kernels, the min alignment_input is 128 elements, so we don't need to add alignment_input in test problem sizes.
-  int alignment_input = (alignment_bits / cute::sizeof_bits<ElementA>::value == 128) ? 0 : (alignment_bits / cute::sizeof_bits<ElementA>::value);
+  // For fp4 and fp6 kernels, the min alignment_input is 128 elements, so we don't need to add alignment_input in test problem sizes.
+  int alignment_bits_a = cutlass::detail::get_input_alignment_bits<ElementA, IsF8F6F4>();
+  int alignment_input_a = (alignment_bits_a / cute::sizeof_bits<ElementA>::value == 128) ? 0 : (alignment_bits_a / cute::sizeof_bits<ElementA>::value);
+
+  int alignment_bits_b = cutlass::detail::get_input_alignment_bits<ElementB, IsF8F6F4>();
+  int alignment_input_b = (alignment_bits_b / cute::sizeof_bits<ElementB>::value == 128) ? 0 : (alignment_bits_b / cute::sizeof_bits<ElementB>::value);
+
+  int alignment_input = (alignment_input_a == 0 || alignment_input_b == 0) ? 0 : std::max(alignment_input_a, alignment_input_b);

   if constexpr (apply_alignment_offset) {
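The new logic computes the test-problem alignment offset per operand instead of deriving it from ElementA alone, and only applies an offset when both operands need one. A small standalone sketch of the same arithmetic follows; the alignment_bits values are illustrative placeholders, not the values cutlass::detail::get_input_alignment_bits() actually returns, and the 8-bit/4-bit element widths assume an e5m2 A and an e2m1 B as in this commit.

// Standalone sketch of the per-operand alignment combination above.
#include <algorithm>
#include <cassert>

// Mirrors the per-operand rule: a 128-element alignment means "no extra offset needed".
constexpr int alignment_input(int alignment_bits, int element_bits) {
  int elems = alignment_bits / element_bits;
  return (elems == 128) ? 0 : elems;
}

int main() {
  // Hypothetical mxfp8 x mxfp4 pairing: A is 8-bit (e5m2), B is 4-bit (e2m1).
  int a = alignment_input(/*alignment_bits=*/128, /*element_bits=*/8);  // 128/8 = 16-element offset for A
  int b = alignment_input(/*alignment_bits=*/512, /*element_bits=*/4);  // 512/4 = 128 elements -> 0 for B
  // If either operand already needs no offset, the test uses none; otherwise the larger one.
  int combined = (a == 0 || b == 0) ? 0 : std::max(a, b);
  assert(combined == 0);
  return 0;
}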
@@ -71,6 +71,7 @@ cutlass_test_unit_gemm_device_add_executable(
 cutlass_test_unit_gemm_device_add_executable(
   cutlass_test_unit_bs_grouped_gemm_device_tensorop_sm120
   sm120_bs_gemm_nvf4_nvf4_f32_nvf4_group_gemm_fusion.cu
+  sm120_bs_gemm_mxf8_mxf4_f32_group_gemm_fusion.cu
 )

 endif()
@@ -0,0 +1,362 @@
/***************************************************************************************************
 * Copyright (c) 2025 - 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

/*! \file
    \brief Tests for device-wide grouped GEMM interface
*/

#include <iostream>

#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"

#include "cutlass/numeric_types.h"

#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/epilogue/thread/activation.h"

#include "../../../common/cutlass_unit_test.h"
#include "../gemm_testbed_3x_ptr_array.hpp"

using namespace cute;

#if defined(CUTLASS_ARCH_MMA_SM120_SUPPORTED)

// Pingpong kernel schedule
TEST(SM120_Device_Gemm_e5m2t_e2m1n_e2m1t_tensorop_f32_epilogue_VS32_group_pingpong, row_sf) {
  using ElementInputA = float_e5m2_t;
  using ElementInputB = float_e2m1_t;
  using ElementA = cutlass::mx_float8_t<ElementInputA>;
  using ElementB = cutlass::mx_float4_t<ElementInputB>;
  using ElementC = cutlass::half_t;
  using ElementD = cutlass::float_e2m1_t;
  using ElementCompute = float;
  using ElementAccumulator = float;
  using ElementSF = cutlass::float_ue8m0_t;
  using ElementSFD = ElementSF;
  using GmemLayoutA = cutlass::layout::RowMajor;
  using GmemLayoutB = cutlass::layout::ColumnMajor;
  using GmemLayoutC = cutlass::layout::RowMajor;
  constexpr int SFVectorSize = 32;
  using TileShape_MNK = Shape<_128,_128,_128>;
  using ClusterShape_MNK = Shape<_1,_1,_1>;

  constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementInputA>::value;
  constexpr int AlignmentB = 128;
  constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value;
  constexpr int AlignmentD = 128 / cutlass::sizeof_bits<ElementD>::value;

  //
  // Construct CollectiveEpilogue
  //

  constexpr int OutputSFVectorSize = SFVectorSize;
  // D = alpha * acc + beta * C
  // With Row-major BlockScaleFactor generation.
  using FusionOperation = cutlass::epilogue::fusion::LinCombBlockScaleFactor<
      OutputSFVectorSize,
      ElementD,
      ElementCompute,
      ElementSFD, GmemLayoutC,
      ElementC>;

  using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
      cutlass::arch::Sm120, cutlass::arch::OpClassBlockScaledTensorOp,
      TileShape_MNK, ClusterShape_MNK,
      cutlass::epilogue::collective::EpilogueTileAuto,
      ElementAccumulator, ElementCompute,
      ElementC, GmemLayoutC *, AlignmentC,
      ElementD, GmemLayoutC *, AlignmentD,
      cutlass::epilogue::collective::EpilogueScheduleAuto,
      FusionOperation
    >::CollectiveOp;

  //
  // Construct CollectiveMainloop
  //
  using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
      cutlass::arch::Sm120, cutlass::arch::OpClassBlockScaledTensorOp,
      ElementA, GmemLayoutA *, AlignmentA,
      ElementB, GmemLayoutB *, AlignmentB,
      ElementAccumulator,
      TileShape_MNK, ClusterShape_MNK,
      cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
      cutlass::gemm::KernelPtrArrayTmaWarpSpecializedPingpong
    >::CollectiveOp;

  using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
      cutlass::gemm::GroupProblemShape<Shape<int,int,int>>,
      CollectiveMainloop,
      CollectiveEpilogue
    >;

  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  auto pass = test::gemm::device::TestSmallFusion<Gemm>(1.0, 0.5);
  EXPECT_TRUE(pass);
}
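The LinCombBlockScaleFactor fusion above computes D = alpha * acc + beta * C and additionally emits one scale factor per 32 output elements along each row (row-major SFD), with D stored in the narrow FP4 output type. The following scalar sketch only illustrates that idea; it is not the CUTLASS epilogue implementation, and the exact scale derivation and output quantization in CUTLASS may differ.

// Scalar illustration of row-major block scale factor generation (VS32).
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  constexpr int kVectorSize = 32;
  std::vector<float> d_row(128, 0.0f);
  for (int i = 0; i < 128; ++i) d_row[i] = 0.01f * i;  // pretend epilogue output for one row

  for (int blk = 0; blk < 128 / kVectorSize; ++blk) {
    float amax = 0.0f;
    for (int i = 0; i < kVectorSize; ++i)
      amax = std::fmax(amax, std::fabs(d_row[blk * kVectorSize + i]));
    // ue8m0-style scale: a power of two covering the block's absolute maximum.
    float sf = std::exp2f(std::ceil(std::log2f(amax > 0.0f ? amax : 1.0f)));
    std::printf("block %d: amax=%f scale=%f\n", blk, amax, sf);
    // The stored D would be d_row[...] / sf, quantized to the FP4 output type,
    // and sf would be written to the SFD tensor for this 32-element block.
  }
  return 0;
}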

TEST(SM120_Device_Gemm_e5m2t_e2m1n_e2m1t_tensorop_f32_epilogue_VS32_group_pingpong, silu_row_sf) {
  using ElementInputA = float_e5m2_t;
  using ElementInputB = float_e2m1_t;
  using ElementA = cutlass::mx_float8_t<ElementInputA>;
  using ElementB = cutlass::mx_float4_t<ElementInputB>;
  using ElementC = cutlass::half_t;
  using ElementD = cutlass::float_e2m1_t;
  using ElementCompute = float;
  using ElementAccumulator = float;
  using ElementSF = cutlass::float_ue4m3_t;
  using ElementSFD = ElementSF;
  using GmemLayoutA = cutlass::layout::RowMajor;
  using GmemLayoutB = cutlass::layout::ColumnMajor;
  using GmemLayoutC = cutlass::layout::RowMajor;
  constexpr int SFVectorSize = 32;
  using TileShape_MNK = Shape<_128,_128,_128>;
  using ClusterShape_MNK = Shape<_1,_1,_1>;

  constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementInputA>::value;
  constexpr int AlignmentB = 128;
  constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value;
  constexpr int AlignmentD = 128 / cutlass::sizeof_bits<ElementD>::value;

  //
  // Construct CollectiveEpilogue
  //

  constexpr int OutputSFVectorSize = SFVectorSize;
  // D = SiLu(alpha * acc + beta * C)
  // With Row-major BlockScaleFactor generation.
  using FusionOperation = cutlass::epilogue::fusion::LinCombEltActBlockScaleFactor<
      cutlass::epilogue::thread::SiLu,
      OutputSFVectorSize,
      ElementD,
      ElementCompute,
      ElementSFD, GmemLayoutC,
      ElementC>;

  using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
      cutlass::arch::Sm120, cutlass::arch::OpClassBlockScaledTensorOp,
      TileShape_MNK, ClusterShape_MNK,
      cutlass::epilogue::collective::EpilogueTileAuto,
      ElementAccumulator, ElementCompute,
      ElementC, GmemLayoutC *, AlignmentC,
      ElementD, GmemLayoutC *, AlignmentD,
      cutlass::epilogue::collective::EpilogueScheduleAuto,
      FusionOperation
    >::CollectiveOp;

  //
  // Construct CollectiveMainloop
  //
  using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
      cutlass::arch::Sm120, cutlass::arch::OpClassBlockScaledTensorOp,
      ElementA, GmemLayoutA *, AlignmentA,
      ElementB, GmemLayoutB *, AlignmentB,
      ElementAccumulator,
      TileShape_MNK, ClusterShape_MNK,
      cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
      cutlass::gemm::KernelPtrArrayTmaWarpSpecializedPingpong
    >::CollectiveOp;

  using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
      cutlass::gemm::GroupProblemShape<Shape<int,int,int>>,
      CollectiveMainloop,
      CollectiveEpilogue
    >;

  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  auto pass = test::gemm::device::TestSmallFusion<Gemm>(1.0, 0.5);
  EXPECT_TRUE(pass);
}

// Cooperative kernel schedule
TEST(SM120_Device_Gemm_e5m2t_e2m1n_e2m1t_tensorop_f32_epilogue_VS32_group_cooperative, row_sf) {
  using ElementInputA = float_e5m2_t;
  using ElementInputB = float_e2m1_t;
  using ElementA = cutlass::mx_float8_t<ElementInputA>;
  using ElementB = cutlass::mx_float4_t<ElementInputB>;
  using ElementC = cutlass::half_t;
  using ElementD = cutlass::float_e2m1_t;
  using ElementCompute = float;
  using ElementAccumulator = float;
  using ElementSF = cutlass::float_ue4m3_t;
  using ElementSFD = ElementSF;
  using GmemLayoutA = cutlass::layout::RowMajor;
  using GmemLayoutB = cutlass::layout::ColumnMajor;
  using GmemLayoutC = cutlass::layout::RowMajor;
  constexpr int SFVectorSize = 32;
  using TileShape_MNK = Shape<_128,_128,_128>;
  using ClusterShape_MNK = Shape<_1,_1,_1>;

  constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementInputA>::value;
  constexpr int AlignmentB = 128;
  constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value;
  constexpr int AlignmentD = 128 / cutlass::sizeof_bits<ElementD>::value;

  //
  // Construct CollectiveEpilogue
  //

  constexpr int OutputSFVectorSize = SFVectorSize;
  // D = alpha * acc + beta * C
  // With Row-major BlockScaleFactor generation.
  using FusionOperation = cutlass::epilogue::fusion::LinCombBlockScaleFactor<
      OutputSFVectorSize,
      ElementD,
      ElementCompute,
      ElementSFD, GmemLayoutC,
      ElementC>;

  using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
      cutlass::arch::Sm120, cutlass::arch::OpClassBlockScaledTensorOp,
      TileShape_MNK, ClusterShape_MNK,
      cutlass::epilogue::collective::EpilogueTileAuto,
      ElementAccumulator, ElementCompute,
      ElementC, GmemLayoutC *, AlignmentC,
      ElementD, GmemLayoutC *, AlignmentD,
      cutlass::epilogue::collective::EpilogueScheduleAuto,
      FusionOperation
    >::CollectiveOp;

  //
  // Construct CollectiveMainloop
  //
  using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
      cutlass::arch::Sm120, cutlass::arch::OpClassBlockScaledTensorOp,
      ElementA, GmemLayoutA *, AlignmentA,
      ElementB, GmemLayoutB *, AlignmentB,
      ElementAccumulator,
      TileShape_MNK, ClusterShape_MNK,
      cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
      cutlass::gemm::collective::KernelScheduleAuto
    >::CollectiveOp;

  using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
      cutlass::gemm::GroupProblemShape<Shape<int,int,int>>,
      CollectiveMainloop,
      CollectiveEpilogue
    >;

  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  auto pass = test::gemm::device::TestSmallFusion<Gemm>(1.0, 0.5);
  EXPECT_TRUE(pass);
}

TEST(SM120_Device_Gemm_e5m2t_e2m1n_e2m1t_tensorop_f32_epilogue_VS32_group_cooperative, silu_row_sf) {
  using ElementInputA = float_e5m2_t;
  using ElementInputB = float_e2m1_t;
  using ElementA = cutlass::mx_float8_t<ElementInputA>;
  using ElementB = cutlass::mx_float4_t<ElementInputB>;
  using ElementC = cutlass::half_t;
  using ElementD = cutlass::float_e2m1_t;
  using ElementCompute = float;
  using ElementAccumulator = float;
  using ElementSF = cutlass::float_ue4m3_t;
  using ElementSFD = ElementSF;
  using GmemLayoutA = cutlass::layout::RowMajor;
  using GmemLayoutB = cutlass::layout::ColumnMajor;
  using GmemLayoutC = cutlass::layout::RowMajor;
  constexpr int SFVectorSize = 32;
  using TileShape_MNK = Shape<_128,_128,_128>;
  using ClusterShape_MNK = Shape<_1,_1,_1>;

  constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementInputA>::value;
  constexpr int AlignmentB = 128;
  constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value;
  constexpr int AlignmentD = 128 / cutlass::sizeof_bits<ElementD>::value;

  //
  // Construct CollectiveEpilogue
  //

  constexpr int OutputSFVectorSize = SFVectorSize;
  // D = SiLu(alpha * acc + beta * C)
  // With Row-major BlockScaleFactor generation.
  using FusionOperation = cutlass::epilogue::fusion::LinCombEltActBlockScaleFactor<
      cutlass::epilogue::thread::SiLu,
      OutputSFVectorSize,
      ElementD,
      ElementCompute,
      ElementSFD, GmemLayoutC,
      ElementC>;

  using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
      cutlass::arch::Sm120, cutlass::arch::OpClassBlockScaledTensorOp,
      TileShape_MNK, ClusterShape_MNK,
      cutlass::epilogue::collective::EpilogueTileAuto,
      ElementAccumulator, ElementCompute,
      ElementC, GmemLayoutC *, AlignmentC,
      ElementD, GmemLayoutC *, AlignmentD,
      cutlass::epilogue::collective::EpilogueScheduleAuto,
      FusionOperation
    >::CollectiveOp;

  //
  // Construct CollectiveMainloop
  //
  using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
      cutlass::arch::Sm120, cutlass::arch::OpClassBlockScaledTensorOp,
      ElementA, GmemLayoutA *, AlignmentA,
      ElementB, GmemLayoutB *, AlignmentB,
      ElementAccumulator,
      TileShape_MNK, ClusterShape_MNK,
      cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
      cutlass::gemm::collective::KernelScheduleAuto
    >::CollectiveOp;

  using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
      cutlass::gemm::GroupProblemShape<Shape<int,int,int>>,
      CollectiveMainloop,
      CollectiveEpilogue
    >;

  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  auto pass = test::gemm::device::TestSmallFusion<Gemm>(1.0, 0.5);
  EXPECT_TRUE(pass);
}

#endif // #if defined(CUTLASS_ARCH_MMA_SM120_SUPPORTED)