v4.1 release
@@ -117,6 +117,82 @@ class TestEVTCompute(EVTTestCaseBase):
            input_keys = ["C", "alpha", "beta"]
            result_keys = ["D"]
            launcher.verify((m, n, k), input_keys, result_keys, l)

    def test_tanh(self):
        """
        Test Tanh op
        """
        def evt_tanh(accum):
            D = tanh(accum)
            return D

        for m, n, k, l in self.get_problem_sizes(8):
            example_inputs = {
                "accum": self.fake_tensor(self.element, (l, m, n)),
                "D": self.fake_tensor(self.element, (l, m, n))
            }

            launcher = EVTTestBed(self.element, evt_tanh, example_inputs)
            input_keys = []
            result_keys = ["D"]
            launcher.verify((m, n, k), input_keys, result_keys, l)

    def test_sigmoid(self):
        """
        Test Sigmoid op
        """
        def evt_sigmoid(accum):
            D = sigmoid(accum)
            return D

        for m, n, k, l in self.get_problem_sizes(8):
            example_inputs = {
                "accum": self.fake_tensor(self.element, (l, m, n)),
                "D": self.fake_tensor(self.element, (l, m, n))
            }

            launcher = EVTTestBed(self.element, evt_sigmoid, example_inputs)
            input_keys = []
            result_keys = ["D"]
            launcher.verify((m, n, k), input_keys, result_keys, l)

    def test_gelu(self):
        """
        Test GELU op
        """
        def evt_gelu(accum):
            D = gelu(accum)
            return D

        for m, n, k, l in self.get_problem_sizes(8):
            example_inputs = {
                "accum": self.fake_tensor(self.element, (l, m, n)),
                "D": self.fake_tensor(self.element, (l, m, n))
            }

            launcher = EVTTestBed(self.element, evt_gelu, example_inputs)
            input_keys = []
            result_keys = ["D"]
            launcher.verify((m, n, k), input_keys, result_keys, l)

    def test_exp(self):
        """
        Test Exp op
        """
        def evt_exp(accum):
            D = exp(accum)
            return D

        for m, n, k, l in self.get_problem_sizes(8):
            example_inputs = {
                "accum": self.fake_tensor(self.element, (l, m, n)),
                "D": self.fake_tensor(self.element, (l, m, n))
            }

            launcher = EVTTestBed(self.element, evt_exp, example_inputs)
            input_keys = []
            result_keys = ["D"]
            launcher.verify((m, n, k), input_keys, result_keys, l)


if __name__ == '__main__':
    unittest.main()
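All four new tests follow one pattern: a plain Python function over `accum` (the GEMM accumulator) is traced by the EVT frontend into an epilogue visitor tree, and every returned name must have a matching entry in `example_inputs` so the test bed can allocate and verify it. As a rough sketch of what each test checks — using NumPy stand-ins, not the real `EVTTestBed` machinery:

```python
import numpy as np

def reference_epilogue(accum: np.ndarray, op) -> np.ndarray:
    # Elementwise epilogue: D = op(accum); the (l, m, n) shape is preserved.
    return op(accum)

accum = np.random.randn(3, 16, 16).astype(np.float32)
D = reference_epilogue(accum, np.tanh)  # analogous to evt_tanh, up to precision
```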
@@ -49,6 +49,51 @@ cutlass.set_log_level(logging.WARNING)

@unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]")
class TestEVTMixed(EVTTestCaseBase):

    def test_same_variable_used_multiple_times(self):
        """
        The same variable z0 is used multiple times
        """
        def evt_aux_store(accum):
            z0 = relu(accum)
            D = z0 + z0
            return z0, D

        for m, n, k, l in self.get_problem_sizes(8):
            example_inputs = {
                "accum": self.fake_tensor(self.element, (l, m, n)),
                "D": self.fake_tensor(self.element, (l, m, n)),
                "z0": self.fake_tensor(self.element, (l, m, n)),
            }

            launcher = EVTTestBed(self.element, evt_aux_store, example_inputs)
            input_keys = ["accum"]
            result_keys = ["z0", "D"]
            launcher.verify((m, n, k), input_keys, result_keys, l)

    def test_no_lca(self):
        """
        Nodes in the epilogue DAG have no lowest common ancestor
        """
        def evt_no_lca(accum, bias):
            E = relu(accum)
            F = E + bias
            tmp_2 = E + 2
            D = tmp_2 + E
            return D

        for m, n, k, l in self.get_problem_sizes(8):
            example_inputs = {
                "accum": self.fake_tensor(self.element, (l, m, n)),
                "D": self.fake_tensor(self.element, (l, m, n)),
                "bias": self.fake_tensor(self.element, (m, 1), stride=(1, 0)),
            }

            launcher = EVTTestBed(self.element, evt_no_lca, example_inputs)
            input_keys = ["accum", "bias"]
            result_keys = ["D"]
            launcher.verify((m, n, k), input_keys, result_keys, l)

    def test_mixed_dag(self):
        def evt_mixed_dag(accum, alpha, C, beta, aux, cbias, rbias):
            F = alpha * accum + (beta * C + aux)
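Both tests stress DAG lowering rather than arithmetic: `evt_aux_store` reuses `z0` in two consumers (one compute node with two outgoing edges plus an auxiliary store), while `evt_no_lca` builds branches that only meet at the root (the `bias` branch `F` is constructed but never returned). A minimal NumPy sketch of the two reference computations — our names, not the suite's:

```python
import numpy as np

def ref_aux_store(accum):
    z0 = np.maximum(accum, 0.0)  # relu, consumed twice
    return z0, z0 + z0

def ref_no_lca(accum):
    E = np.maximum(accum, 0.0)
    return (E + 2.0) + E  # two uses of E on branches that only merge at D
```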
@@ -49,6 +49,31 @@ cutlass.set_log_level(logging.WARNING)
@unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]")
class TestEVTStore(EVTTestCaseBase):

    @unittest.skipIf(device_cc() != 90, "This test is only for CC 90")
    def test_invalid_store(self):
        """
        Test invalid store
        """
        def evt_invalid_store(accum):
            D = accum
            F = D + 1  # D has users, which is not allowed on SM90 or higher
            return D, F

        for m, n, k, l in self.get_problem_sizes(8):
            example_inputs = {
                "accum": self.fake_tensor(self.element, (l, m, n)),
                "D": self.fake_tensor(self.element, (l, m, n)),
                "F": self.fake_tensor(self.element, (l, m, n))
            }
            with self.assertRaisesRegex(
                RuntimeError,
                r"On SM90 or higher, D is expected to be a output node with 0 users "
                r"to enable smem reuse between C and D, but got 1"
            ):
                launcher = EVTTestBed(self.element, evt_invalid_store, example_inputs)

            break  # Only need to test once

    def test_aux_store(self):
        """
        Returning a tensor with shape [m, n]
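The restriction under test: on SM90 and newer the epilogue reuses a single shared-memory staging buffer for C and D, so the node stored to D must be a leaf with no downstream consumers. A hedged sketch of the usual fix — have every consumer read the upstream value instead of D (whether the tracer folds a no-op like `accum + 0` back into the same node is implementation-dependent):

```python
def evt_valid_store(accum):
    D = accum + 0   # D stays a pure output node with no users
    F = accum + 1   # F consumes accum directly rather than D
    return D, F
```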
@@ -185,7 +185,9 @@ class EVTTestBed:

         # Compare the results
         for result, ref in zip(result_keys, reference_results):
-            assert torch.equal(epilogue_args[result].flatten(), ref.flatten())
+            assert torch.equal(
+                epilogue_args[result].flatten(),
+                ref.masked_fill(torch.isnan(ref), float('inf')).flatten())

         # Run profile
         if self.profile:
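Rationale, as we read it: the host-side torch reference can overflow to `NaN` (e.g. through `exp` on large accumulators) where the device epilogue saturates to `inf`, so reference NaNs are rewritten to `inf` before the exact-equality check. A minimal standalone sketch of the new comparison:

```python
import torch

def nan_tolerant_equal(result: torch.Tensor, ref: torch.Tensor) -> bool:
    # Positions where the reference overflowed to NaN are expected to be +inf
    # in the device result; patch the reference before the exact comparison.
    patched = ref.masked_fill(torch.isnan(ref), float('inf'))
    return torch.equal(result.flatten(), patched.flatten())
```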
@@ -210,8 +212,11 @@ class EVTTestCaseBase(unittest.TestCase):

         torch.random.manual_seed(42)

-    def fake_tensor(self, element, shape):
-        return Tensor(element=element, shape=shape, layout_tag=cutlass.LayoutType.RowMajor)
+    def fake_tensor(self, element, shape, stride=None):
+        if stride is None:
+            return Tensor(element=element, shape=shape, layout_tag=cutlass.LayoutType.RowMajor)
+        else:
+            return Tensor(element=element, shape=shape, stride=stride)

     def get_problem_sizes(self, alignment, k=None, batch_count=[3,]):
         k = k if k else self.k
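The new `stride` override is what lets a test describe broadcast operands, such as the `(m, 1)` column bias with stride `(1, 0)` in `test_no_lca` above: the zero stride repeats the single column across the n dimension without materializing it. The same trick in NumPy, for illustration:

```python
import numpy as np

bias = np.arange(4, dtype=np.float32)                 # logically shape (4, 1)
tiled = np.lib.stride_tricks.as_strided(
    bias, shape=(4, 8), strides=(bias.itemsize, 0))   # stride 0 broadcasts the column
```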
@@ -121,6 +121,7 @@ void FilterArchitecture() {
     { "SM89*",     89,  89},
     { "SM90*",     90,  90},
     { "SM100*",   100, 100},
+    { "*sm100_*", 100, 100},
     { 0, 0, false }
   };
@@ -100,6 +100,53 @@ TEST(SM100_device_conv3d_fprop_implicitgemm_f16ndhwc_f16ndhwc_f32ndhwc_tensor_op
  EXPECT_TRUE(test::conv::device::TestAllConv<Conv>());
}

TEST(SM100_device_conv3d_fprop_implicitgemm_f16ndhwc_f16ndhwc_f16ndhwc_tensor_op_f32, 64x64x64_1x1x1_alpha_beta_scaled_bias_relu_residual) {
  using ElementAct     = cutlass::half_t;
  using ElementFlt     = cutlass::half_t;
  using ElementOut     = cutlass::half_t;
  using ElementAcc     = float;
  using ElementCompute = float;
  using ElementSrc     = cutlass::half_t;
  using ElementBias    = float;
  using MmaTileShape   = Shape<_64, _64, Shape<_64>>;
  using ClusterShape   = Shape<_1,_1,_1>;

  using FusionOperation = cutlass::epilogue::fusion::PerColResAddPerColBiasEltAct<
      cutlass::epilogue::thread::ReLu, ElementOut, ElementCompute, ElementBias, ElementSrc>;
  using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
      cutlass::arch::Sm100, cutlass::arch::OpClassTensorOp,
      MmaTileShape, ClusterShape,
      cutlass::epilogue::collective::EpilogueTileAuto,
      ElementAcc, ElementCompute,
      ElementSrc, cutlass::layout::TensorNDHWC, 128 / cutlass::sizeof_bits<ElementSrc>::value,
      ElementOut, cutlass::layout::TensorNDHWC, 128 / cutlass::sizeof_bits<ElementOut>::value,
      cutlass::epilogue::collective::EpilogueScheduleAuto,
      FusionOperation
    >::CollectiveOp;

  using CollectiveMainloop = typename cutlass::conv::collective::CollectiveBuilder<
      cutlass::arch::Sm100, cutlass::arch::OpClassTensorOp,
      cutlass::conv::Operator::kFprop,
      ElementAct, cutlass::layout::TensorNDHWC, 16 / sizeof(ElementAct),
      ElementFlt, cutlass::layout::TensorNDHWC, 16 / sizeof(ElementFlt),
      ElementAcc,
      MmaTileShape, ClusterShape,
      cutlass::conv::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
      cutlass::conv::collective::KernelScheduleAuto
    >::CollectiveOp;

  using ProblemShape = cutlass::conv::ConvProblemShape<CollectiveMainloop::DispatchPolicy::ConvOp, CollectiveMainloop::DispatchPolicy::NumSpatialDimensions>;
  using ConvKernel = cutlass::conv::kernel::ConvUniversal<
      ProblemShape,
      CollectiveMainloop,
      CollectiveEpilogue
    >;

  using Conv = cutlass::conv::device::ConvUniversalAdapter<ConvKernel>;

  EXPECT_TRUE(test::conv::device::TestAllConv<Conv>());
}

//
// Cluster tile shape 128x64x64
// Cluster shape 1x1x1
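Judging by its name, the `PerColResAddPerColBiasEltAct` fusion these tests exercise applies an elementwise activation over the scaled accumulator plus a per-column bias and a residual source tensor. A hedged NumPy reference for what `alpha_beta_scaled_bias_relu_residual` appears to verify — our reading of the fusion name, not code from the commit:

```python
import numpy as np

def ref_bias_relu_residual(acc, residual, bias, alpha, beta):
    # acc: conv accumulator; residual: source tensor C; bias broadcast per column
    return np.maximum(alpha * acc + beta * residual + bias, 0.0)
```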
@@ -234,6 +234,53 @@ TEST(SM100_device_conv3d_fprop_implicitgemm_s8ndhwc_s8ndhwc_s32ndhwc_tensor_op_s
  EXPECT_TRUE(test::conv::device::TestAllConv<Conv>());
}

TEST(SM100_device_conv3d_fprop_implicitgemm_s8ndhwc_s8ndhwc_s32ndhwc_tensor_op_s32, 64x64x64_1x1x1_alpha_beta_scaled_bias_relu_residual) {
  using ElementAct     = int8_t;
  using ElementFlt     = int8_t;
  using ElementOut     = int32_t;
  using ElementSrc     = int8_t;
  using ElementAcc     = int32_t;
  using ElementCompute = float;
  using ElementBias    = float;
  using MmaTileShape   = Shape<_64, _64, Shape<_64>>;
  using ClusterShape   = Shape<_1,_1,_1>;

  using FusionOperation = cutlass::epilogue::fusion::PerColResAddPerColBiasEltAct<
      cutlass::epilogue::thread::ReLu, ElementOut, ElementCompute, ElementBias, ElementSrc>;
  using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
      cutlass::arch::Sm100, cutlass::arch::OpClassTensorOp,
      MmaTileShape, ClusterShape,
      cutlass::epilogue::collective::EpilogueTileAuto,
      ElementAcc, ElementCompute,
      ElementSrc, cutlass::layout::TensorNDHWC, 128 / cutlass::sizeof_bits<ElementSrc>::value,
      ElementOut, cutlass::layout::TensorNDHWC, 128 / cutlass::sizeof_bits<ElementOut>::value,
      cutlass::epilogue::collective::EpilogueScheduleAuto,
      FusionOperation
    >::CollectiveOp;

  using CollectiveMainloop = typename cutlass::conv::collective::CollectiveBuilder<
      cutlass::arch::Sm100, cutlass::arch::OpClassTensorOp,
      cutlass::conv::Operator::kFprop,
      ElementAct, cutlass::layout::TensorNDHWC, 16 / sizeof(ElementAct),
      ElementFlt, cutlass::layout::TensorNDHWC, 16 / sizeof(ElementFlt),
      ElementAcc,
      MmaTileShape, ClusterShape,
      cutlass::conv::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
      cutlass::conv::collective::KernelScheduleAuto
    >::CollectiveOp;

  using ProblemShape = cutlass::conv::ConvProblemShape<CollectiveMainloop::DispatchPolicy::ConvOp, CollectiveMainloop::DispatchPolicy::NumSpatialDimensions>;
  using ConvKernel = cutlass::conv::kernel::ConvUniversal<
      ProblemShape,
      CollectiveMainloop,
      CollectiveEpilogue
    >;

  using Conv = cutlass::conv::device::ConvUniversalAdapter<ConvKernel>;

  EXPECT_TRUE(test::conv::device::TestAllConv<Conv>());
}

// alpha != 1 && beta != 0 && bias && gelu
TEST(SM100_device_conv3d_fprop_implicitgemm_s8ndhwc_s8ndhwc_s32ndhwc_tensor_op_s32, 64x64x64_1x1x1_alpha_beta_bias_gelu) {
  using ElementAct = int8_t;

@@ -176,6 +176,8 @@ struct ConvTestbed {

   static constexpr bool DisableSource = cute::is_void_v<typename FusionOp::ElementSource>;

+  static constexpr bool IsResidualEnabled = cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithResidualAdd<FusionOp>::value;
+
   using StrideC = typename Conv::ConvKernel::StrideC;
   using StrideD = typename Conv::ConvKernel::StrideD;
   using ThreadEpilogueOp = typename Conv::ConvKernel::CollectiveEpilogue::ThreadEpilogueOp;

@@ -494,6 +496,7 @@ struct ConvTestbed {
       ElementCompute,
       ElementC,
       ElementD,
+      IsResidualEnabled,
       decltype(mAlpha),
       decltype(mBeta),
       decltype(mBias),
@@ -148,6 +148,19 @@ TEST(CuTe_core, Subbyte_iterator)

  }

  {
    array_subbyte<uint6b_t, 15> a{};
    auto tensor = make_tensor(a.begin(), make_shape(15));

    fill(a, uint6b_t(13));
    for (int i = 0; i < int(a.size()); ++i) {
      EXPECT_EQ(uint6b_t(tensor(i)), uint6b_t(13));
      tensor(i) = uint6b_t(i);
      EXPECT_EQ(uint6b_t(a[i]), uint6b_t(tensor(i)));
    }

  }

  {
    array_subbyte<int4_t, 15> a{};
    auto tensor = make_tensor(a.begin(), make_shape(15));
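Six-bit elements are not byte-addressable, so `subbyte_iterator` proxies each access with a read-modify-write on the packed storage. A Python sketch of the arithmetic for dense 6-bit packing — illustrative only; CuTe's actual bit layout may differ:

```python
def set_u6(buf: bytearray, i: int, val: int) -> None:
    # buf must hold at least ceil(15 * 6 / 8) = 12 bytes for 15 elements
    bit = 6 * i
    byte, off = divmod(bit, 8)
    word = buf[byte] | (buf[byte + 1] << 8)                 # value may straddle bytes
    word = (word & ~(0x3F << off)) | ((val & 0x3F) << off)  # replace the 6-bit field
    buf[byte], buf[byte + 1] = word & 0xFF, (word >> 8) & 0xFF
```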
@@ -950,3 +950,32 @@ cutlass_test_unit_gemm_device_add_executable(
endif()


cutlass_test_unit_gemm_device_add_executable(
  cutlass_test_unit_gemm_device_simt_sm100

  # No batching of source to control compiler memory usage
  BATCH_SOURCES ON
  BATCH_SIZE 1

  sm100_gemm_f32_f32_f32_simt_align1.cu
)

cutlass_test_unit_gemm_device_add_executable(
  cutlass_test_unit_gemm_device_simt_sm100_bias_relu

  # No batching of source to control compiler memory usage
  BATCH_SOURCES ON
  BATCH_SIZE 1

  sm100_gemm_f32_f32_f32_simt_align1_bias_relu.cu
)

cutlass_test_unit_gemm_device_add_executable(
  cutlass_test_unit_gemm_device_simt_sm100_ptr_array

  # No batching of source to control compiler memory usage
  BATCH_SOURCES ON
  BATCH_SIZE 1
  sm100_gemm_f32_f32_f32_simt_align1_ptr_array.cu
)
@@ -190,13 +190,7 @@ private:

   template <typename T>
   auto make_iterator(T* ptr) {
-    using namespace cute;
-    if constexpr (cute::is_subbyte_v<T>) {
-      return subbyte_iterator<T>(ptr);
-    }
-    else {
-      return ptr;
-    }
+    return cute::recast_ptr<T>(ptr);
   }

   template<class T>
@@ -286,26 +280,26 @@ bool initialize_tensor(
    scope_max = 2;
    scope_min = 0;
  }

  else if (bits_input <= 6) {
    scope_max = 2;
    scope_min = -2;
  }

  else if (bits_input <= 8) {

    if constexpr (
      cute::is_same_v<Element, cutlass::float_ue8m0_t>){
      scope_max = 4;
      scope_min = 1;
    }
    else {

      scope_max = 1;
      scope_min = -1;

    }

  }
  else{
    scope_max = 4;
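The chain picks a random-initialization range from the element's bit width so accumulation stays exactly representable; `float_ue8m0_t` (an 8-bit unsigned-exponent-only format with no sign bit) gets a strictly positive range. Condensed into a Python sketch — covering only the branches visible in this hunk; the trailing else's lower bound is cut off above, so it is assumed symmetric here:

```python
def init_scope(bits: int, is_ue8m0: bool = False) -> tuple[int, int]:
    if bits <= 6:
        return (-2, 2)
    if bits <= 8:
        return (1, 4) if is_ue8m0 else (-1, 1)
    return (-4, 4)  # assumed: the hunk only shows scope_max = 4 for wider types
```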
@@ -354,11 +348,11 @@ static constexpr bool is_row_or_col_major(){
// Default MMA input Operands : A , B
//
template<
  class ScheduleType_,
  class Gemm,
  class ElementA_ = typename Gemm::GemmKernel::ElementA,
  class ElementB_ = typename Gemm::GemmKernel::ElementB,
  class Enable = void>
struct HostCollectiveMainloop {
  // Kernel data types
  using ElementA = ElementA_;

@@ -520,7 +514,7 @@ struct HostCollectiveMainloop {

  Arguments to_args() {

    // Runtime datatype selection
    if constexpr (not cute::is_same_v<ElementA, typename Gemm::GemmKernel::ElementA>) {
      using ArrayElementA = typename Gemm::GemmKernel::CollectiveMainloop::ArrayElementA;

@@ -531,13 +525,13 @@ struct HostCollectiveMainloop {
      };
    }
    else {
      Arguments arguments =
      {
        tensor_A.device_data(), stride_a, tensor_B.device_data(), stride_b
      };
      return arguments;
    }
    }
  }

  auto to_host_args(ProblemShapeType problem_size) {

@@ -555,19 +549,19 @@ struct HostCollectiveMainloop {
    auto B = make_tensor(make_iterator(tensor_B.host_data()),
        make_layout(make_shape(N, K, L), stride_b));

    auto dummy_SFA = cute::make_tensor(static_cast<ElementA*>(nullptr),
        cute::make_layout(cute::make_shape(M, K, L), stride_a));
    auto dummy_SFB = cute::make_tensor(static_cast<ElementB*>(nullptr),
        cute::make_layout(cute::make_shape(N, K, L), stride_b));

    cutlass::reference::host::GettMainloopParams<ElementAccumulator,
                                                 decltype(A),
                                                 decltype(B)
                                                 , decltype(dummy_SFA),
                                                 decltype(dummy_SFB)
                                                 > mainloop_params{};

    mainloop_params.A = A;

@@ -631,7 +625,7 @@ template<
  class ElementB_>
struct HostCollectiveMainloopSparse
{

  // Kernel data types
  using ElementA = ElementA_;
  // CuTe layout A for the kernel's sparse tensorA.

@@ -875,8 +869,8 @@ struct HostCollectiveMainloopSparse
};

template<
  class ScheduleType_,
  class Gemm,
  class ElementA_,
  class ElementB_
>

@@ -1076,7 +1070,7 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelTmaWarpSpecializedBlockScaled

  tensor_A.resize(a_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagA>::layout_factory(a_coord, stride_factor_A));
  tensor_B.resize(b_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagB>::layout_factory(b_coord, stride_factor_B));

  EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2022));
  EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2021));

@@ -1098,7 +1092,7 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelTmaWarpSpecializedBlockScaled
  // 2.x host tensor does not natively contain a batch stride or coord, so we spoof it by folding it into the outer mode
  auto sfa_coord = cutlass::make_Coord(m_blks * Blk_MN{} * L, k_blks * Blk_SF{});
  auto sfb_coord = cutlass::make_Coord(n_blks * Blk_MN{} * L, k_blks * Blk_SF{});

  tensor_SFA.resize(sfa_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagA>::layout_factory(sfa_coord, stride_factor_A));
  tensor_SFB.resize(sfb_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagB>::layout_factory(sfb_coord, stride_factor_B));

@@ -1145,12 +1139,12 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelTmaWarpSpecializedBlockScaled
      make_layout(make_shape(N, K, L), stride_b));
  auto SfB = make_tensor(tensor_SFB.host_data(), layout_sfb);

  cutlass::reference::host::GettMainloopParams<ElementAccumulator,
                                               decltype(A),
                                               decltype(B),
                                               decltype(SfA),
                                               decltype(SfB)
                                               >
    mainloop_params{A, SfA, B, SfB};
  return mainloop_params;
}

@@ -1184,7 +1178,7 @@ template<
  class ElementA_,
  class ElementB_
>
struct HostCollectiveMainloop<cutlass::gemm::KernelTmaWarpSpecializedPingpongBlockScaledSm120<SchedulerPipelineStageCount_>,
                              Gemm, ElementA_, ElementB_> : public
  HostCollectiveMainloop<cutlass::gemm::KernelTmaWarpSpecializedBlockScaledSm100<0,0>,
                         Gemm, ElementA_, ElementB_> {

@@ -1454,7 +1448,7 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelSparseTmaWarpSpecializedBlock
  // 2.x host tensor does not natively contain a batch stride or coord, so we spoof it by folding it into the outer mode
  auto sfa_coord = cutlass::make_Coord(m_blks * Blk_MN{} * L, k_blks * Blk_SF{});
  auto sfb_coord = cutlass::make_Coord(n_blks * Blk_MN{} * L, k_blks * Blk_SF{});

  tensor_SFA.resize(sfa_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagA>::layout_factory(sfa_coord, stride_factor_A));
  tensor_SFB.resize(sfb_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagB>::layout_factory(sfb_coord, stride_factor_B));

@@ -1503,12 +1497,12 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelSparseTmaWarpSpecializedBlock
  auto SfB = make_tensor(tensor_SFB.host_data(), layout_sfb);

  // return {A, SfA, B, SfB};
  cutlass::reference::host::GettMainloopParams<ElementAccumulator,
                                               decltype(A),
                                               decltype(B),
                                               decltype(SfA),
                                               decltype(SfB)
                                               >
    mainloop_params{A, SfA, B, SfB};
  return mainloop_params;
}

@@ -1577,7 +1571,7 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelTmaWarpSpecializedCooperative
    typename Base::LayoutTagA::Stride stride_factor_A_ = typename Base::LayoutTagA::Stride(),
    typename Base::LayoutTagB::Stride stride_factor_B_ = typename Base::LayoutTagB::Stride(),
    typename Base::LayoutTagE::Stride stride_factor_E_ = typename Base::LayoutTagE::Stride()
  ) : Base::HostCollectiveMainloop(check_relative_equality_, init_A_, init_B_, seed_, stride_factor_A_,
                                   stride_factor_B_,
                                   stride_factor_E_) {}
};

@@ -1656,8 +1650,8 @@ struct HostCollectiveDefaultEpilogue {
    cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
    cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
    uint64_t seed_ = kDefaultSeed
  ): init_C(init_C_), seed(seed_),
     stride_factor_C(typename LayoutTagC::Stride()),
     stride_factor_D(typename LayoutTagD::Stride()),
     check_relative_equality(check_relative_equality_),
     use_device_scalars(use_device_scalars_){ }

@@ -1766,7 +1760,7 @@ struct HostCollectiveDefaultEpilogue {

  bool passed = equality_check(reference_D.host_view(), tensor_D.host_view());
  if(!passed) {
    std::cout<<"D is incorrect"<<std::endl;
  }
  return passed;
}

@@ -1779,7 +1773,7 @@ struct HostCollectiveDefaultEpilogue {
}

Arguments to_args(ProblemShapeType problem_size) {
  Arguments arguments =
  {
    {alpha, beta},
    tensor_C.device_data(), stride_c, tensor_D.device_data(), stride_d

@@ -1869,7 +1863,7 @@ struct HostCollectiveEpilogue {
      typename Gemm::EpilogueOutputOp>;
  static_assert(cute::is_base_of_v<cutlass::epilogue::fusion::FusionOperation, FusionOp>);

  // Scale factor Generation related
  using SfStrategy = cutlass::reference::host::SfStrategy;
  static constexpr bool IsBlockScaleSupported = FusionOp::IsBlockScaleSupported;

@@ -1880,11 +1874,11 @@ struct HostCollectiveEpilogue {
  using Sm1xxBlockScaledOutputConfig = cutlass::detail::Sm1xxBlockScaledOutputConfig<SFD_VectorSize,
      IsKMajorSFD ? cute::UMMA::Major::K : cute::UMMA::Major::MN>;
  using Blk_MN = typename Sm1xxBlockScaledOutputConfig::Blk_MN;
  using Blk_SF = typename Sm1xxBlockScaledOutputConfig::Blk_SF;
  using OutputSFAtom = typename Sm1xxBlockScaledOutputConfig::SfAtom;
  cutlass::HostTensor<ElementSFD, LayoutTagD> tensor_SFD;
  cutlass::HostTensor<ElementSFD, LayoutTagD> reference_SFD;

  using ElementCompute = typename FusionOp::ElementCompute;
  using ElementScalar = typename FusionOp::ElementScalar;
  using ElementBias = non_void_t<typename FusionOp::ElementBias>;

@@ -1968,9 +1962,9 @@ struct HostCollectiveEpilogue {
    cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
    cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
    uint64_t seed_ = kDefaultSeed
  ): init_scale(init_scale_), init_bias(init_bias_),
     init_C(init_C_), seed(seed_),
     stride_factor_C(typename LayoutTagC::Stride()),
     stride_factor_D(typename LayoutTagD::Stride()),
     check_relative_equality(check_relative_equality_),
     use_device_scalars(use_device_scalars_){ }

@@ -2172,7 +2166,7 @@ struct HostCollectiveEpilogue {
    }
  }

  if constexpr (IsBlockScaleSupported) {
    auto m_blks = cutlass::ceil_div(M, cute::size<0>(cute::shape(OutputSFAtom{})));
    auto n_blks = cutlass::ceil_div(N, cute::size<1>(cute::shape(OutputSFAtom{})));

@@ -2191,7 +2185,7 @@ struct HostCollectiveEpilogue {
    EXPECT_TRUE(initialize_tensor(norm_constant.host_view(), init_scale, seed + 2023));
    norm_constant.sync_device();
  }

  return true;
}

@@ -2258,7 +2252,7 @@ struct HostCollectiveEpilogue {
      }
    }
#endif
    std::cout<<"D is incorrect"<<std::endl;
  }

  if constexpr (IsAbsMaxEnabledD) {

@@ -2279,24 +2273,24 @@ struct HostCollectiveEpilogue {
    EXPECT_GT(cutlass::reference::host::TensorNorm(reference_Aux.host_view()), 0);
    passed &= equality_check(reference_Aux.host_view(), tensor_Aux.host_view());
    if(!passed) {
      std::cout<<"Aux is incorrect"<<std::endl;
    }
    if constexpr (IsAbsMaxEnabledAux) {
      abs_max_Aux.sync_host();
      bool tmp = equality_check(reference_abs_max_Aux.host_view(), abs_max_Aux.host_view());
      if(!tmp) {
        std::cout<<"AbsMax of Aux is incorrect"<<std::endl;
      }
      passed &= tmp;
    }
  }

  if constexpr (IsBlockScaleSupported) {
    tensor_SFD.sync_host();
    bool passed_sf = equality_check(reference_SFD.host_view(), tensor_SFD.host_view());
    if(!passed_sf) {
      std::cout<<"SF is incorrect"<<std::endl;
    }
    passed &= passed_sf;
  }

@@ -2317,7 +2311,7 @@ struct HostCollectiveEpilogue {
    file << "\n\nvbeta = \n" << beta.host_view();
  } else {
    file
      << "\n\nalpha= \n" << alpha.host_view()
      << "\n\nbeta= \n " << beta.host_view();
  }
  file << "\n\n";

@@ -2360,7 +2354,7 @@ struct HostCollectiveEpilogue {
      << "\n\nReference Aux =\n" << reference_Aux.host_view()
      << "\n\nComputed Aux =\n" << tensor_Aux.host_view();
  }

  if constexpr (IsBlockScaleSupported) {
    file
      << "\n\nSFD Reference =\n" << reference_SFD.host_view()

@@ -2378,7 +2372,7 @@ struct HostCollectiveEpilogue {
  auto coord_0 = cutlass::make_Coord(0);
  auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
  auto [M, N, K, L] = problem_shape_MNKL;
  Arguments arguments =
  {
    {},
    tensor_C.device_data(), stride_c, tensor_D.device_data(), stride_d

@@ -2484,7 +2478,7 @@ struct HostCollectiveEpilogue {
    }
  }

  if constexpr (IsBlockScaleSupported) {
    arguments.thread.block_scale_factor_ptr = tensor_SFD.device_data();
    arguments.thread.norm_constant_ptr = norm_constant.device_data();

@@ -2550,7 +2544,7 @@ struct HostCollectiveEpilogue {
        cute::make_layout(cute::make_shape(M, N, L), make_stride(cute::_0{}, cute::_0{}, cute::_1{})));
    }
  }();

  auto SfD = [&](){
    if constexpr (IsBlockScaleSupported) {
      auto tensor = make_tensor(detail::make_iterator(reference_SFD.host_data()),

@@ -2574,11 +2568,11 @@ struct HostCollectiveEpilogue {
    decltype(Valpha),
    decltype(Vbeta),
    ActivationFunctor,
    decltype(SfD),
    Int<SFD_VectorSize>,
    cutlass::plus<ElementCompute>,
    IsColBiasEnabled
    , SfGenStrategy
  > epilogue_params{};

  epilogue_params.C = C;

@@ -2593,7 +2587,7 @@ struct HostCollectiveEpilogue {
    epilogue_params.scale_d = scale_D.at(coord_0);
  }

  if constexpr (IsRowBiasEnabled or IsColBiasEnabled or IsDeBiasEnabled)
  {
    epilogue_params.Bias = Bias;
  }

@@ -2628,7 +2622,7 @@ struct HostCollectiveEpilogue {
    epilogue_params.Vbeta = Vbeta;
    }
  }

  if constexpr (IsBlockScaleSupported) {
    epilogue_params.SfD = SfD;
    epilogue_params.st = norm_constant.at(coord_0);

@@ -2643,19 +2637,19 @@ template <
  bool force_legacy_epilogue = false,
  typename ElementA = typename Gemm::GemmKernel::ElementA,
  typename ElementB = typename Gemm::GemmKernel::ElementB
  , typename RuntimeDatatypeA = void*
  , typename RuntimeDatatypeB = void*
>
struct TestbedImpl {
  // Kernel data types
  using ScheduleType = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy::Schedule;
  // All Collective MMA operands are defined by HostCollectiveMainloopType based on the schedule type
  using HostCollectiveMainloopType = HostCollectiveMainloop<ScheduleType, Gemm, ElementA, ElementB>;

  using CollectiveEpilogue = cute::conditional_t<IsDefaultEpilogue<typename Gemm::GemmKernel::CollectiveEpilogue>::value || force_legacy_epilogue,
                                                 HostCollectiveDefaultEpilogue<Gemm>,
                                                 HostCollectiveEpilogue<Gemm>>;

  using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
  using ElementAccumulator = typename Gemm::GemmKernel::ElementAccumulator;
  using ElementCompute = typename ElementComputeType<Gemm, ElementAccumulator>::Type;

@@ -2666,7 +2660,7 @@ struct TestbedImpl {
  using LayoutTagC = typename CollectiveEpilogue::LayoutTagC;
  using LayoutTagD = typename CollectiveEpilogue::LayoutTagD;

  using InternalElementA = typename Gemm::GemmKernel::ElementA;
  using InternalElementB = typename Gemm::GemmKernel::ElementB;
  static constexpr bool IsRuntimeDataTypeA = cutlass::gemm::collective::detail::is_sm10x_runtime_f8f6f4<InternalElementA>();

@@ -2674,11 +2668,11 @@ struct TestbedImpl {
  static constexpr bool IsRuntimeDataTypeB = cutlass::gemm::collective::detail::is_sm10x_runtime_f8f6f4<InternalElementB>();

  static_assert((IsRuntimeDataTypeA && IsRuntimeDataTypeB) ||
                (!IsRuntimeDataTypeA && !IsRuntimeDataTypeB),
                "ElementA and ElementB in a GEMM kernel should be both runtime or both static.");

  static constexpr bool IsRuntimeDataType = IsRuntimeDataTypeA && IsRuntimeDataTypeB;

  uint32_t sm_count;
  // Used to force multi-wave tests for persistent kernel schedules

@@ -2705,7 +2699,7 @@ struct TestbedImpl {
    cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
    cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
    uint64_t seed_ = kDefaultSeed
  ): collective_mma_inputs(HostCollectiveMainloopType(check_relative_equality_, init_A_, init_B_, seed_)),
     collective_epilogue(CollectiveEpilogue(check_relative_equality_, use_device_scalars_, vector_scale_mode_, init_C_, init_scale_, init_bias_, seed_)) { }

  TestbedImpl(

@@ -2759,7 +2753,7 @@ struct TestbedImpl {
    file
      << "problem: " << ' ' << M << "x" << N << "x" << K << ", Batch count = " << L
      << ", alpha: " << alpha << ", beta: " << beta << "\n\n";

    collective_mma_inputs.print_tensors(file);
    collective_epilogue.print_tensors(file);
  }

@@ -2777,7 +2771,7 @@ struct TestbedImpl {
    auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
    auto mainloop_params = collective_mma_inputs.to_host_args(problem_size);
    auto epilogue_params = collective_epilogue.to_host_args(problem_size);

    cutlass::reference::host::Gemm3x(mainloop_params, epilogue_params);

    bool passed = compare_reference(problem_shape_MNKL, alpha, beta);

@@ -2865,12 +2859,12 @@ struct TestbedImpl {
    detail::MaxSwizzleSize max_swizzle = detail::MaxSwizzleSize{},
    detail::Splits splits = detail::Splits{},
    DecompositionMode decomposition_mode = DecompositionMode::Heuristic
    , RuntimeDatatypeA runtime_input_datatype_a = {}
    , RuntimeDatatypeB runtime_input_datatype_b = {}
  )
  {
#if (CUTLASS_DEBUG_TRACE_LEVEL > 1)
    CUTLASS_TRACE_HOST("TestbedImpl::run");
#endif

    // Fail test if insufficient CUDA device

@@ -2933,12 +2927,12 @@ struct TestbedImpl {

    mainloop_args = collective_mma_inputs.to_args();

    if constexpr (IsRuntimeDataType) {
      mainloop_args.runtime_data_type_a = runtime_input_datatype_a;
      mainloop_args.runtime_data_type_b = runtime_input_datatype_b;
    }

    arguments =
    {

@@ -3062,19 +3056,19 @@ template <
  bool force_legacy_epilogue = false,
  typename ElementA = typename Gemm::GemmKernel::ElementA,
  typename ElementB = typename Gemm::GemmKernel::ElementB
  , typename RuntimeDatatypeA = void*
  , typename RuntimeDatatypeB = void*
>
struct Testbed3x {

  using TestBedImpl = typename detail::TestbedImpl<
    Gemm,
    ActivationFunctor,
    force_legacy_epilogue,
    ElementA,
    ElementB
    , RuntimeDatatypeA
    , RuntimeDatatypeB
  >;
  using Kernel = typename Gemm::GemmKernel;
  using Epilogue = typename Gemm::GemmKernel::CollectiveEpilogue;

@@ -3115,13 +3109,13 @@ struct Testbed3x {
    DecompositionMode decomposition_mode = DecompositionMode::Heuristic,
    bool profiling = false,
    detail::Iterations iterations = detail::Iterations{}
    , RuntimeDatatypeA runtime_input_datatype_a = {}
    , RuntimeDatatypeB runtime_input_datatype_b = {}
  )
  {
    return impl_.run(
      problem_size, alpha, beta, profiling, iterations, raster_order, max_swizzle, splits, decomposition_mode
      , runtime_input_datatype_a, runtime_input_datatype_b
    );
  }
};

@@ -3176,7 +3170,7 @@ bool TestGemmPerf3x(int iterations = 20) {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
template <
  typename Gemm,
  typename RuntimeDataTypeA,
  typename RuntimeDataTypeB,
  bool force_legacy_epilogue = false>

@@ -3266,8 +3260,8 @@ bool TestRuntimeDataTypeSmall(
    problem_splits.push_back(detail::Splits{2});
  }
  for (auto splits : problem_splits) {

    if constexpr (cute::is_same_v<RuntimeDataTypeA, cute::UMMA::MXF4Format> &&
                  cute::is_same_v<RuntimeDataTypeB, cute::UMMA::MXF4Format>) {
      // e2m1_e2m1
      if (runtime_input_datatype_a == cute::UMMA::MXF4Format::E2M1 &&

@@ -3300,16 +3294,16 @@ bool TestRuntimeDataTypeSmall(
        return false;
      }
    }
    else
    if constexpr (cute::is_same_v<RuntimeDataTypeA, cute::UMMA::MXF8F6F4Format> &&
                  cute::is_same_v<RuntimeDataTypeB, cute::UMMA::MXF8F6F4Format>) {
      static_assert((cute::is_same_v<InternalElementA, cutlass::type_erased_dynamic_float8_t> ||
                     cute::is_same_v<InternalElementA, cutlass::type_erased_dynamic_float6_t> ||
                     cute::is_same_v<InternalElementA, cutlass::type_erased_dynamic_float4_t>) &&
                    (cute::is_same_v<InternalElementB, cutlass::type_erased_dynamic_float8_t> ||
                     cute::is_same_v<InternalElementB, cutlass::type_erased_dynamic_float6_t> ||
                     cute::is_same_v<InternalElementB, cutlass::type_erased_dynamic_float4_t>),
                    "Runtime datatype must be selected with an appropriate static umbrella data type.");
      if constexpr (cute::is_same_v<InternalElementA, cutlass::type_erased_dynamic_float8_t> &&
                    cute::is_same_v<InternalElementB, cutlass::type_erased_dynamic_float4_t>) {

@@ -3483,7 +3477,7 @@ bool TestRuntimeDataTypeSmall(
        return false;
      }
    }
    else
    if constexpr (cute::is_same_v<InternalElementA, cutlass::type_erased_dynamic_float8_t> &&
                  cute::is_same_v<InternalElementB, cutlass::type_erased_dynamic_float8_t>) {
      // e5m2_e5m2

@@ -3622,16 +3616,16 @@ bool TestRuntimeDataTypeSmall(

template <typename Gemm, bool force_legacy_epilogue = false, bool apply_alignment_offset = true, bool test_batched_alpha_beta = false>
bool TestSmall(double alpha = 1.0, double beta = cute::is_same_v<typename Gemm::GemmKernel::ElementC, void> ? 0.0 : 1.0,
               CheckEquality check_relative_equality = CheckEquality::RELATIVE,
               ScalarLoc use_device_scalars = ScalarLoc::ON_DEVICE,
               VectorScale vector_scale_mode = VectorScale::ENABLED,
               std::vector<int> override_problem_size_k = {}) {

  using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
  using ElementScalar = typename Gemm::EpilogueOutputOp::ElementScalar;
  using CtaShape_MNK = typename Gemm::GemmKernel::CollectiveMainloop::CtaShape_MNK;
  using DispatchPolicy = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy;
  CtaShape_MNK cta_shape;
  Testbed3x<Gemm, cutlass::epilogue::thread::Identity, force_legacy_epilogue> testbed(check_relative_equality, use_device_scalars, vector_scale_mode);
  static constexpr int SmCount = 16;
static constexpr int MultiplierOffsetM = 1;
|
||||
@ -3901,7 +3895,7 @@ bool TestAll(double alpha = 1.0, double beta = cute::is_same_v<typename Gemm::Ge
|
||||
}
|
||||
catch (std::exception const& e) {
|
||||
EXPECT_TRUE(false) << "TestAll: testbed.run {"
|
||||
<< "m: " << m << ", n: " << n << ", k: " << k
|
||||
<< "m: " << m << ", n: " << n << ", k: " << k
|
||||
<< ", alpha: " << alpha << ", beta: " << beta
|
||||
<< ", raster_order: ???"
|
||||
<< ", max_swizzle_size: " << static_cast<int>(max_swizzle_size)
|
||||
@ -3912,7 +3906,7 @@ bool TestAll(double alpha = 1.0, double beta = cute::is_same_v<typename Gemm::Ge
|
||||
}
|
||||
catch (...) {
|
||||
EXPECT_TRUE(false) << "TestAll: testbed.run {"
|
||||
<< "m: " << m << ", n: " << n << ", k: " << k
|
||||
<< "m: " << m << ", n: " << n << ", k: " << k
|
||||
<< ", alpha: " << alpha << ", beta: " << beta
|
||||
<< ", raster_order: ???"
|
||||
<< ", max_swizzle_size: " << static_cast<int>(max_swizzle_size)
|
||||
@ -3923,7 +3917,7 @@ bool TestAll(double alpha = 1.0, double beta = cute::is_same_v<typename Gemm::Ge
|
||||
}
|
||||
|
||||
EXPECT_TRUE(passed) << "TestAll: testbed.run {"
|
||||
<< "m: " << m << ", n: " << n << ", k: " << k
|
||||
<< "m: " << m << ", n: " << n << ", k: " << k
|
||||
<< ", alpha: " << alpha << ", beta: " << beta
|
||||
<< ", raster_order: ???"
|
||||
<< ", max_swizzle_size: " << static_cast<int>(max_swizzle_size)
|
||||
|
||||
@ -142,13 +142,7 @@ private:
|
||||
|
||||
template <typename T>
|
||||
auto make_iterator(T* ptr) {
|
||||
using namespace cute;
|
||||
if constexpr (cute::is_subbyte_v<T>) {
|
||||
return subbyte_iterator<T>(ptr);
|
||||
}
|
||||
else {
|
||||
return ptr;
|
||||
}
|
||||
return cute::recast_ptr<T>(ptr);
|
||||
}
|
||||
|
||||
template<class T>
|
||||
@ -224,26 +218,26 @@ bool initialize_tensor(
|
||||
scope_max = 2;
|
||||
scope_min = 0;
|
||||
}
|
||||
|
||||
|
||||
else if (bits_input <= 6) {
|
||||
scope_max = 2;
|
||||
scope_min = -2;
|
||||
}
|
||||
|
||||
|
||||
else if (bits_input <= 8) {
|
||||
|
||||
|
||||
if constexpr (
|
||||
cute::is_same_v<Element, cutlass::float_ue8m0_t>){
|
||||
scope_max = 4;
|
||||
scope_min = 1;
|
||||
}
|
||||
else {
|
||||
|
||||
|
||||
scope_max = 1;
|
||||
scope_min = -1;
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
else{
|
||||
scope_max = 4;
|
||||
@ -292,10 +286,10 @@ static constexpr bool is_row_or_col_major(){
|
||||
// Default MMA input Operands : A , B
|
||||
//
|
||||
template<
|
||||
class ScheduleType_,
|
||||
class Gemm,
|
||||
class ScheduleType_,
|
||||
class Gemm,
|
||||
class ElementA_ = typename Gemm::GemmKernel::ElementA,
|
||||
class ElementB_ = typename Gemm::GemmKernel::ElementB>
|
||||
class ElementB_ = typename Gemm::GemmKernel::ElementB>
|
||||
struct HostCollectiveMainloop {
|
||||
// Kernel data types
|
||||
using ElementA = ElementA_;
|
||||
@ -432,13 +426,13 @@ struct HostCollectiveMainloop {
|
||||
|
||||
if constexpr (IsGroupGemm) {
|
||||
arguments
|
||||
=
|
||||
=
|
||||
{
|
||||
device_tensors_A.get(), stride_a_device.get(), device_tensors_B.get(), stride_b_device.get()
|
||||
};
|
||||
}
|
||||
}
|
||||
else {
|
||||
arguments =
|
||||
arguments =
|
||||
{
|
||||
device_tensors_A.get(), stride_a_host[0], device_tensors_B.get(), stride_b_host[0]
|
||||
};
|
||||
@ -458,8 +452,8 @@ struct HostCollectiveMainloop {
|
||||
auto B = make_tensor(make_iterator(tensors_B[batch].host_data()),
|
||||
make_layout(make_shape(N, K, 1), stride_b_host[batch]));
|
||||
|
||||
cutlass::reference::host::GettMainloopParams<ElementAccumulator,
|
||||
decltype(A),
|
||||
cutlass::reference::host::GettMainloopParams<ElementAccumulator,
|
||||
decltype(A),
|
||||
decltype(B)
|
||||
> mainloop_params{};
|
||||
|
||||
@ -542,7 +536,7 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedBlo
|
||||
using LayoutTagB = cutlass::detail::StrideToLayoutTagB_t<StrideB>;
|
||||
|
||||
static constexpr bool IsGroupGemm = !cute::is_same_v<StrideA, InternalStrideA>;
|
||||
|
||||
|
||||
using ElementAccumulator = typename Gemm::GemmKernel::ElementAccumulator;
|
||||
using ElementScalingFactor = ElementAccumulator;
|
||||
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
|
||||
@ -627,7 +621,7 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedBlo
|
||||
tensors_SFB.clear();
|
||||
layout_sfa_host.clear();
|
||||
layout_sfb_host.clear();
|
||||
|
||||
|
||||
auto [M, N, K, L] = cute::append<4>(problem_shapes.get_host_problem_shape(0), 1);
|
||||
L = std::max(problem_shapes.groups(), L);
|
||||
|
||||
@ -636,7 +630,7 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedBlo
|
||||
|
||||
stride_a_host.push_back(cutlass::make_cute_packed_stride(InternalStrideA{}, {M, K, 1}));
|
||||
stride_b_host.push_back(cutlass::make_cute_packed_stride(InternalStrideB{}, {N, K, 1}));
|
||||
|
||||
|
||||
// 2.x host tensor does not natively contain a batch stride or coord, so we spoof if by folding it into the outer mode
|
||||
auto a_coord = cutlass::make_Coord(M, K);
|
||||
// Cutlass has Row/Col major refers to MxK times KxN matrix product,
|
||||
@ -658,13 +652,13 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedBlo
|
||||
tensors_B[i].sync_device();
|
||||
|
||||
using namespace cute;
|
||||
|
||||
|
||||
auto k_blks = cutlass::ceil_div(K, size<1>(shape(SfAtom{})));
|
||||
auto m_blks = cutlass::ceil_div(M, Blk_MN{});
|
||||
auto n_blks = cutlass::ceil_div(N, Blk_MN{});
|
||||
layout_sfa_host.push_back(Sm1xxBlkScaledConfig::tile_atom_to_shape_SFA(cute::make_shape(M, N, K, 1)));
|
||||
layout_sfb_host.push_back(Sm1xxBlkScaledConfig::tile_atom_to_shape_SFB(cute::make_shape(M, N, K, 1)));
|
||||
|
||||
|
||||
// 2.x host tensor does not natively contain a batch stride or coord, so we spoof if by folding it into the outer mode
|
||||
auto sfa_coord = cutlass::make_Coord(m_blks * Blk_MN{}, k_blks * Blk_SF{});
|
||||
auto sfb_coord = cutlass::make_Coord(n_blks * Blk_MN{}, k_blks * Blk_SF{});
|
||||
@ -717,13 +711,13 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedBlo
|
||||
|
||||
stride_a_device.reset(problem_shapes.groups());
|
||||
stride_a_device.copy_from_host(stride_a_host.data());
|
||||
|
||||
|
||||
stride_b_device.reset(problem_shapes.groups());
|
||||
stride_b_device.copy_from_host(stride_b_host.data());
|
||||
|
||||
layout_sfa_device.reset(problem_shapes.groups());
|
||||
layout_sfa_device.copy_from_host(layout_sfa_host.data());
|
||||
|
||||
|
||||
layout_sfb_device.reset(problem_shapes.groups());
|
||||
layout_sfb_device.copy_from_host(layout_sfb_host.data());
|
||||
|
||||
@ -732,15 +726,15 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedBlo
|
||||
device_tensors_A.get(), stride_a_device.get(),
|
||||
device_tensors_B.get(), stride_b_device.get(),
|
||||
device_tensors_SFA.get(), layout_sfa_device.get(),
|
||||
device_tensors_SFB.get(), layout_sfb_device.get()
|
||||
device_tensors_SFB.get(), layout_sfb_device.get()
|
||||
};
|
||||
}
|
||||
}
|
||||
else {
|
||||
return Arguments{
|
||||
device_tensors_A.get(), stride_a_host[0],
|
||||
device_tensors_B.get(), stride_b_host[0],
|
||||
device_tensors_SFA.get(), layout_sfa_host[0],
|
||||
device_tensors_SFB.get(), layout_sfb_host[0]
|
||||
device_tensors_SFB.get(), layout_sfb_host[0]
|
||||
};
|
||||
}
|
||||
}
|
||||
@ -759,12 +753,12 @@ struct HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedBlo
|
||||
make_layout(make_shape(N, K, 1), stride_b_host[batch]));
|
||||
auto SfB = make_tensor(tensors_SFB[batch].host_data(), layout_sfb_host[batch]);
|
||||
|
||||
return cutlass::reference::host::GettMainloopParams<ElementAccumulator,
|
||||
decltype(A),
|
||||
decltype(B),
|
||||
decltype(SfA),
|
||||
return cutlass::reference::host::GettMainloopParams<ElementAccumulator,
|
||||
decltype(A),
|
||||
decltype(B),
|
||||
decltype(SfA),
|
||||
decltype(SfB)
|
||||
>
|
||||
>
|
||||
{A, SfA, B, SfB};
|
||||
}
|
||||
|
||||
@ -795,7 +789,7 @@ template<
|
||||
class ElementA_,
|
||||
class ElementB_
|
||||
>
|
||||
struct HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedPingpongBlockScaledSm120<SchedulerPipelineStageCount_>,
|
||||
struct HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedPingpongBlockScaledSm120<SchedulerPipelineStageCount_>,
|
||||
Gemm, ElementA_, ElementB_> : public
|
||||
HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedBlockScaledSm100<0,0>,
|
||||
Gemm, ElementA_, ElementB_> {
|
||||
@ -820,7 +814,7 @@ template<
|
||||
class ElementA_,
|
||||
class ElementB_
|
||||
>
|
||||
struct HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedCooperativeBlockScaledSm120<SchedulerPipelineStageCount_>,
|
||||
struct HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedCooperativeBlockScaledSm120<SchedulerPipelineStageCount_>,
|
||||
Gemm, ElementA_, ElementB_> : public
|
||||
HostCollectiveMainloop<cutlass::gemm::KernelPtrArrayTmaWarpSpecializedBlockScaledSm100<0,0>,
|
||||
Gemm, ElementA_, ElementB_> {
|
||||
@ -854,7 +848,7 @@ struct HostCollectiveDefaultEpilogue {
|
||||
using ElementC = non_void_t<typename kernel::ElementC, ElementD>;
|
||||
using StrideC = typename kernel::StrideC;
|
||||
using InternalStrideC = typename kernel::InternalStrideC;
|
||||
|
||||
|
||||
static constexpr bool IsGroupGemm = !cute::is_same_v<StrideD, InternalStrideD>;
|
||||
|
||||
using FusionOp = typename Gemm::EpilogueOutputOp;
|
||||
@ -884,7 +878,7 @@ struct HostCollectiveDefaultEpilogue {
|
||||
/// Initialization
|
||||
cutlass::DeviceAllocation<InternalStrideC> stride_c_device;
|
||||
cutlass::DeviceAllocation<InternalStrideD> stride_d_device;
|
||||
|
||||
|
||||
std::vector<InternalStrideC> stride_c_host;
|
||||
std::vector<InternalStrideD> stride_d_host;
|
||||
|
||||
@ -920,15 +914,15 @@ struct HostCollectiveDefaultEpilogue {
|
||||
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
|
||||
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
|
||||
uint64_t seed_ = kDefaultSeed
|
||||
): init_C(init_C_), seed(seed_),
|
||||
stride_factor_C(typename LayoutTagC::Stride()),
|
||||
): init_C(init_C_), seed(seed_),
|
||||
stride_factor_C(typename LayoutTagC::Stride()),
|
||||
stride_factor_D(typename LayoutTagD::Stride()),
|
||||
check_relative_equality(check_relative_equality_),
|
||||
use_device_scalars(use_device_scalars_){ }
|
||||
|
||||
bool initialize(ProblemShapeType problem_shapes, ElementScalar alpha_=1.f, ElementScalar beta_=0.f) {
|
||||
// Initialize Epilogue tensors
|
||||
|
||||
|
||||
tensors_C.clear();
|
||||
tensors_D.clear();
|
||||
references_D.clear();
|
||||
@ -991,7 +985,7 @@ struct HostCollectiveDefaultEpilogue {
|
||||
return cutlass::reference::host::TensorEquals(lhs, rhs);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool compare_reference(
|
||||
ProblemShapeType problem_shapes,
|
||||
ElementScalar alpha,
|
||||
@ -1013,7 +1007,7 @@ struct HostCollectiveDefaultEpilogue {
|
||||
|
||||
bool passed = equality_check(references_D[batch].host_view(), tensors_D[batch].host_view());
|
||||
if(!passed) {
|
||||
std::cout<<"D is incorrect"<<std::endl;
|
||||
std::cout<<"D is incorrect"<<std::endl;
|
||||
}
|
||||
return passed;
|
||||
}
|
||||
@ -1051,14 +1045,14 @@ struct HostCollectiveDefaultEpilogue {
|
||||
|
||||
Arguments arguments;
|
||||
if constexpr (IsGroupGemm) {
|
||||
arguments =
|
||||
arguments =
|
||||
{
|
||||
{alpha, beta},
|
||||
device_tensors_C.get(), stride_c_device.get(), device_tensors_D.get(), stride_d_device.get()
|
||||
};
|
||||
}
|
||||
else {
|
||||
arguments =
|
||||
arguments =
|
||||
{
|
||||
{alpha, beta},
|
||||
device_tensors_C.get(), stride_c_host[0], device_tensors_D.get(), stride_d_host[0]
|
||||
@ -1153,7 +1147,7 @@ struct HostCollectiveEpilogue {
|
||||
using FusionOp = typename Gemm::EpilogueOutputOp;
|
||||
static_assert(cute::is_base_of_v<cutlass::epilogue::fusion::FusionOperation, FusionOp>);
|
||||
|
||||
|
||||
|
||||
// Scale factor Generation related
|
||||
using SfStrategy = cutlass::reference::host::SfStrategy;
|
||||
static constexpr bool IsBlockScaleSupported = FusionOp::IsBlockScaleSupported;
|
||||
@ -1164,7 +1158,7 @@ struct HostCollectiveEpilogue {
|
||||
SFD_VectorSize
|
||||
>;
|
||||
using Blk_MN = typename Sm1xxBlockScaledOutputConfig::Blk_MN;
|
||||
using Blk_SF = typename Sm1xxBlockScaledOutputConfig::Blk_SF;
|
||||
using Blk_SF = typename Sm1xxBlockScaledOutputConfig::Blk_SF;
|
||||
using OutputSFAtom = typename Sm1xxBlockScaledOutputConfig::SfAtom;
|
||||
std::vector<cutlass::HostTensor<ElementSFD, LayoutTagD>> tensors_SFD;
|
||||
std::vector<cutlass::HostTensor<ElementSFD, LayoutTagD>> references_SFD;
|
||||
@ -1197,7 +1191,7 @@ struct HostCollectiveEpilogue {
|
||||
/// Initialization
|
||||
cutlass::DeviceAllocation<InternalStrideC> stride_c_device;
|
||||
cutlass::DeviceAllocation<InternalStrideD> stride_d_device;
|
||||
|
||||
|
||||
std::vector<InternalStrideC> stride_c_host;
|
||||
std::vector<InternalStrideD> stride_d_host;
|
||||
|
||||
@ -1216,7 +1210,7 @@ struct HostCollectiveEpilogue {
|
||||
std::vector<cutlass::HostTensor<ElementC, LayoutTagC>> tensors_C;
|
||||
cutlass::DeviceAllocation<const ElementC *> device_tensors_C;
|
||||
cutlass::HostTensor<ElementCompute, LayoutTagScalar> norm_constant;
|
||||
|
||||
|
||||
// Outputs
|
||||
cutlass::HostTensor<ElementAmax, LayoutTagScalar> abs_max_Aux;
|
||||
cutlass::HostTensor<ElementAmax, LayoutTagScalar> abs_max_D;
|
||||
@ -1256,25 +1250,25 @@ struct HostCollectiveEpilogue {
|
||||
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
|
||||
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
|
||||
uint64_t seed_ = kDefaultSeed
|
||||
): init_scale(init_scale_), init_bias(init_bias_),
|
||||
init_C(init_C_), seed(seed_),
|
||||
stride_factor_C(typename LayoutTagC::Stride()),
|
||||
): init_scale(init_scale_), init_bias(init_bias_),
|
||||
init_C(init_C_), seed(seed_),
|
||||
stride_factor_C(typename LayoutTagC::Stride()),
|
||||
stride_factor_D(typename LayoutTagD::Stride()),
|
||||
check_relative_equality(check_relative_equality_),
|
||||
use_device_scalars(use_device_scalars_){ }
|
||||
|
||||
bool initialize(ProblemShapeType problem_shapes, ElementScalar alpha_=1.f, ElementScalar beta_=0.f) {
|
||||
// Initialize Epilogue tensors
|
||||
|
||||
|
||||
tensors_C.clear();
|
||||
tensors_D.clear();
|
||||
references_D.clear();
|
||||
stride_c_host.clear();
|
||||
stride_d_host.clear();
|
||||
|
||||
|
||||
tensors_SFD.clear();
|
||||
references_SFD.clear();
|
||||
|
||||
|
||||
|
||||
auto [M, N, K, L] = cute::append<4>(problem_shapes.get_host_problem_shape(0), 1);
|
||||
L = std::max(problem_shapes.groups(), L);
|
||||
@ -1406,7 +1400,7 @@ struct HostCollectiveEpilogue {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
if constexpr (IsBlockScaleSupported) {
|
||||
for (int32_t i = 0; i < L; ++i) {
|
||||
auto [M, N, K, _] = cute::append<4>(problem_shapes.get_host_problem_shape(i), 1);
|
||||
@ -1424,7 +1418,7 @@ struct HostCollectiveEpilogue {
|
||||
EXPECT_TRUE(initialize_tensor(norm_constant.host_view(), init_scale, seed + 2023));
|
||||
norm_constant.sync_device();
|
||||
}
|
||||
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -1457,7 +1451,7 @@ struct HostCollectiveEpilogue {
|
||||
return cutlass::reference::host::TensorEquals(lhs, rhs);
|
||||
}
|
||||
}
|

bool compare_reference(
ProblemShapeType problem_shapes,
ElementScalar alpha,
@ -1476,7 +1470,7 @@ struct HostCollectiveEpilogue {

bool passed = equality_check(references_D[batch].host_view(), tensors_D[batch].host_view());
if(!passed) {
std::cout<<"D is incorrect"<<std::endl;
}

if constexpr (IsAbsMaxEnabledD) {
@ -1497,27 +1491,27 @@ struct HostCollectiveEpilogue {
EXPECT_GT(cutlass::reference::host::TensorNorm(references_Aux[batch].host_view()), 0);
passed &= equality_check(references_Aux[batch].host_view(), tensors_Aux[batch].host_view());
if(!passed) {
std::cout<<"Aux is incorrect"<<std::endl;
}
if constexpr (IsAbsMaxEnabledAux) {
abs_max_Aux.sync_host();
bool tmp = equality_check(reference_abs_max_Aux.host_view(), abs_max_Aux.host_view());
if(!tmp) {
std::cout<<"AbsMax of Aux is incorrect"<<std::endl;
}
passed &= tmp;
}
}

if constexpr (IsBlockScaleSupported) {
tensors_SFD[batch].sync_host();
bool passed_sf = equality_check(references_SFD[batch].host_view(), tensors_SFD[batch].host_view());
if(!passed_sf) {
std::cout<<"SF is incorrect"<<std::endl;
}
passed &= passed_sf;
}

return passed;
}
@ -1533,7 +1527,7 @@ struct HostCollectiveEpilogue {
if constexpr (IsPerRowScaleEnabled) {
file << "\n\nvalpha = \n" << alpha.host_view();
file << "\n\nvbeta = \n" << beta.host_view();
}
}
else {
file
<< ", alpha: " << alpha.at(coord_0) << ", beta: " << beta.at(coord_0);
@ -1631,14 +1625,14 @@ struct HostCollectiveEpilogue {

Arguments arguments;
if constexpr (IsGroupGemm) {
arguments =
{
{},
device_tensors_C_ptr, stride_c_device.get(), device_tensors_D.get(), stride_d_device.get()
};
}
else {
arguments =
{
{},
device_tensors_C_ptr, stride_c_host[0], device_tensors_D.get(), stride_d_host[0]
@ -1661,10 +1655,10 @@ struct HostCollectiveEpilogue {
fusion_args.beta = beta.at(coord_0);

fusion_args.alpha_ptr = alpha.device_data();
// can_implement requires beta_ptr to not be set if it's voidC
fusion_args.beta_ptr = cute::is_void_v<typename kernel::ElementC> ? nullptr :
beta.device_data();

if constexpr (IsScaleFactorEnabled) {
fusion_args.scale_a = scale_A.at(coord_0);
fusion_args.scale_b = scale_B.at(coord_0);
@ -1717,7 +1711,7 @@ struct HostCollectiveEpilogue {
fusion_args.amax_aux_ptr = abs_max_Aux.device_data();
}
}

if constexpr (IsBlockScaleSupported) {
std::vector<ElementSFD *> ptr_SFD_host(L);
for (int32_t i = 0; i < L; ++i) {
@ -1729,7 +1723,7 @@ struct HostCollectiveEpilogue {
arguments.thread.block_scale_factor_ptr = device_tensors_SFD.get();
arguments.thread.norm_constant_ptr = norm_constant.device_data();
}

}

return arguments;
@ -1763,7 +1757,7 @@ struct HostCollectiveEpilogue {
cute::make_layout(cute::make_shape(M, N, cute::_1{}), cute::make_stride(cute::_1{}, cute::_0{}, M)));
auto Vbeta = cute::make_tensor(detail::make_iterator(beta.host_data()),
cute::make_layout(cute::make_shape(M, N, cute::_1{}), cute::make_stride(cute::_1{}, cute::_0{}, N)));

auto SfD = [&](){
if constexpr (IsBlockScaleSupported) {
auto tensor = make_tensor(detail::make_iterator(references_SFD[batch].host_data()),
@ -1775,7 +1769,7 @@ struct HostCollectiveEpilogue {
return D;
}
}();

cutlass::reference::host::GettEpilogueParams<
ElementScalar,
@ -1789,11 +1783,11 @@ struct HostCollectiveEpilogue {
decltype(Valpha),
decltype(Vbeta),
ActivationFunctor
, decltype(SfD)
, Int<SFD_VectorSize>
, cutlass::plus<ElementCompute>
, false
, SfGenStrategy
> epilogue_params{};

epilogue_params.C = C;
@ -1836,12 +1830,12 @@ struct HostCollectiveEpilogue {
epilogue_params.Vbeta = Vbeta;
}
}

if constexpr (IsBlockScaleSupported) {
epilogue_params.SfD = SfD;
epilogue_params.st = norm_constant.at(coord_0);
}

return epilogue_params;
}
};
@ -1858,8 +1852,8 @@ struct TestbedImpl {
using ScheduleType = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy::Schedule;
// All Collective MMA operands are defined by HostCollectiveMainloopType based on the schedule type
using HostCollectiveMainloopType = HostCollectiveMainloop<ScheduleType, Gemm, ElementA, ElementB>;
using CollectiveEpilogue = cute::conditional_t<IsDefaultEpilogue<typename Gemm::GemmKernel::CollectiveEpilogue>::value || force_legacy_epilogue,
HostCollectiveDefaultEpilogue<Gemm>,
HostCollectiveEpilogue<Gemm>>;

using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
@ -1899,7 +1893,7 @@ struct TestbedImpl {
cutlass::Distribution::Kind init_scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = kDefaultSeed
): collective_mma_inputs(HostCollectiveMainloopType(check_relative_equality_, init_A_, init_B_, seed_)),
collective_epilogue(CollectiveEpilogue(check_relative_equality_, use_device_scalars_, vector_scale_mode_, init_C_, init_scale_, init_bias_, seed_)) { }

TestbedImpl(
@ -2127,10 +2121,10 @@ template <
struct Testbed3x {

using TestBedImpl = typename detail::TestbedImpl<
Gemm,
ActivationFunctor,
force_legacy_epilogue,
ElementA,
ElementB
>;
using Kernel = typename Gemm::GemmKernel;
@ -2220,7 +2214,7 @@ bool TestAll(double alpha = 1.0, double beta = 0.0, CheckEquality check_relative
cutlass::from_real<ElementScalar>(alpha),
cutlass::from_real<ElementScalar>(beta)
);
}
}
else {
ProblemShapeType problem_size{{m, n, k, batch}};

@ -2247,9 +2241,9 @@ bool TestAll(double alpha = 1.0, double beta = 0.0, CheckEquality check_relative

template <typename Gemm, bool force_legacy_epilogue = false, bool apply_alignment_offset = false>
bool TestSmall(double alpha = 1.0, double beta = 1.0,
CheckEquality check_relative_equality = CheckEquality::RELATIVE,
ScalarLoc use_device_scalars = ScalarLoc::ON_DEVICE,
VectorScale vector_scale_mode = VectorScale::ENABLED,
std::vector<int> override_problem_size_k = {}) {
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
using ElementScalar = typename Gemm::EpilogueOutputOp::ElementScalar;
@ -2257,13 +2251,13 @@ bool TestSmall(double alpha = 1.0, double beta = 1.0,
using ElementB = typename Gemm::GemmKernel::ElementB;
using TiledMma = typename Gemm::GemmKernel::TiledMma;
int alignment_bits = 128;

static constexpr bool IsF8F6F4 = cutlass::gemm::collective::detail::is_sm100_mma_f8f6f4<TiledMma, ElementA, ElementB>();
alignment_bits = cutlass::detail::get_input_alignment_bits<ElementA, IsF8F6F4>();
// For fp4 and fp6 kernels, the min alignment_input is 128 elements, so we don't need to add alignment_input in test problem sizes.
int alignment_input = (alignment_bits / cute::sizeof_bits<ElementA>::value == 128) ? 0 : (alignment_bits / cute::sizeof_bits<ElementA>::value);
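To make the arithmetic above concrete, here is the same computation in isolation for a 32-bit element type (a standalone sketch; the constants are just worked examples of the expression above):

// Sketch of the alignment arithmetic, assuming alignment_bits = 128:
//   float (32-bit): 128 / 32 = 4  -> problem sizes are perturbed in steps of 4
//   fp8    (8-bit): 128 / 8  = 16 -> steps of 16
//   fp4/fp6 paths raise alignment_bits until the quotient reaches 128
//   elements, so the perturbation is skipped (alignment_input = 0).
constexpr int kAlignmentBits = 128;
constexpr int kElementBits = 32; // e.g. float
constexpr int kQuotient = kAlignmentBits / kElementBits;
constexpr int kAlignmentInput = (kQuotient == 128) ? 0 : kQuotient;
static_assert(kAlignmentInput == 4, "float problems are perturbed in steps of 4 elements");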

if constexpr (apply_alignment_offset) {
// If BlockScaled, then min alignment is SFVecSize
static constexpr bool IsBlockScaleSupported = Gemm::EpilogueOutputOp::IsBlockScaleSupported;
@ -2272,13 +2266,13 @@ bool TestSmall(double alpha = 1.0, double beta = 1.0,
alignment_input = cutlass::round_up(alignment_input, SFVecSize);
}
}

using CtaShape_MNK = typename Gemm::GemmKernel::CollectiveMainloop::CtaShape_MNK;
using DispatchPolicy = typename Gemm::GemmKernel::CollectiveMainloop::DispatchPolicy;
CtaShape_MNK cta_shape;
Testbed3x<Gemm, cutlass::epilogue::thread::Identity, force_legacy_epilogue> testbed(check_relative_equality, use_device_scalars, vector_scale_mode);
// For Ptr-Array and Grouped GEMM ideally we need to know SM count at runtime
static constexpr int SmCount = 16;

float waves[] = {0.5, 2.5};
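The wave counts size the test problems relative to the (fixed) SM count: one wave is one CTA per SM, so 0.5 and 2.5 waves exercise both an under-subscribed and an oversubscribed launch. A rough sketch of the relationship, using a hypothetical helper that is not part of the testbed:

// Hypothetical helper: the CTA count targeted by a wave fraction.
inline int ctas_for_wave(float wave, int sm_count) {
  return static_cast<int>(wave * sm_count);
}
// With SmCount = 16: 0.5 waves -> 8 CTAs (partially filled GPU),
// 2.5 waves -> 40 CTAs (two full waves plus a tail).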
test/unit/gemm/device/sm100_gemm_f32_f32_f32_simt_align1.cu (new file, 321 lines)
@ -0,0 +1,321 @@
/***************************************************************************************************
* Copyright (c) 2024 - 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/

/*! \file
\brief Tests for device-wide GEMM interface (SGEMM)
*/

#include "cutlass/cutlass.h"

#include "cutlass/numeric_types.h"
#include "cutlass/arch/mma_sm100.h"

#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"

#include "cutlass/epilogue/dispatch_policy.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"

#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"

#include "../../common/cutlass_unit_test.h"

#include "gemm_testbed_3x.hpp"

#if defined(CUTLASS_ARCH_MMA_SM100_SUPPORTED)

/////////////////////////////////////////////////////////////////////////////////////////////////

// CTA tile shape: 128x128x16

/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(SM100Only_Device_Gemm_f32n_f32t_f32n_simt_f32_align1, 128x128x16) {
// NT layout
using namespace cute;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using TileShape = Shape<_128, _128, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

// Mainloop
using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
cutlass::gemm::KernelMultistage
>::CollectiveOp;

// Epilogue
using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
cutlass::epilogue::EpilogueSimtVectorized
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmall<Gemm, true>();
EXPECT_TRUE(result);
}
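Outside the testbed, the Gemm type assembled above runs through the usual CUTLASS 3.x adapter flow. A minimal sketch, assuming device buffers ptr_A/ptr_B/ptr_C/ptr_D and the sizes below already exist (names and values are illustrative; make_cute_packed_stride comes from "cutlass/util/packed_stride.hpp" and the allocation helper from "cutlass/util/device_memory.h"):

// Sketch: launching the Gemm defined above with assumed buffers and sizes.
using StrideA = typename Gemm::GemmKernel::StrideA;
using StrideB = typename Gemm::GemmKernel::StrideB;
using StrideC = typename Gemm::GemmKernel::StrideC;
using StrideD = typename Gemm::GemmKernel::StrideD;

int M = 512, N = 384, K = 128, L = 1;
typename Gemm::Arguments args{
  cutlass::gemm::GemmUniversalMode::kGemm,
  {M, N, K, L},
  {ptr_A, cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L)),
   ptr_B, cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L))},
  {{1.0f, 0.0f}, // alpha, beta
   ptr_C, cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L)),
   ptr_D, cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L))}
};

Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(args);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
if (gemm_op.can_implement(args) == cutlass::Status::kSuccess) {
  gemm_op.initialize(args, workspace.get());
  gemm_op.run();
}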

TEST(SM100Only_Device_Gemm_f32t_f32n_f32n_simt_f32_align1, 128x128x16) {
// TN layout
using namespace cute;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using TileShape = Shape<_128, _128, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

// Mainloop
using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
cutlass::gemm::KernelMultistage
>::CollectiveOp;

// Epilogue
using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
cutlass::epilogue::EpilogueSimtVectorized
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmall<Gemm, true>();
EXPECT_TRUE(result);
}

/////////////////////////////////////////////////////////////////////////////////////////////////

// CTA tile shape: 64x32x16

/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(SM100Only_Device_Gemm_f32n_f32n_f32n_simt_f32_align1, 64x32x16) {
// NN layout
using namespace cute;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using TileShape = Shape<_64, _32, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

// Mainloop
using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
cutlass::gemm::KernelMultistage
>::CollectiveOp;

// Epilogue
using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
cutlass::epilogue::EpilogueSimtVectorized
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmall<Gemm, true>();
EXPECT_TRUE(result);
}

TEST(SM100Only_Device_Gemm_f32t_f32t_f32n_simt_f32_align1, 64x32x16) {
// TT layout
using namespace cute;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using TileShape = Shape<_64, _32, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

// Mainloop
using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
cutlass::gemm::KernelMultistage
>::CollectiveOp;

// Epilogue
using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
cutlass::epilogue::EpilogueSimtVectorized
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmall<Gemm, true>();
EXPECT_TRUE(result);
}

#endif // defined(CUTLASS_ARCH_MMA_SM100_SUPPORTED)

@ -0,0 +1,337 @@
/***************************************************************************************************
* Copyright (c) 2024 - 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/

/*! \file
\brief Tests for device-wide GEMM interface (SGEMM)
*/

#include "cutlass/cutlass.h"

#include "cutlass/numeric_types.h"
#include "cutlass/arch/mma_sm100.h"

#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"

#include "cutlass/epilogue/dispatch_policy.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"

#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"

#include "../../common/cutlass_unit_test.h"

#include "gemm_testbed_3x.hpp"

#if defined(CUTLASS_ARCH_MMA_SM100_SUPPORTED)

TEST(SM100Only_Device_Gemm_f32n_f32t_f32n_simt_f32_align1_bias_relu, 128x128x16) {
// NT layout
using namespace cute;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using ElementBias = float;
using TileShape = Shape<_128, _128, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

// Mainloop
using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
cutlass::gemm::KernelMultistage
>::CollectiveOp;

// Epilogue
// Treat Clamp as Relu
using FusionOp = cutlass::epilogue::fusion::LinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Clamp, ElementD, ElementCompute, ElementBias, ElementC, ElementCompute, kAlignmentD
>;
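The "treat Clamp as Relu" trick works because clamping to a range whose lower bound is 0 (and whose upper bound is at, or effectively at, the type's maximum) reduces to max(x, 0), which is exactly ReLU. A standalone illustration, not testbed code:

#include <algorithm>
#include <limits>

// Illustration: clamp(x, 0, max) == relu(x) for any finite x.
inline float relu_via_clamp(float x) {
  float lo = 0.0f;
  float hi = std::numeric_limits<float>::max();
  return std::min(std::max(x, lo), hi);
}
// relu_via_clamp(-3.5f) == 0.0f, relu_via_clamp(2.0f) == 2.0f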

using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
cutlass::epilogue::EpilogueSimtVectorized
,FusionOp
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmallFusion<Gemm>(2.0, 6.0);
EXPECT_TRUE(result);
}

TEST(SM100Only_Device_Gemm_f32n_f32t_f32n_simt_f32_align1_bias_relu, 128x256x16) {
// NT layout
using namespace cute;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using ElementBias = float;
using TileShape = Shape<_128, _256, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

// Mainloop
using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
cutlass::gemm::KernelMultistage
>::CollectiveOp;

// Epilogue
// Treat Clamp as Relu
using FusionOp = cutlass::epilogue::fusion::LinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Clamp, ElementD, ElementCompute, ElementBias, ElementC, ElementCompute, kAlignmentD
>;

using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
cutlass::epilogue::EpilogueSimtVectorized
,FusionOp
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmallFusion<Gemm>(2.0, 0.0);
EXPECT_TRUE(result);
}

TEST(SM100Only_Device_Gemm_f32t_f32n_f32n_simt_f32_align1_bias_relu, 64x128x16) {
// TN layout
using namespace cute;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using ElementBias = float;
using TileShape = Shape<_64, _128, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

// Mainloop
using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
cutlass::gemm::KernelMultistage
>::CollectiveOp;

// Epilogue
// Treat Clamp as Relu
using FusionOp = cutlass::epilogue::fusion::LinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Clamp, ElementD, ElementCompute, ElementBias, ElementC, ElementCompute, kAlignmentD
>;

using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
cutlass::epilogue::EpilogueSimtVectorized
,FusionOp
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmallFusion<Gemm>(2.0, 6.0);
EXPECT_TRUE(result);
}

TEST(SM100Only_Device_Gemm_f32t_f32n_f32n_simt_f32_align1_bias_relu, 64x32x16) {
// TN layout
using namespace cute;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using ElementBias = float;
using TileShape = Shape<_64, _32, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

// Mainloop
using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
cutlass::gemm::KernelMultistage
>::CollectiveOp;

// Epilogue
// Treat Clamp as Relu
using FusionOp = cutlass::epilogue::fusion::LinCombPerRowBiasEltAct<
cutlass::epilogue::thread::Clamp, ElementD, ElementCompute, ElementBias, ElementC, ElementCompute, kAlignmentD
>;

using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
cutlass::epilogue::EpilogueSimtVectorized
,FusionOp
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmallFusion<Gemm>(2.0, 0.0);
EXPECT_TRUE(result);
}

#endif // defined(CUTLASS_ARCH_MMA_SM100_SUPPORTED)

@ -0,0 +1,329 @@
/***************************************************************************************************
* Copyright (c) 2024 - 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/

/*! \file
\brief Tests for device-wide GEMM interface (SGEMM)
*/

#include "cutlass/cutlass.h"

#include "cutlass/numeric_types.h"
#include "cutlass/arch/mma_sm100.h"

#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"

#include "cutlass/epilogue/dispatch_policy.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"

#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"

#include "../../common/cutlass_unit_test.h"

#include "gemm_testbed_3x_ptr_array.hpp"

#if defined(CUTLASS_ARCH_MMA_SM100_SUPPORTED)

/////////////////////////////////////////////////////////////////////////////////////////////////

// CTA tile shape: 128x128x16

/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(SM100Only_Device_Gemm_f32n_f32t_f32t_simt_f32_align1_ptr_array, 128x128x16) {
// NT layout
using namespace cute;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::RowMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using TileShape = Shape<_128, _128, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

using EpilogueSchedule = cutlass::epilogue::EpiloguePtrArraySimtVectorized;
using KernelSchedule = cutlass::gemm::KernelPtrArrayMultistage;

using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
KernelSchedule
>::CollectiveOp;

// Epilogue
using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
EpilogueSchedule
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
cutlass::gemm::ArrayProblemShape<Shape<int,int,int,int>>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmall<Gemm, true>();
EXPECT_TRUE(result);
}
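Relative to the single-problem tests above, the ptr-array variants wrap the problem shape in cutlass::gemm::ArrayProblemShape and feed the epilogue arrays of per-batch pointers rather than a single base pointer. Echoing the testbed changes earlier in this commit, a hedged sketch of how such pointer arrays are assembled (names mirror the testbed; sizes are assumed):

// Sketch: one device pointer per batch/group, gathered into a device array.
int L = 4; // number of batches (assumed)
std::vector<const ElementC *> ptr_C_host(L);
std::vector<ElementD *> ptr_D_host(L);
for (int i = 0; i < L; ++i) {
  ptr_C_host[i] = tensors_C[i].device_data();
  ptr_D_host[i] = tensors_D[i].device_data();
}
cutlass::DeviceAllocation<const ElementC *> device_tensors_C(L);
device_tensors_C.copy_from_host(ptr_C_host.data());
cutlass::DeviceAllocation<ElementD *> device_tensors_D(L);
device_tensors_D.copy_from_host(ptr_D_host.data());
// The epilogue arguments then receive the arrays instead of single pointers:
//   { {}, device_tensors_C.get(), stride_c_device.get(),
//     device_tensors_D.get(), stride_d_device.get() }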

TEST(SM100Only_Device_Gemm_f32t_f32n_f32n_simt_f32_align1_ptr_array, 128x128x16) {
// TN layout
using namespace cute;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using TileShape = Shape<_128, _128, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

using EpilogueSchedule = cutlass::epilogue::EpiloguePtrArraySimtVectorized;
using KernelSchedule = cutlass::gemm::KernelPtrArrayMultistage;
// Mainloop
using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
KernelSchedule
>::CollectiveOp;

// Epilogue
using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
EpilogueSchedule
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
cutlass::gemm::ArrayProblemShape<Shape<int,int,int,int>>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmall<Gemm, true>();
EXPECT_TRUE(result);
}

/////////////////////////////////////////////////////////////////////////////////////////////////

// CTA tile shape: 64x256x16

/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(SM100Only_Device_Gemm_f32n_f32n_f32t_simt_f32_align1_ptr_array, 64x256x16) {
// NN layout
using namespace cute;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using TileShape = Shape<_64, _256, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

using EpilogueSchedule = cutlass::epilogue::EpiloguePtrArraySimtVectorized;
using KernelSchedule = cutlass::gemm::KernelPtrArrayMultistage;
// Mainloop
using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
KernelSchedule
>::CollectiveOp;

// Epilogue
using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
EpilogueSchedule
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
cutlass::gemm::ArrayProblemShape<Shape<int,int,int,int>>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmall<Gemm, true>();
EXPECT_TRUE(result);
}

TEST(SM100Only_Device_Gemm_f32t_f32t_f32n_simt_f32_align1_ptr_array, 64x256x16) {
// TT layout
using namespace cute;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using LayoutD = LayoutC;
using ElementA = float;
using ElementB = float;
using ElementC = float;
using ElementD = float;
using ElementAccumulator = float;
using ElementCompute = float;
using TileShape = Shape<_64, _256, _16>;
using ClusterShape = Shape<_1, _1, _1>;
static constexpr int kAlignmentA = 1;
static constexpr int kAlignmentB = 1;
static constexpr int kAlignmentC = 1;
static constexpr int kAlignmentD = 1;

using EpilogueSchedule = cutlass::epilogue::EpiloguePtrArraySimtVectorized;
using KernelSchedule = cutlass::gemm::KernelPtrArrayMultistage;
// Mainloop
using CollectiveMainloop = cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator,
TileShape,
ClusterShape,
cutlass::gemm::collective::StageCount<3>,
KernelSchedule
>::CollectiveOp;

// Epilogue
using CollectiveEpilogue = cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm100,
cutlass::arch::OpClassSimt,
TileShape,
ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator,
ElementCompute,
ElementC,
LayoutC,
kAlignmentC,
ElementD,
LayoutD,
kAlignmentD,
EpilogueSchedule
>::CollectiveOp;

// Kernel
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
cutlass::gemm::ArrayProblemShape<Shape<int,int,int,int>>,
CollectiveMainloop,
CollectiveEpilogue
>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = test::gemm::device::TestSmall<Gemm, true>();
EXPECT_TRUE(result);
}

#endif // defined(CUTLASS_ARCH_MMA_SM100_SUPPORTED)