Updates and Bug fixes to CUTLASS 3.3 (#1232)

This commit is contained in:
Pradeep Ramani
2023-12-05 06:50:49 -08:00
committed by GitHub
parent 4a1709e17e
commit e9e30c2304
31 changed files with 534 additions and 227 deletions

View File

@ -742,7 +742,7 @@ bool TestAllConv2d(
}
// Sweep split-k-slice using serial and parallel reduction with non-unity alpha and non-zero beta for
// a single conv2d problem size. Convolution unit tests take a long time to run so only sweep parameters
// which are absolutely necessary to catch functional bugs. The below code does provide option to sweep
// which are absolutely necessary to catch functional bugs. The below code does provide option to sweep
// alpha and beta for local testing, but only runs one value for alpha and beta.
cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size (
{1, 17, 11, 288}, // input size (NHWC)

View File

@ -609,7 +609,7 @@ bool TestAllInterleavedConv2d(
#if 0
// Sweep split-k-slice using serial and parallel reduction with non-unity alpha and non-zero beta for
// a single conv2d problem size. Convolution unit tests take a long time to run so only sweep parameters
// which are absolutely necessary to catch functional bugs. The below code does provide option to sweep
// which are absolutely necessary to catch functional bugs. The below code does provide option to sweep
// alpha and beta for local testing, but only runs one value for alpha and beta.
cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size (
{1, 17, 11, 288}, // input size (NHWC)

View File

@ -632,7 +632,7 @@ bool TestAllConv2dWithBroadcast(
// Sweep split-k-slice using serial and parallel reduction with non-unity alpha and non-zero beta for
// a single conv2d problem size. Convolution unit tests take a long time to run so only sweep parameters
// which are absolutely necessary to catch functional bugs. The below code does provide option to sweep
// which are absolutely necessary to catch functional bugs. The below code does provide option to sweep
// alpha and beta for local testing, but only runs one value for alpha and beta.
cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size (
{1, 17, 11, 288}, // input size (NHWC)

View File

@ -587,7 +587,7 @@ bool TestAllConv2dWithReduction(
// Sweep split-k-slice using serial and parallel reduction with non-unity alpha and non-zero beta for
// a single conv2d problem size. Convolution unit tests take a long time to run so only sweep parameters
// which are absolutely necessary to catch functional bugs. The below code does provide option to sweep
// which are absolutely necessary to catch functional bugs. The below code does provide option to sweep
// alpha and beta for local testing, but only runs one value for alpha and beta.
cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size (
{1, 17, 11, 288}, // input size (NHWC)

View File

@ -613,7 +613,7 @@ bool TestAllConv3d(
// Sweep split-k-slice using serial reduction with non-unity alpha and non-zero beta for
// a single conv3d problem size. Convolution unit tests take a long time to run so only sweep parameters
// which are absolutely necessary to catch functional bugs. The below code does provide option to sweep
// which are absolutely necessary to catch functional bugs. The below code does provide option to sweep
// alpha and beta for local testing, but only runs one value for alpha and beta.
cutlass::conv::Conv3dProblemSize conv3d_split_k_test_size (
{1, 8, 8, 8, 32}, // input size (NDHWC)

View File

@ -27,6 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
add_subdirectory(core)
add_subdirectory(volta)
add_subdirectory(ampere)
add_subdirectory(hopper)
add_subdirectory(layout)
@ -37,6 +38,7 @@ add_custom_target(
DEPENDS
cutlass_test_unit_cute_layout
cutlass_test_unit_cute_core
cutlass_test_unit_cute_volta
cutlass_test_unit_cute_ampere
cutlass_test_unit_cute_hopper
cutlass_test_unit_cute_msvc_compilation
@ -47,6 +49,7 @@ add_custom_target(
DEPENDS
test_unit_cute_layout
test_unit_cute_core
test_unit_cute_volta
test_unit_cute_ampere
test_unit_cute_hopper
test_unit_cute_msvc_compilation

View File

@ -52,11 +52,11 @@ struct SharedStorage {
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
template <class T, class GmemLayout, class SmemLayout>
__global__ void
bulk_copy_test_device_cute(T const* g_in,
T * g_out,
GmemLayout gmem_layout,
SmemLayout smem_layout)
__global__ void
bulk_copy_test_device_cute(T const* g_in,
T * g_out,
GmemLayout gmem_layout,
SmemLayout smem_layout)
{
// Use Shared Storage structure to allocate and distribute aligned SMEM addresses
extern __shared__ char shared_memory[];
@ -75,7 +75,7 @@ bulk_copy_test_device_cute(T const* g_in,
// Perform the BULK_COPY load
//
auto atom = Copy_Atom<SM90_BULK_COPY_AUTO, uint8_t>{};
auto blkcp = Copy_Traits<SM90_BULK_COPY_AUTO>{};
#if 0
if (thread0()) {
@ -93,7 +93,7 @@ bulk_copy_test_device_cute(T const* g_in,
initialize_barrier(bulk_copy_mbar[0], 1 /*numThreads*/);
set_barrier_transaction_bytes(bulk_copy_mbar[0], transaction_bytes);
copy(atom.with(bulk_copy_mbar[0]), gA, sA);
copy(blkcp.with(bulk_copy_mbar[0]), gA, sA);
}
__syncthreads();
@ -121,11 +121,11 @@ bulk_copy_test_device_cute(T const* g_in,
template <class T, class GLayout, class SLayout>
void run_and_validate(GLayout gmem_layout,
SLayout smem_layout)
SLayout smem_layout)
{
thrust::host_vector<T> h_in(cosize(gmem_layout));
for (int32_t i = 0; i < h_in.size(); ++i) {
h_in[i] = T(i);
for (int32_t i = 0; i < h_in.size(); ++i) {
h_in[i] = T(i);
}
thrust::device_vector<T> d_in = h_in;
@ -148,9 +148,9 @@ void run_and_validate(GLayout gmem_layout,
// } // namespace
TEST(SM90_CuTe_BLKCP, ColMajor)
TEST(SM90_CuTe_BLKCP, ColMajor)
{
auto smem_layout = make_layout(Shape<_32,_32>{}, GenColMajor{});
auto gmem_layout = smem_layout;
run_and_validate< int8_t>(gmem_layout, smem_layout);
@ -158,9 +158,9 @@ TEST(SM90_CuTe_BLKCP, ColMajor)
run_and_validate<tfloat32_t>(gmem_layout, smem_layout);
}
TEST(SM90_CuTe_BLKCP, RowMajor)
TEST(SM90_CuTe_BLKCP, RowMajor)
{
auto smem_layout = make_layout(Shape<_32,_32>{}, GenRowMajor{});
auto gmem_layout = smem_layout;
run_and_validate< int8_t>(gmem_layout, smem_layout);
@ -168,9 +168,9 @@ TEST(SM90_CuTe_BLKCP, RowMajor)
run_and_validate<tfloat32_t>(gmem_layout, smem_layout);
}
TEST(SM90_CuTe_BLKCP, NonCompact)
TEST(SM90_CuTe_BLKCP, NonCompact)
{
{
auto smem_layout = make_layout(Shape<_32,_32>{}, Stride<_1,Int<48>>{});
auto gmem_layout = smem_layout;

View File

@ -51,11 +51,11 @@ struct SharedStorage {
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
template <class T, class GmemLayout, class SmemLayout>
__global__ void
bulk_copy_test_device_cute(T const* g_in,
T * g_out,
GmemLayout gmem_layout,
SmemLayout smem_layout)
__global__ void
bulk_copy_test_device_cute(T const* g_in,
T * g_out,
GmemLayout gmem_layout,
SmemLayout smem_layout)
{
// Use Shared Storage structure to allocate and distribute aligned SMEM addresses
extern __shared__ char shared_memory[];
@ -93,9 +93,9 @@ bulk_copy_test_device_cute(T const* g_in,
Tensor gA_out = make_tensor(make_gmem_ptr(g_out), gmem_layout);
auto atom = Copy_Atom<Copy_Traits<SM90_BULK_COPY_AUTO>, uint8_t>{};
auto blkcp = Copy_Traits<SM90_BULK_COPY_AUTO>{};
copy(atom, sA, gA_out);
copy(blkcp, sA, gA_out);
// Bulk Copy store requires the same sync as TMA store.
tma_store_arrive();
tma_store_wait<0>();
@ -103,11 +103,11 @@ bulk_copy_test_device_cute(T const* g_in,
template <class T, class GLayout, class SLayout>
void run_and_validate(GLayout gmem_layout,
SLayout smem_layout)
SLayout smem_layout)
{
thrust::host_vector<T> h_in(cosize(gmem_layout));
for (int32_t i = 0; i < h_in.size(); ++i) {
h_in[i] = T(i);
for (int32_t i = 0; i < h_in.size(); ++i) {
h_in[i] = T(i);
}
thrust::device_vector<T> d_in = h_in;
@ -130,9 +130,8 @@ void run_and_validate(GLayout gmem_layout,
// } // namespace
TEST(SM90_CuTe_BLKCP, ColMajor)
TEST(SM90_CuTe_BLKCP, ColMajor)
{
auto smem_layout = make_layout(Shape<_32,_32>{}, GenColMajor{});
auto gmem_layout = smem_layout;
run_and_validate< int8_t>(gmem_layout, smem_layout);
@ -140,9 +139,8 @@ TEST(SM90_CuTe_BLKCP, ColMajor)
run_and_validate<tfloat32_t>(gmem_layout, smem_layout);
}
TEST(SM90_CuTe_BLKCP, RowMajor)
TEST(SM90_CuTe_BLKCP, RowMajor)
{
auto smem_layout = make_layout(Shape<_32,_32>{}, GenRowMajor{});
auto gmem_layout = smem_layout;
run_and_validate< int8_t>(gmem_layout, smem_layout);
@ -150,9 +148,8 @@ TEST(SM90_CuTe_BLKCP, RowMajor)
run_and_validate<tfloat32_t>(gmem_layout, smem_layout);
}
TEST(SM90_CuTe_BLKCP, NonCompact)
TEST(SM90_CuTe_BLKCP, NonCompact)
{
{
auto smem_layout = make_layout(Shape<_32,_32>{}, Stride<_1,Int<48>>{});
auto gmem_layout = smem_layout;

View File

@ -0,0 +1,32 @@
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Unit-test executable for CuTe on Volta (SM70): currently covers the
# auto-vectorization behavior of cute::copy (see vectorization_auto.cu).
cutlass_test_unit_add_executable(
cutlass_test_unit_cute_volta
vectorization_auto.cu
)

View File

@ -0,0 +1,132 @@
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <iomanip>
#include <utility>
#include <type_traits>
#include <vector>
#include <numeric>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
using namespace cute;
// Device kernel: fills a register-backed fragment with a deterministic
// pattern (i % 13) and writes it to the tiled sub-block of the global
// tensor through the supplied copy policy. Intended to be launched with
// a single thread (<<<1,1>>>); elements outside the tile are untouched.
// NOTE(review): `local_tile(gC, tiler, 0)` presumably selects the first
// tile of gC — confirm against the CuTe local_tile semantics.
template <class GmemTensor, class RmemTiler, class CopyPolicy>
__global__
void
kernel(GmemTensor gC, RmemTiler tiler, CopyPolicy policy)
{
// Tile the global tensor and make a register tensor of matching shape.
Tensor tCgC = local_tile(gC, tiler, 0);
Tensor rC = make_tensor_like(tCgC);
using T = typename GmemTensor::value_type;
// Seed the fragment with a non-trivial, deterministic pattern so the
// host-side check can reconstruct the expected output exactly.
for (int i = 0; i < size(rC); ++i) {
rC(i) = T(i % 13);
}
#if 0
print(" gC : "); print( gC); print("\n");
print("tCgC : "); print(tCgC); print("\n");
print(" rC : "); print( rC); print("\n");
#endif
// NOTE: only 1 thread, this thread produce a block of 8x8 output. The fringe will not be touched.
//copy(rC, tCgC); // Enable auto-vectorization if static
//copy_vec<T>(rC, tCgC); // Disable auto-vectorization always
copy(policy, rC, tCgC); // Use a policy to establish vectorization assumptions
}
// Host-side driver: launches `kernel` once over a zero-initialized global
// tensor, independently builds the expected result on the host (same
// i % 13 pattern written only into the tiled sub-block), and compares the
// two elementwise. At most the first 3 mismatches are reported.
//
// T           - element type under test
// policy      - cute copy policy controlling vectorization assumptions
// gmem_layout - layout of the global tensor being written
// rmem_tiler  - tiler selecting the sub-block the kernel writes
template <class T, class CopyPolicy, class GmemLayout, class RmemTiler>
void
test_copy_vectorization(CopyPolicy policy, GmemLayout gmem_layout, RmemTiler rmem_tiler)
{
// Zero-filled input so untouched (fringe) elements must compare equal to 0.
thrust::host_vector<T> h_in(cosize(gmem_layout), T(0));
thrust::device_vector<T> d_in = h_in;
Tensor m_in = make_tensor(make_gmem_ptr(raw_pointer_cast(d_in.data())), gmem_layout);
// Single-thread launch; the kernel writes only the tiled sub-block in place.
kernel<<<1,1>>>(m_in, rmem_tiler, policy);
thrust::host_vector<T> h_out = d_in;
Tensor result = make_tensor(h_out.data(), gmem_layout);
thrust::host_vector<T> h_true = h_in;
Tensor ref = make_tensor(h_true.data(), gmem_layout);
// Set the values directly in the reference tensor, no copy
Tensor ref_tile = local_tile(ref, rmem_tiler, 0);
for (int i = 0; i < size(ref_tile); ++i) {
ref_tile(i) = T(i % 13);
}
// Compare the reference and the result. Print only the first 3 errors.
// print_tensor(result);
int count = 3;
for (int i = 0; i < size(ref) && count > 0; ++i) {
EXPECT_EQ(result(i), ref(i));
if (result(i) != ref(i)) {
--count;
}
}
}
// Convenience overload: runs the test with cute's DefaultCopy policy,
// i.e. auto-vectorize only when alignment is statically provable.
template <class T, class GmemLayout, class RmemTiler>
void
test_copy_vectorization(GmemLayout gmem_layout, RmemTiler rmem_tiler)
{
test_copy_vectorization<T>(DefaultCopy{}, gmem_layout, rmem_tiler);
}
// Exercises cute::copy auto-vectorization decisions on SM70:
//  - fully static layouts are assumed aligned and get vectorized;
//  - dynamic layouts are not vectorized by default;
//  - AutoVectorizingCopyWithAssumedAlignment<128> opts dynamic layouts in.
// The commented-out cases document configurations that are expected to
// fail (misalignment caught by static assertions, or a bad assumption).
TEST(SM70_CuTe_Volta, SimpleVec)
{
// Fully static layouts are assumed to be aligned -- these will be vectorized
test_copy_vectorization<float>(make_layout(make_shape(Int<8>{}, Int<8>{})), Shape<_8,_8>{});
test_copy_vectorization<float>(make_layout(make_shape(Int<12>{}, Int<12>{})), Shape<_8,_8>{});
// Fails in vectorization recast due to misalignment and static assertions
//test_copy_vectorization<float>(make_layout(make_shape(Int<9>{}, Int<9>{})), Shape<_8,_8>{});
// Dynamic layouts are not assumed to be aligned -- these will not be vectorized
test_copy_vectorization<float>(make_layout(make_shape(12,12)), Shape<_8,_8>{});
test_copy_vectorization<float>(make_layout(make_shape( 9, 9)), Shape<_8,_8>{});
// Dynamic layouts that are assumed to be aligned -- these will be vectorized
test_copy_vectorization<float>(AutoVectorizingCopyWithAssumedAlignment<128>{}, make_layout(make_shape( 8, 8)), Shape<_8,_8>{});
test_copy_vectorization<float>(AutoVectorizingCopyWithAssumedAlignment<128>{}, make_layout(make_shape(12,12)), Shape<_8,_8>{});
// Fails -- bad alignment assumption
//test_copy_vectorization<float>(AutoVectorizingCopyWithAssumedAlignment<128>{}, make_layout(make_shape( 9, 9)), Shape<_8,_8>{});
}