0
test/python/backend/conv/__init__.py
Normal file
0
test/python/backend/conv/__init__.py
Normal file
@ -0,0 +1,233 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
|
||||
from cutlass.backend.conv2d_operation import *
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
    """SM80 implicit-GEMM dgrad tests: f16 NHWC operands, f16 accumulation.

    The four tests cover both iterator algorithms (analytic / optimized) at
    operand alignments 8 and 4. The original bodies were verbatim copies that
    differed only in those two knobs plus, for alignment 4, a custom problem
    size list, so operation construction is factored into private helpers.
    """

    @staticmethod
    def _make_operation(iterator_algorithm, alignment):
        # Build the dgrad Conv2dOperation shared by all tests, varying only the
        # iterator algorithm and the common A/B/C alignment.
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=alignment)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=alignment)
        C = TensorDescription(
            element=cutlass_bindings.float16,
            layout=cutlass_bindings.TensorNHWC,
            alignment=alignment)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 64], stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float16)

        return Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Unity,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1
        )

    @staticmethod
    def _align4_problem_sizes():
        # Channel count 12 is divisible by 4 but not by 8, so these sizes
        # specifically exercise the alignment-4 kernels.
        return [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
                cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
                cutlass_bindings.MatrixCoord(3, 3),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1
            ),
        ]

    def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        operation = self._make_operation(cutlass_bindings.conv.IteratorAlgorithm.analytic, 8)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        operation = self._make_operation(cutlass_bindings.conv.IteratorAlgorithm.optimized, 8)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
        operation = self._make_operation(cutlass_bindings.conv.IteratorAlgorithm.analytic, 4)
        self.assertTrue(test_all_conv2d(operation, self._align4_problem_sizes()))

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
        operation = self._make_operation(cutlass_bindings.conv.IteratorAlgorithm.optimized, 4)
        self.assertTrue(test_all_conv2d(operation, self._align4_problem_sizes()))
|
||||
|
||||
if __name__ == '__main__':
    # This file only does `from cutlass.backend... import *`, which does not
    # reliably bind the package name `cutlass` itself — import it explicitly so
    # the call below cannot fail with NameError.
    import cutlass.backend
    # 2**26 = 64 MiB; presumably (initial, maximum) pool sizes — confirm
    # against cutlass.backend.get_memory_pool.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,209 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
    """SM80 implicit-GEMM dgrad tests: f16 NHWC in, f32 NHWC out, f32 accumulation.

    The four tests sweep the optimized iterator over threadblock K extents
    {32, 64} crossed with pipeline stage counts {3, 4}; everything else was
    identical across the original copies, so construction is factored into a
    single helper parameterized by those two values.
    """

    @staticmethod
    def _make_operation(threadblock_k, stages):
        # Build the unity-stride optimized-dgrad operation for one
        # (threadblock K, stage count) combination.
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=8)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=8)
        # Output is f32, so its alignment (in elements) is half that of the
        # f16 operands for the same vector width.
        C = TensorDescription(
            element=cutlass_bindings.float32,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, threadblock_k], stages=stages,
            warp_count=[2, 2, 1],
            math_instruction=math_inst
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        return Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Unity,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1
        )

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage3(self):
        self.assertTrue(test_all_conv2d(self._make_operation(threadblock_k=32, stages=3)))

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage4(self):
        self.assertTrue(test_all_conv2d(self._make_operation(threadblock_k=32, stages=4)))

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage3_64(self):
        self.assertTrue(test_all_conv2d(self._make_operation(threadblock_k=64, stages=3)))

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage4_64(self):
        self.assertTrue(test_all_conv2d(self._make_operation(threadblock_k=64, stages=4)))
|
||||
|
||||
if __name__ == '__main__':
    # NOTE(review): the two 2**26 (64 MiB) arguments appear to be pool sizes —
    # confirm against cutlass.backend.get_memory_pool before changing.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,130 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend.conv2d_operation import *
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
    """SM80 implicit-GEMM dgrad tests: f32 NHWC operands on the SIMT (non-tensor-op) path.

    Both tests build dgrad operations (conv_kind=dgrad); the first was
    misleadingly named `..._Fprop_Analytic_...` in the original and has been
    renamed to match what it actually exercises. Construction is shared via a
    helper parameterized by iterator algorithm and warp count, the only two
    values that differed between the copies.
    """

    @staticmethod
    def _make_operation(iterator_algorithm, warp_count):
        # Build the unity-stride SIMT dgrad operation for one
        # (iterator algorithm, warp count) combination.
        math_inst = MathInstruction(
            instruction_shape=[1, 1, 1],
            element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.Simt,
            math_operation=MathOperation.multiply_add
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4)
        C = TensorDescription(
            element=cutlass_bindings.float32,
            layout=cutlass_bindings.TensorNHWC,
            alignment=1)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 8], stages=4,
            warp_count=warp_count,
            math_instruction=math_inst
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        return Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Unity,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1
        )

    def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        # Renamed from `..._Fprop_Analytic_...`: the operation under test is dgrad.
        operation = self._make_operation(
            cutlass_bindings.conv.IteratorAlgorithm.analytic, warp_count=[4, 2, 1])
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        operation = self._make_operation(
            cutlass_bindings.conv.IteratorAlgorithm.optimized, warp_count=[2, 4, 1])
        self.assertTrue(test_all_conv2d(operation))
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # NOTE(review): the two 2**26 (64 MiB) arguments appear to be pool sizes —
    # confirm against cutlass.backend.get_memory_pool before changing.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,127 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmTF32nhwcTF32nhwcTF32nhwcTensorOpF32SM80(unittest.TestCase):
    """SM80 implicit-GEMM dgrad tests on the tf32 tensor-op path (f32 storage,
    16x8x8 instruction shape), f32 accumulation and output.

    NOTE(review): the class name says TF32nhwc for the output, but C is plain
    f32 — presumably a naming slip inherited from the upstream .cu test; left
    unchanged to avoid breaking external test selection.

    The two tests differed only in iterator algorithm, so construction is
    factored into a single helper.
    """

    @staticmethod
    def _make_operation(iterator_algorithm):
        # Build the unity-stride tf32 dgrad operation for one iterator algorithm.
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 8],
            element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4)
        C = TensorDescription(
            element=cutlass_bindings.float32,
            layout=cutlass_bindings.TensorNHWC,
            alignment=8)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 16], stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        return Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Unity,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1
        )

    def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
        operation = self._make_operation(cutlass_bindings.conv.IteratorAlgorithm.analytic)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
        operation = self._make_operation(cutlass_bindings.conv.IteratorAlgorithm.optimized)
        self.assertTrue(test_all_conv2d(operation))
|
||||
|
||||
if __name__ == '__main__':
    # NOTE(review): the two 2**26 (64 MiB) arguments appear to be pool sizes —
    # confirm against cutlass.backend.get_memory_pool before changing.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,196 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_few_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
def conv2d_few_channel_problemsizes(channels):
    """Return Conv2d fprop problem sizes for the few-channels iterator tests.

    Every case uses ``channels`` input/filter channels, unit dilation,
    padding of 1 on all sides, cross-correlation mode, and a single
    split-K slice / group.

    NOTE(review): applying ``unittest.skipIf`` to a plain helper (rather
    than the TestCase) only skips because calling the wrapped helper
    raises SkipTest from inside the test body; the decorator arguably
    belongs on the test class instead -- confirm before moving it.
    """
    # (N, H, W), (K, R, S), (stride_h, stride_w) per case.
    shapes = [
        ((1, 8, 8),     (16, 3, 3), (2, 2)),
        ((1, 16, 16),   (16, 3, 3), (2, 2)),
        ((1, 16, 16),   (16, 7, 7), (1, 1)),
        ((1, 224, 224), (32, 7, 7), (1, 1)),
        ((1, 224, 224), (64, 7, 7), (2, 2)),
        ((1, 224, 224), (64, 5, 5), (1, 1)),
        ((1, 224, 224), (64, 5, 5), (2, 2)),
    ]
    return [
        cutlass_bindings.conv.Conv2dProblemSize(
            cutlass_bindings.Tensor4DCoord(n, h, w, channels),
            cutlass_bindings.Tensor4DCoord(k, r, s, channels),
            cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
            cutlass_bindings.MatrixCoord(stride_h, stride_w),
            cutlass_bindings.MatrixCoord(1, 1),
            cutlass_bindings.conv.Mode.cross_correlation,
            1, 1
        )
        for (n, h, w), (k, r, s), (stride_h, stride_w) in shapes
    ]
|
||||
|
||||
class Conv2dFpropFewChannelsF16NHWCF16NHWCF16HNWCTensorOpF32SM80(unittest.TestCase):
    """Few-channels fprop conv2d: f16 NHWC in/out, tensor-op f32 accumulation, SM80."""

    def _run_few_channels(self, instruction_shape, alignment, threadblock_shape, stages, channels):
        """Build a few-channels fprop operation and verify it on the
        ``channels``-channel problem sizes."""
        math_inst = MathInstruction(
            instruction_shape=instruction_shape,
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        nhwc = cutlass_bindings.TensorNHWC
        # Operand alignment matches the channel count under test; the f16
        # output keeps the full 8-element vector width.
        A = TensorDescription(element=math_inst.element_a, layout=nhwc, alignment=alignment)
        B = TensorDescription(element=math_inst.element_b, layout=nhwc, alignment=alignment)
        C = TensorDescription(element=cutlass_bindings.float16, layout=nhwc, alignment=8)

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape,
            stages=stages,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.few_channels,
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_conv2d(operation, conv2d_few_channel_problemsizes(channels)))

    def test_SM80_Device_Conv2d_Fprop_Few_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_2(self):
        # 2-channel input: alignment 2, 16x8x16 instruction, 3-stage pipeline.
        self._run_few_channels([16, 8, 16], 2, [128, 128, 64], 3, 2)

    def test_SM80_Device_Conv2d_Fprop_Few_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_1(self):
        # 1-channel input: alignment 1, narrower 16x8x8 instruction, 2 stages.
        self._run_few_channels([16, 8, 8], 1, [128, 128, 32], 2, 1)
|
||||
|
||||
if __name__ == '__main__':
    # Reserve the device memory pool (64 MiB min/max) before running any kernels.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,220 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_fixed_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
def conv2d_fixed_channel_problemsizes(channels):
    """Return Conv2d fprop problem sizes for the fixed-channels iterator tests.

    Every case uses ``channels`` input/filter channels, unit dilation,
    padding of 1 on all sides, cross-correlation mode, and a single
    split-K slice / group.

    NOTE(review): as with the few-channels helper, the skipIf decorator
    on a plain function only skips via SkipTest raised when it is called;
    consider decorating the test class instead -- confirm before moving.
    """
    # (N, H, W), (K, R, S), (stride_h, stride_w) per case.
    shapes = [
        ((1, 8, 8),     (16, 3, 3), (2, 2)),
        ((1, 224, 224), (32, 7, 7), (1, 1)),
        ((1, 224, 224), (64, 7, 7), (2, 2)),
        ((1, 224, 224), (64, 5, 5), (1, 1)),
        ((1, 224, 224), (64, 5, 5), (2, 2)),
    ]
    return [
        cutlass_bindings.conv.Conv2dProblemSize(
            cutlass_bindings.Tensor4DCoord(n, h, w, channels),
            cutlass_bindings.Tensor4DCoord(k, r, s, channels),
            cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
            cutlass_bindings.MatrixCoord(stride_h, stride_w),
            cutlass_bindings.MatrixCoord(1, 1),
            cutlass_bindings.conv.Mode.cross_correlation,
            1, 1
        )
        for (n, h, w), (k, r, s), (stride_h, stride_w) in shapes
    ]
|
||||
|
||||
class Conv2dFpropFixedChannelsF16NHWCF16NHWCF16HNWCTensorOpF32SM80(unittest.TestCase):
    """Fixed-channels fprop conv2d: f16 NHWC in/out, tensor-op f32 accumulation, SM80."""

    def _run_fixed_channels(self, alignment, channels):
        """Build a fixed-channels fprop operation with the given operand
        alignment and verify it on the ``channels``-channel problem sizes."""
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        nhwc = cutlass_bindings.TensorNHWC
        # A/B alignment tracks the channel count under test; the f16 output
        # keeps the full 8-element vector width.
        A = TensorDescription(element=math_inst.element_a, layout=nhwc, alignment=alignment)
        B = TensorDescription(element=math_inst.element_b, layout=nhwc, alignment=alignment)
        C = TensorDescription(element=cutlass_bindings.float16, layout=nhwc, alignment=8)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 64],
            stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.fixed_channels,
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_conv2d(operation, conv2d_fixed_channel_problemsizes(channels)))

    def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_8(self):
        self._run_fixed_channels(8, 8)

    def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_4(self):
        self._run_fixed_channels(4, 4)

    def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_2(self):
        self._run_fixed_channels(2, 2)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Reserve the device memory pool (64 MiB min/max) before running any kernels.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,341 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
    """Implicit-GEMM fprop conv2d: f16 NHWC in/out, tensor-op f16 accumulation, SM80."""

    @staticmethod
    def _make_operation(iterator_algorithm, alignment):
        """Build an fprop Conv2dOperation with the given iterator algorithm
        and A/B operand alignment (output C is always alignment 8)."""
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float16,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        nhwc = cutlass_bindings.TensorNHWC
        A = TensorDescription(element=math_inst.element_a, layout=nhwc, alignment=alignment)
        B = TensorDescription(element=math_inst.element_b, layout=nhwc, alignment=alignment)
        C = TensorDescription(element=cutlass_bindings.float16, layout=nhwc, alignment=8)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 64],
            stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float16)

        return Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=iterator_algorithm,
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

    @staticmethod
    def _problem_sizes(mid_channels, large_channels):
        """Problem sizes whose channel counts force sub-8 operand alignment.

        The first case always has 12 channels; the second uses
        ``mid_channels`` and the third ``large_channels``.
        """
        # (input NHWC, filter KRSC, padding) per case; stride is 3x3,
        # dilation 1x1 everywhere.
        specs = [
            ((1, 4, 4, 12), (8, 3, 3, 12), (0, 0, 0, 0)),
            ((1, 4, 4, mid_channels), (8, 3, 3, mid_channels), (0, 0, 0, 0)),
            ((1, 23, 56, large_channels), (128, 3, 3, large_channels), (4, 0, 5, 0)),
        ]
        return [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(*inp),
                cutlass_bindings.Tensor4DCoord(*flt),
                cutlass_bindings.Tensor4DCoord(*pad),
                cutlass_bindings.MatrixCoord(3, 3),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1
            )
            for inp, flt, pad in specs
        ]

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        operation = self._make_operation(cutlass_bindings.conv.IteratorAlgorithm.analytic, 8)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        operation = self._make_operation(cutlass_bindings.conv.IteratorAlgorithm.optimized, 8)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align2(self):
        operation = self._make_operation(cutlass_bindings.conv.IteratorAlgorithm.analytic, 2)
        self.assertTrue(test_all_conv2d(operation, self._problem_sizes(14, 98)))

    def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align2(self):
        operation = self._make_operation(cutlass_bindings.conv.IteratorAlgorithm.optimized, 2)
        self.assertTrue(test_all_conv2d(operation, self._problem_sizes(14, 98)))

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
        # NOTE(review): the original test is named "Analytic" but constructs
        # an *optimized*-iterator operation; behavior preserved as-is --
        # confirm which was intended before renaming or changing it.
        operation = self._make_operation(cutlass_bindings.conv.IteratorAlgorithm.optimized, 4)
        self.assertTrue(test_all_conv2d(operation, self._problem_sizes(28, 100)))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Reserve the device memory pool (64 MiB min/max) before running any kernels.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,86 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
    """SM80 conv2d fprop: F16 NHWC inputs, F32 NHWC output, tensor-op F32 accumulation."""

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
        """Run the analytic-iterator fprop kernel through the full conv2d test suite."""
        # 16x8x16 HMMA tensor-op instruction accumulating in F32.
        inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        # NHWC operands: half-precision A/B at alignment 8, F32 output at alignment 4.
        tensor_a = TensorDescription(
            element=inst.element_a, layout=cutlass_bindings.TensorNHWC, alignment=8)
        tensor_b = TensorDescription(
            element=inst.element_b, layout=cutlass_bindings.TensorNHWC, alignment=8)
        tensor_c = TensorDescription(
            element=cutlass_bindings.float32, layout=cutlass_bindings.TensorNHWC, alignment=4)

        # 128x128x64 threadblock, 3 pipeline stages, 2x2x1 warps.
        tile_desc = TileDescription(
            threadblock_shape=[128, 128, 64],
            stages=3,
            warp_count=[2, 2, 1],
            math_instruction=inst,
        )

        epilogue = LinearCombination(
            tensor_c.element, tensor_c.alignment,
            inst.element_accumulator, cutlass_bindings.float32)

        kernel = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
            arch=80,
            tile_description=tile_desc,
            A=tensor_a, B=tensor_b, C=tensor_c,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_conv2d(kernel))
|
||||
|
||||
if __name__ == '__main__':
    # Reserve 64 MiB memory pools up front so individual tests don't reallocate.
    pool_bytes = 2**26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,128 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend.conv2d_operation import *
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
    """SM80 conv2d fprop: F32 NHWC operands on the SIMT (CUDA-core) math path."""

    def _run_fprop(self, iterator_algorithm, warp_count, swizzling_functor):
        """Build an f32 SIMT fprop kernel with the given knobs and run the conv2d suite."""
        # Scalar 1x1x1 SIMT multiply-add instruction, F32 throughout.
        inst = MathInstruction(
            instruction_shape=[1, 1, 1],
            element_a=cutlass_bindings.float32,
            element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.Simt,
            math_operation=MathOperation.multiply_add,
        )

        # A/B at alignment 4; output at alignment 1.
        tensor_a = TensorDescription(
            element=inst.element_a, layout=cutlass_bindings.TensorNHWC, alignment=4)
        tensor_b = TensorDescription(
            element=inst.element_b, layout=cutlass_bindings.TensorNHWC, alignment=4)
        tensor_c = TensorDescription(
            element=cutlass_bindings.float32, layout=cutlass_bindings.TensorNHWC, alignment=1)

        tile_desc = TileDescription(
            threadblock_shape=[128, 128, 8], stages=4,
            warp_count=warp_count, math_instruction=inst)

        epilogue = LinearCombination(
            tensor_c.element, tensor_c.alignment,
            inst.element_accumulator, cutlass_bindings.float32)

        kernel = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_desc,
            A=tensor_a, B=tensor_b, C=tensor_c,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue,
            swizzling_functor=swizzling_functor,
        )

        self.assertTrue(test_all_conv2d(kernel))

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        """Analytic iterator, 4x2x1 warps, swizzle factor 2."""
        self._run_fprop(
            cutlass_bindings.conv.IteratorAlgorithm.analytic,
            [4, 2, 1],
            cutlass_bindings.IdentitySwizzle2)

    def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        """Optimized iterator, 2x4x1 warps, swizzle factor 1."""
        self._run_fprop(
            cutlass_bindings.conv.IteratorAlgorithm.optimized,
            [2, 4, 1],
            cutlass_bindings.IdentitySwizzle1)
|
||||
|
||||
if __name__ == '__main__':
    # Reserve 64 MiB memory pools up front so individual tests don't reallocate.
    pool_bytes = 2**26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,139 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmTF32nhwcTF32nhwcTF32nhwcTensorOpF32SM80(unittest.TestCase):
    """SM80 conv2d fprop: TF32 tensor-op path (F32 NHWC operands, F32 accumulation)."""

    def _run_fprop(self, iterator_algorithm, alignment_ab, problem_sizes=None):
        """Build a TF32 tensor-op fprop kernel and run it through the conv2d suite.

        alignment_ab sets the A/B operand alignment; problem_sizes, when given,
        restricts the suite to those explicit problem shapes.
        """
        # 16x8x8 TF32 tensor-op instruction accumulating in F32.
        inst = MathInstruction(
            instruction_shape=[16, 8, 8],
            element_a=cutlass_bindings.float32,
            element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        tensor_a = TensorDescription(
            element=inst.element_a, layout=cutlass_bindings.TensorNHWC,
            alignment=alignment_ab)
        tensor_b = TensorDescription(
            element=inst.element_b, layout=cutlass_bindings.TensorNHWC,
            alignment=alignment_ab)
        tensor_c = TensorDescription(
            element=cutlass_bindings.float32, layout=cutlass_bindings.TensorNHWC,
            alignment=8)

        tile_desc = TileDescription(
            threadblock_shape=[128, 128, 16], stages=3,
            warp_count=[2, 2, 1], math_instruction=inst)

        epilogue = LinearCombination(
            tensor_c.element, tensor_c.alignment,
            inst.element_accumulator, cutlass_bindings.float32)

        kernel = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_desc,
            A=tensor_a, B=tensor_b, C=tensor_c,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        if problem_sizes is None:
            self.assertTrue(test_all_conv2d(kernel))
        else:
            self.assertTrue(test_all_conv2d(kernel, problem_sizes))

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
        """Analytic iterator at alignment 4 over the default problem set."""
        self._run_fprop(cutlass_bindings.conv.IteratorAlgorithm.analytic, 4)

    def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_align2(self):
        """Optimized iterator at alignment 2 on a single explicit 3x3 problem."""
        problems = [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),    # input NHWC
                cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),    # filter KRSC
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),     # padding
                cutlass_bindings.MatrixCoord(3, 3),             # stride
                cutlass_bindings.MatrixCoord(1, 1),             # dilation
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1,
            )
        ]
        self._run_fprop(cutlass_bindings.conv.IteratorAlgorithm.optimized, 2, problems)
|
||||
|
||||
if __name__ == '__main__':
    # Reserve 64 MiB memory pools up front so individual tests don't reallocate.
    pool_bytes = 2**26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,285 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_strided_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dStridedDgradImplicitGemmF16NHWCF16NHWCF32NHWCTensorOpF32SM80(unittest.TestCase):
    """SM80 strided-dgrad conv2d: F16 NHWC inputs, F32 NHWC output, tensor-op F32 accum."""

    def _run_dgrad(self, iterator_algorithm, alignment_ab, threadblock_shape,
                   warp_count, problem_sizes=None):
        """Build a strided-dgrad kernel with the given knobs and run the conv2d suite.

        alignment_ab sets the A/B operand alignment; problem_sizes, when given,
        restricts the suite to those explicit problem shapes.
        """
        # 16x8x16 HMMA tensor-op instruction accumulating in F32.
        inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        tensor_a = TensorDescription(
            element=inst.element_a, layout=cutlass_bindings.TensorNHWC,
            alignment=alignment_ab)
        tensor_b = TensorDescription(
            element=inst.element_b, layout=cutlass_bindings.TensorNHWC,
            alignment=alignment_ab)
        tensor_c = TensorDescription(
            element=cutlass_bindings.float32, layout=cutlass_bindings.TensorNHWC,
            alignment=4)

        tile_desc = TileDescription(
            threadblock_shape=threadblock_shape, stages=3,
            warp_count=warp_count, math_instruction=inst)

        epilogue = LinearCombination(
            tensor_c.element, tensor_c.alignment,
            inst.element_accumulator, cutlass_bindings.float32)

        kernel = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.dgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_desc,
            A=tensor_a, B=tensor_b, C=tensor_c,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue,
            # Strided dgrad requires its dedicated swizzling functor.
            swizzling_functor=cutlass_bindings.StridedDgradIdentitySwizzle1,
        )

        if problem_sizes is None:
            self.assertTrue(test_all_conv2d(kernel))
        else:
            self.assertTrue(test_all_conv2d(kernel, problem_sizes))

    def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32(self):
        """Analytic iterator, 128x128x32 threadblock, alignment 8."""
        self._run_dgrad(
            cutlass_bindings.conv.IteratorAlgorithm.analytic,
            8, [128, 128, 32], [2, 2, 1])

    def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x256_64x3_64x64x64(self):
        """Analytic iterator, larger 128x256x64 threadblock, alignment 8."""
        self._run_dgrad(
            cutlass_bindings.conv.IteratorAlgorithm.analytic,
            8, [128, 256, 64], [2, 4, 1])

    def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4_128x128_32x3_64x64x32(self):
        """Analytic iterator at alignment 4 on a single explicit stride-3 problem."""
        problems = [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),    # input NHWC
                cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),    # filter KRSC
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),     # padding
                cutlass_bindings.MatrixCoord(3, 3),             # stride
                cutlass_bindings.MatrixCoord(1, 1),             # dilation
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1,
            ),
        ]
        self._run_dgrad(
            cutlass_bindings.conv.IteratorAlgorithm.analytic,
            4, [128, 128, 32], [2, 2, 1], problems)

    def test_SM80_Device_Conv2d_Strided_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32(self):
        """Optimized iterator, 128x128x32 threadblock, alignment 8."""
        self._run_dgrad(
            cutlass_bindings.conv.IteratorAlgorithm.optimized,
            8, [128, 128, 32], [2, 2, 1])

    def test_SM80_Device_Conv2d_Strided_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32_align4(self):
        """Optimized iterator at alignment 4 on explicit stride-2 1x1-filter problems."""
        problems = [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 56, 56, 12),
                cutlass_bindings.Tensor4DCoord(8, 1, 1, 12),
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
                cutlass_bindings.MatrixCoord(2, 2),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1,
            ),
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 55, 55, 12),  # odd extent exercises stride remainder
                cutlass_bindings.Tensor4DCoord(8, 1, 1, 12),
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
                cutlass_bindings.MatrixCoord(2, 2),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1,
            ),
        ]
        self._run_dgrad(
            cutlass_bindings.conv.IteratorAlgorithm.optimized,
            4, [128, 128, 32], [2, 2, 1], problems)
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Reserve 64 MiB memory pools up front so individual tests don't reallocate.
    pool_bytes = 2**26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,129 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
    """SM80 conv2d wgrad: all-F16 NHWC operands with tensor-op F16 accumulation."""

    def _run_wgrad(self, iterator_algorithm):
        """Build an all-F16 wgrad kernel for the given iterator algorithm and run the suite."""
        # 16x8x16 HMMA tensor-op instruction accumulating in F16.
        inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float16,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        # All three operands are F16 NHWC at alignment 8.
        tensor_a = TensorDescription(
            element=inst.element_a, layout=cutlass_bindings.TensorNHWC, alignment=8)
        tensor_b = TensorDescription(
            element=inst.element_b, layout=cutlass_bindings.TensorNHWC, alignment=8)
        tensor_c = TensorDescription(
            element=cutlass_bindings.float16, layout=cutlass_bindings.TensorNHWC, alignment=8)

        tile_desc = TileDescription(
            threadblock_shape=[128, 128, 64], stages=3,
            warp_count=[2, 2, 1], math_instruction=inst)

        epilogue = LinearCombination(
            tensor_c.element, tensor_c.alignment,
            inst.element_accumulator, cutlass_bindings.float16)

        kernel = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.wgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_desc,
            A=tensor_a, B=tensor_b, C=tensor_c,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_conv2d(kernel))

    def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        """Analytic iterator variant."""
        self._run_wgrad(cutlass_bindings.conv.IteratorAlgorithm.analytic)

    def test_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        """Optimized iterator variant."""
        self._run_wgrad(cutlass_bindings.conv.IteratorAlgorithm.optimized)
|
||||
|
||||
if __name__ == '__main__':
    # Reserve 64 MiB memory pools up front so individual tests don't reallocate.
    pool_bytes = 2**26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,274 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
    """SM80 wgrad tests: f16 NHWC inputs, f32 NHWC output, f32 tensor-op accumulate."""

    @staticmethod
    def _run(iterator_algorithm, instruction_shape, threadblock_shape, warp_count,
             ab_alignment, problem_sizes=None):
        """Build one wgrad Conv2dOperation and run the shared conv2d test harness.

        iterator_algorithm: analytic or optimized tile iterator.
        instruction_shape:  MMA instruction shape, e.g. [16, 8, 8].
        threadblock_shape:  CTA tile (M, N, K).
        warp_count:         warps per CTA along (M, N, K).
        ab_alignment:       vector alignment for the f16 A/B operands.
        problem_sizes:      optional explicit Conv2dProblemSize list; None runs
                            the harness's default problem sweep.
        """
        math_inst = MathInstruction(
            instruction_shape=instruction_shape,
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=ab_alignment,
        )
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=ab_alignment,
        )
        C = TensorDescription(
            element=cutlass_bindings.float32,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4,
        )

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape,
            stages=3,
            warp_count=warp_count,
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.wgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        if problem_sizes is None:
            return test_all_conv2d(operation)
        return test_all_conv2d(operation, problem_sizes)

    @staticmethod
    def _small_align4_problem():
        """Single tiny problem whose channel count (12) exercises alignment-4 paths."""
        return [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
                cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
                cutlass_bindings.MatrixCoord(3, 3),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1,
            ),
        ]

    def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
        """Analytic iterator, 128x128x16 CTA, alignment 8."""
        self.assertTrue(self._run(
            cutlass_bindings.conv.IteratorAlgorithm.analytic,
            instruction_shape=[16, 8, 8],
            threadblock_shape=[128, 128, 16],
            warp_count=[2, 2, 1],
            ab_alignment=8,
        ))

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
        """Optimized iterator, 128x128x16 CTA, alignment 8."""
        self.assertTrue(self._run(
            cutlass_bindings.conv.IteratorAlgorithm.optimized,
            instruction_shape=[16, 8, 8],
            threadblock_shape=[128, 128, 16],
            warp_count=[2, 2, 1],
            ab_alignment=8,
        ))

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_64x256_32x4_64x64x32(self):
        """Optimized iterator, 64x256x32 CTA, 1x4x1 warps.

        NOTE(review): the test name suggests 4 stages ("32x4") but the
        configuration uses stages=3 — confirm against the C++ unit test.
        """
        self.assertTrue(self._run(
            cutlass_bindings.conv.IteratorAlgorithm.optimized,
            instruction_shape=[16, 8, 16],
            threadblock_shape=[64, 256, 32],
            warp_count=[1, 4, 1],
            ab_alignment=8,
        ))

    def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
        """Analytic iterator with alignment-4 A/B on a channel-count-12 problem."""
        self.assertTrue(self._run(
            cutlass_bindings.conv.IteratorAlgorithm.analytic,
            instruction_shape=[16, 8, 8],
            threadblock_shape=[128, 128, 16],
            warp_count=[2, 2, 1],
            ab_alignment=4,
            problem_sizes=self._small_align4_problem(),
        ))

    def test_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
        """Optimized iterator with alignment-4 A/B on a channel-count-12 problem."""
        self.assertTrue(self._run(
            cutlass_bindings.conv.IteratorAlgorithm.optimized,
            instruction_shape=[16, 8, 8],
            threadblock_shape=[128, 128, 16],
            warp_count=[2, 2, 1],
            ab_alignment=4,
            problem_sizes=self._small_align4_problem(),
        ))
|
||||
|
||||
if __name__ == '__main__':
    # Reserve a 64 MiB device/host memory pool before running the test suite.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,128 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend.conv2d_operation import *
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
    """SM80 wgrad tests: f32 NHWC everywhere, SIMT (CUDA-core) math."""

    @staticmethod
    def _run(iterator_algorithm):
        """Build a SIMT f32 wgrad operation with the fixed tiling below and test it."""
        # SIMT "MMA" is scalar: 1x1x1 instruction shape.
        math_inst = MathInstruction(
            instruction_shape=[1, 1, 1],
            element_a=cutlass_bindings.float32,
            element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.Simt,
            math_operation=MathOperation.multiply_add,
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4,
        )
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4,
        )
        # Output alignment 1 for the SIMT epilogue.
        C = TensorDescription(
            element=cutlass_bindings.float32,
            layout=cutlass_bindings.TensorNHWC,
            alignment=1,
        )

        # 128x128x8 CTA, 4 stages, 2x4x1 warps.
        tile_description = TileDescription(
            threadblock_shape=[128, 128, 8],
            stages=4,
            warp_count=[2, 4, 1],
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.wgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        return test_all_conv2d(operation)

    def test_SM80_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        """Analytic tile iterator."""
        self.assertTrue(self._run(cutlass_bindings.conv.IteratorAlgorithm.analytic))

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        """Optimized tile iterator."""
        self.assertTrue(self._run(cutlass_bindings.conv.IteratorAlgorithm.optimized))
|
||||
|
||||
if __name__ == '__main__':
    # Reserve a 64 MiB device/host memory pool before running the test suite.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,139 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmTF32nhwcTF32nhwcTF32nhwcTensorOpF32SM80(unittest.TestCase):
    """SM80 wgrad tests: tf32 NHWC inputs (declared as f32), f32 tensor-op accumulate."""

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
        """Optimized iterator, 128x128x16 CTA, alignment-4 inputs, alignment-8 output."""
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 8],
            element_a=cutlass_bindings.float32,
            element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4,
        )
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4,
        )
        C = TensorDescription(
            element=cutlass_bindings.float32,
            layout=cutlass_bindings.TensorNHWC,
            alignment=8,
        )

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 16],
            stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.wgrad,
            iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_align1(self):
        """Optimized iterator with alignment-1 A/B on a single-channel problem."""
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 8],
            element_a=cutlass_bindings.float32,
            element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        # Alignment 1 on inputs exercises the unaligned-access code path.
        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=1,
        )
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=1,
        )
        C = TensorDescription(
            element=cutlass_bindings.float32,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4,
        )

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 32],
            stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.wgrad,
            iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        # One tiny 1-channel problem with padding; default sweep would not
        # exercise channel counts this small.
        problem_sizes = [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 8, 8, 1),
                cutlass_bindings.Tensor4DCoord(1, 3, 3, 1),
                cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1,
            ),
        ]

        self.assertTrue(test_all_conv2d(operation, problem_sizes))
|
||||
|
||||
if __name__ == '__main__':
    # Reserve a 64 MiB device/host memory pool before running the test suite.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
42
test/python/backend/conv/run_all_tests.py
Normal file
42
test/python/backend/conv/run_all_tests.py
Normal file
@ -0,0 +1,42 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
import cutlass.backend
|
||||
import unittest
|
||||
from cutlass.backend.memory_manager import *
|
||||
|
||||
if __name__ == '__main__':
    # Larger (4 GiB) pool: this runner executes every conv2d test module.
    cutlass.backend.get_memory_pool(2**32, 2**32)
    # Discover and run all conv2d_*.py test modules in this directory.
    suite = unittest.TestLoader().discover('./', 'conv2d_*.py')
    unittest.runner.TextTestRunner().run(suite)
|
||||
0
test/python/backend/gemm/__init__.py
Normal file
0
test/python/backend/gemm/__init__.py
Normal file
128
test/python/backend/gemm/gemm_bf16_sm80.py
Normal file
128
test/python/backend/gemm/gemm_bf16_sm80.py
Normal file
@ -0,0 +1,128 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmBF16TensorOpSm80(unittest.TestCase):
    """SM80 GEMM tests: bf16 operands with f32 tensor-op accumulate."""

    def test_SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32_64x128x64_32x64x64(self):
        """bf16 column-major A/B, f32 row-major C, universal GEMM.

        Fix: the method previously lacked the ``test_`` prefix, so unittest
        discovery silently skipped it; renamed so it actually runs.
        """
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.bfloat16,
            element_b=cutlass_bindings.bfloat16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        tile_description = TileDescription(
            threadblock_shape=[64, 128, 64],
            stages=4,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        A = TensorDescription(
            element=cutlass_bindings.bfloat16,
            layout=cutlass_bindings.ColumnMajor,
            alignment=8,
        )
        B = TensorDescription(
            element=cutlass_bindings.bfloat16,
            layout=cutlass_bindings.ColumnMajor,
            alignment=8,
        )
        C = TensorDescription(
            element=cutlass_bindings.float32,
            layout=cutlass_bindings.RowMajor,
            alignment=4,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = GemmOperationUniversal(
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_gemm(operation, "universal"))

    def test_SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32_128x256x64_64x64x64(self):
        """bf16 row-major A/B/C, multistage GEMM.

        NOTE(review): the method name advertises a 128x256x64 threadblock but
        the configuration below uses [64, 128, 32] with 6 stages — confirm
        against the corresponding C++ unit test.
        """
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.bfloat16,
            element_b=cutlass_bindings.bfloat16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        tile_description = TileDescription(
            threadblock_shape=[64, 128, 32],
            stages=6,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        A = TensorDescription(
            element=cutlass_bindings.bfloat16,
            layout=cutlass_bindings.RowMajor,
            alignment=8,
        )
        B = TensorDescription(
            element=cutlass_bindings.bfloat16,
            layout=cutlass_bindings.RowMajor,
            alignment=8,
        )
        C = TensorDescription(
            element=cutlass_bindings.bfloat16,
            layout=cutlass_bindings.RowMajor,
            alignment=8,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = GemmOperationUniversal(
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_gemm(operation, "multistage"))
|
||||
|
||||
if __name__ == '__main__':
    # Reserve a 1 GiB device/host memory pool before running the GEMM tests.
    cutlass.backend.get_memory_pool(2**30, 2**30)
    unittest.main()
|
||||
138
test/python/backend/gemm/gemm_bf16_sm90.py
Normal file
138
test/python/backend/gemm/gemm_bf16_sm90.py
Normal file
@ -0,0 +1,138 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
from functools import partial
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend import library
|
||||
from cutlass.backend.test import *
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.utils import LayoutCombination, get_name
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
|
||||
# Partial specialization of get_name: bf16 A/B operands, SM90 target.
name_fn = partial(get_name, element_a=cutlass_bindings.bfloat16, element_b=cutlass_bindings.bfloat16, arch=90)
|
||||
|
||||
def add_test(cls, layouts, alignments, element_output, element_accumulator, element_epilogue,
             cluster_shape, threadblock_shape, stages, opclass, persistent=False):
    """
    Build a GEMM test method from the given specification and attach it to `cls`.

    :param cls: class to which the generated method will be added
    :type cls: type
    :param layouts: indexable container of layouts of A, B, and C operands
    :param alignments: indexable container of alignments of A, B, and C operands
    :param element_output: data type of the output element
    :param element_accumulator: data type used in accumulation
    :param element_epilogue: data type used in computing the epilogue
    :param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
    :param threadblock_shape: indexable container of dimensions of threadblock tiles
    :param stages: number of pipeline stages to use in the kernel
    :type stages: int
    :param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
    :type opclass: cutlass_bindings.OpClass
    :param persistent: whether this is a persistent warp-specialized kernel
    :type persistent: bool

    :return: the generated test-runner function
    """

    def run(self):
        """
        Dynamically-generated test body: builds the GEMM operation and checks it
        against the full set of universal-GEMM test cases.
        """
        elt_ab = cutlass_bindings.bfloat16
        simt = opclass == cutlass_bindings.OpClass.Simt

        math_inst = MathInstruction(
            instruction_shape=[1, 1, 1] if simt else None,
            element_a=elt_ab, element_b=elt_ab,
            element_accumulator=element_accumulator,
            opcode_class=opclass,
            math_operation=MathOperation.multiply_add)

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape,
            cluster_shape=cluster_shape,
            stages=stages,
            warp_count=[2, 2, 1] if simt else None,
            math_instruction=math_inst,
            persistent=persistent)

        # Operand descriptions: A and B are bf16; C uses the requested output type.
        A, B, C = (
            TensorDescription(element=elt, layout=lay, alignment=aln)
            for elt, lay, aln in zip((elt_ab, elt_ab, element_output), layouts, alignments)
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment, math_inst.element_accumulator, element_epilogue)

        operation = GemmOperationUniversal(
            arch=90, tile_description=tile_description, A=A, B=B, C=C,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1)

        self.assertTrue(test_all_gemm(operation, "universal"))

    # Persistent variants get a distinct generated name so they do not collide
    # with the non-persistent configuration of the same shape.
    suffix = "_persistent" if persistent else ""
    name = name_fn(layouts, alignments, element_output, element_accumulator,
                   element_epilogue, cluster_shape, threadblock_shape, stages,
                   opclass=opclass, suffix=suffix)
    setattr(cls, name, run)

    return run
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 90, "Device compute capability is insufficient for SM90 tests.")
class GemmBF16Sm90(unittest.TestCase):
    """
    Wrapper class to which tests will be added dynamically in __main__
    """
    # Test methods are attached at import time by the add_test_* calls below.
    pass
|
||||
|
||||
|
||||
# Partial specializations fixing the operator class for each test family.
add_test_tensorop = partial(add_test, opclass=cutlass_bindings.OpClass.TensorOp)
add_test_simt = partial(add_test, opclass=cutlass_bindings.OpClass.Simt)

# Tensor Core tests (stages=None presumably lets the backend choose — confirm).
add_test_tensorop(GemmBF16Sm90, LayoutCombination.NNN, [8, 8, 8], cutlass_bindings.bfloat16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], 3)
add_test_tensorop(GemmBF16Sm90, LayoutCombination.NNN, [4, 4, 8], cutlass_bindings.bfloat16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], 5)
add_test_tensorop(GemmBF16Sm90, LayoutCombination.TNN, [8, 8, 8], cutlass_bindings.bfloat16, cutlass_bindings.float32, cutlass_bindings.float32, [2, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmBF16Sm90, LayoutCombination.TNN, [8, 8, 8], cutlass_bindings.bfloat16, cutlass_bindings.float32, cutlass_bindings.float32, [2, 1, 1], [128, 128, 32], None, persistent=True)
# SIMT test with unit alignments.
add_test_simt(GemmBF16Sm90, LayoutCombination.NNN, [1, 1, 1], cutlass_bindings.bfloat16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 8], 2)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Pre-allocate 2**30-byte memory pools before the tests run
    # (argument semantics per cutlass.backend.get_memory_pool — confirm).
    cutlass.backend.get_memory_pool(2**30, 2**30)
    unittest.main()
|
||||
479
test/python/backend/gemm/gemm_f16_sm80.py
Normal file
479
test/python/backend/gemm/gemm_f16_sm80.py
Normal file
@ -0,0 +1,479 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmF16Sm80(unittest.TestCase):
    """
    Device-level f16 GEMM tests for SM80.

    Every test constructs a `GemmOperationUniversal` from a Tensor Core math
    instruction of shape [16, 8, 16] with f16 A/B operands, then verifies it
    with `test_all_gemm(operation, "universal")`.  The shared construction
    logic lives in `_run_universal_test`; each test method supplies only the
    configuration that actually varies.  Test method names are preserved
    exactly (including historical name/config mismatches) so unittest
    discovery is unchanged.
    """

    def _run_universal_test(self, element_accumulator, threadblock_shape, warp_count,
                            layout_a, layout_b, element_c, layout_c, alignment_c,
                            element_epilogue, stages=3, swizzling_functor=None,
                            direct_store=False):
        """
        Build and verify one universal GEMM operation.

        :param element_accumulator: data type used in accumulation
        :param threadblock_shape: [M, N, K] threadblock tile shape
        :param warp_count: warps per threadblock along each dimension
        :param layout_a: layout of the (f16) A operand
        :param layout_b: layout of the (f16) B operand
        :param element_c: data type of the C/D operand
        :param layout_c: layout of the C/D operand
        :param alignment_c: alignment (in elements) of the C/D operand
        :param element_epilogue: data type used in computing the epilogue
        :param stages: number of pipeline stages (default 3)
        :type stages: int
        :param swizzling_functor: threadblock swizzle; defaults to IdentitySwizzle1
        :param direct_store: forward direct_store=True to the operation when set
        :type direct_store: bool
        """
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
            element_accumulator=element_accumulator,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add
        )

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape,
            stages=stages, warp_count=warp_count,
            math_instruction=math_inst
        )

        # A and B are always f16 with alignment 8 in these tests.
        A = TensorDescription(element=cutlass_bindings.float16, layout=layout_a, alignment=8)
        B = TensorDescription(element=cutlass_bindings.float16, layout=layout_b, alignment=8)
        C = TensorDescription(element=element_c, layout=layout_c, alignment=alignment_c)

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, element_epilogue)

        if swizzling_functor is None:
            swizzling_functor = cutlass_bindings.IdentitySwizzle1

        # Only pass direct_store when requested so the constructor call matches
        # the original per-test constructions exactly.
        extra = {"direct_store": True} if direct_store else {}
        operation = GemmOperationUniversal(
            arch=80, tile_description=tile_description,
            A=A, B=B, C=C,
            epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor,
            **extra
        )

        self.assertTrue(test_all_gemm(operation, "universal"))

    def test_SM80_Device_Gemm_f32t_f32n_f32t_tensor_op_bf16_f32_128x128x32_64x64x32(self):
        # NOTE(review): the name mentions f32/bf16 inputs, but A/B are f16 as in
        # the original test — confirm against the C++ test suite.
        self._run_universal_test(
            element_accumulator=cutlass_bindings.float32,
            threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1],
            layout_a=cutlass_bindings.ColumnMajor, layout_b=cutlass_bindings.RowMajor,
            element_c=cutlass_bindings.float32, layout_c=cutlass_bindings.ColumnMajor,
            alignment_c=4, element_epilogue=cutlass_bindings.float32,
            swizzling_functor=cutlass_bindings.BatchedIdentitySwizzle,
            direct_store=True)

    def test_SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32_128x128x64_64x64x64(self):
        self._run_universal_test(
            element_accumulator=cutlass_bindings.float32,
            threadblock_shape=[128, 128, 64], warp_count=[2, 2, 1],
            layout_a=cutlass_bindings.ColumnMajor, layout_b=cutlass_bindings.ColumnMajor,
            element_c=cutlass_bindings.float16, layout_c=cutlass_bindings.RowMajor,
            alignment_c=8, element_epilogue=cutlass_bindings.float32)

    def test_SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32_128x256x64_64x64x64(self):
        self._run_universal_test(
            element_accumulator=cutlass_bindings.float32,
            threadblock_shape=[128, 256, 64], warp_count=[2, 4, 1],
            layout_a=cutlass_bindings.ColumnMajor, layout_b=cutlass_bindings.ColumnMajor,
            element_c=cutlass_bindings.float32, layout_c=cutlass_bindings.ColumnMajor,
            alignment_c=4, element_epilogue=cutlass_bindings.float32)

    def test_SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32_256x128x64_64x64x64(self):
        self._run_universal_test(
            element_accumulator=cutlass_bindings.float32,
            threadblock_shape=[256, 128, 64], warp_count=[4, 2, 1],
            layout_a=cutlass_bindings.ColumnMajor, layout_b=cutlass_bindings.ColumnMajor,
            element_c=cutlass_bindings.float32, layout_c=cutlass_bindings.RowMajor,
            alignment_c=4, element_epilogue=cutlass_bindings.float32)

    def test_SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16_sliced_k_128x64x64_64x64x32(self):
        self._run_universal_test(
            element_accumulator=cutlass_bindings.float16,
            threadblock_shape=[128, 64, 64], warp_count=[2, 1, 1],
            layout_a=cutlass_bindings.ColumnMajor, layout_b=cutlass_bindings.RowMajor,
            element_c=cutlass_bindings.float16, layout_c=cutlass_bindings.RowMajor,
            alignment_c=4, element_epilogue=cutlass_bindings.float16)

    def test_SM80_Device_GemmUniversal_f16n_f16t_f32t_tensor_op_f32_64x64x32_32x32x32(self):
        # NOTE(review): the name says f32 accumulation, but the original test
        # accumulates in f16 — preserved as-is; confirm intent.
        self._run_universal_test(
            element_accumulator=cutlass_bindings.float16,
            threadblock_shape=[64, 64, 32], warp_count=[2, 2, 1],
            layout_a=cutlass_bindings.ColumnMajor, layout_b=cutlass_bindings.RowMajor,
            element_c=cutlass_bindings.float16, layout_c=cutlass_bindings.RowMajor,
            alignment_c=4, element_epilogue=cutlass_bindings.float16,
            stages=10)

    def test_SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32_256x128x64_64x64x64(self):
        self._run_universal_test(
            element_accumulator=cutlass_bindings.float32,
            threadblock_shape=[256, 128, 64], warp_count=[4, 2, 1],
            layout_a=cutlass_bindings.ColumnMajor, layout_b=cutlass_bindings.RowMajor,
            element_c=cutlass_bindings.float16, layout_c=cutlass_bindings.RowMajor,
            alignment_c=8, element_epilogue=cutlass_bindings.float32)

    # NOTE(review): the doubled "test_test_" prefix is kept intentionally so the
    # unittest-discovered name does not change.
    def test_test_SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16_sliced_k_128x64x64_64x64x32(self):
        self._run_universal_test(
            element_accumulator=cutlass_bindings.float32,
            threadblock_shape=[128, 64, 64], warp_count=[2, 1, 1],
            layout_a=cutlass_bindings.RowMajor, layout_b=cutlass_bindings.ColumnMajor,
            element_c=cutlass_bindings.float16, layout_c=cutlass_bindings.RowMajor,
            alignment_c=4, element_epilogue=cutlass_bindings.float32)

    def test_SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32_128x256x64_64x64x64(self):
        self._run_universal_test(
            element_accumulator=cutlass_bindings.float32,
            threadblock_shape=[128, 256, 64], warp_count=[2, 4, 1],
            layout_a=cutlass_bindings.RowMajor, layout_b=cutlass_bindings.RowMajor,
            element_c=cutlass_bindings.float16, layout_c=cutlass_bindings.ColumnMajor,
            alignment_c=8, element_epilogue=cutlass_bindings.float32)

    def test_SM80_Device_Gemm_f16t_f16t_f32t_tensor_op_f32_128x256x64_64x64x64(self):
        # NOTE(review): name says t/t layouts but the original test used
        # column-major A/B — preserved as-is; confirm intent.
        self._run_universal_test(
            element_accumulator=cutlass_bindings.float32,
            threadblock_shape=[128, 256, 64], warp_count=[2, 4, 1],
            layout_a=cutlass_bindings.ColumnMajor, layout_b=cutlass_bindings.ColumnMajor,
            element_c=cutlass_bindings.float32, layout_c=cutlass_bindings.ColumnMajor,
            alignment_c=4, element_epilogue=cutlass_bindings.float32)
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Pre-allocate 2**30-byte memory pools before the tests run
    # (argument semantics per cutlass.backend.get_memory_pool — confirm).
    cutlass.backend.get_memory_pool(2**30, 2**30)
    unittest.main()
|
||||
182
test/python/backend/gemm/gemm_f16_sm90.py
Normal file
182
test/python/backend/gemm/gemm_f16_sm90.py
Normal file
@ -0,0 +1,182 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
from functools import partial
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend import library
|
||||
from cutlass.backend.test import *
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.utils import LayoutCombination, get_name
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
|
||||
# Partial specialization for naming tests
|
||||
name_fn = partial(get_name, element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16, arch=90)  # f16 A/B operands, SM90 target
|
||||
|
||||
|
||||
def add_test(cls, layouts, alignments, element_output, element_accumulator, element_epilogue,
             cluster_shape, threadblock_shape, stages, opclass, persistent=False):
    """
    Create a test-running function with the given specification and set it as a method of `cls`.

    :param cls: class to which the generated method will be added
    :type cls: type
    :param layouts: indexable container of layouts of A, B, and C operands
    :param alignments: indexable container of alignments of A, B, and C operands
    :param element_output: data type of the output element
    :param element_accumulator: data type used in accumulation
    :param element_epilogue: data type used in computing the epilogue
    :param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
    :param threadblock_shape: indexable container of dimensions of threadblock tiles
    :param stages: number of pipeline stages to use in the kernel
    :type stages: int
    :param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
    :type opclass: cutlass_bindings.OpClass
    :param persistent: whether this is a persistent warp-specialized kernel
    :type persistent: bool

    :return: the generated test-runner function
    """

    def run(self):
        """
        Dynamically-generated function that constructs a GEMM operation and verifies it against
        multiple test cases.
        """

        element_A = cutlass_bindings.float16
        element_B = cutlass_bindings.float16
        # SIMT kernels take an explicit 1x1x1 instruction shape and a fixed warp
        # count; for Tensor Core kernels both are left as None.
        inst_shape = [1, 1, 1] if opclass == cutlass_bindings.OpClass.Simt else None
        warp_count = [2, 2, 1] if opclass == cutlass_bindings.OpClass.Simt else None
        math_inst = MathInstruction(
            instruction_shape=inst_shape,
            element_a=element_A, element_b=element_B, element_accumulator=element_accumulator,
            opcode_class=opclass, math_operation=MathOperation.multiply_add
        )

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape,
            cluster_shape=cluster_shape,
            stages=stages, warp_count=warp_count,
            math_instruction=math_inst,
            persistent=persistent
        )

        # A and B carry the f16 input type; C carries the requested output type.
        A = TensorDescription(element=element_A, layout=layouts[0], alignment=alignments[0])
        B = TensorDescription(element=element_B, layout=layouts[1], alignment=alignments[1])
        C = TensorDescription(element=element_output, layout=layouts[2], alignment=alignments[2])

        epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)

        swizzling_functor = cutlass_bindings.IdentitySwizzle1

        operation = GemmOperationUniversal(
            arch=90, tile_description=tile_description, A=A, B=B, C=C,
            epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)

        self.assertTrue(test_all_gemm(operation, "universal"))

    # Persistent variants get a distinct generated name so they do not collide
    # with the non-persistent configuration of the same shape.
    if persistent:
        suffix = "_persistent"
    else:
        suffix = ""

    name = name_fn(layouts, alignments, element_output, element_accumulator,
                   element_epilogue, cluster_shape, threadblock_shape, stages, opclass=opclass, suffix=suffix)
    setattr(cls, name, run)

    return run
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 90, "Device compute capability is insufficient for SM90 tests.")
class GemmF16Sm90(unittest.TestCase):
    """
    Wrapper class to which tests will be added dynamically in __main__
    """
    # Test methods are attached at import time by the add_test_* calls below.
    pass
|
||||
|
||||
|
||||
# Partially bind the operation class so each registration below only varies
# the problem configuration. Positional arguments of each call (per the
# add_test signature defined above):
#   (test class, layouts, alignments, element_output, element_accumulator,
#    element_epilogue, cluster_shape, threadblock_shape, stages)
# stages=None presumably lets the backend choose the stage count -- confirm
# against TileDescription.
add_test_tensorop = partial(add_test, opclass=cutlass_bindings.OpClass.TensorOp)
add_test_simt = partial(add_test, opclass=cutlass_bindings.OpClass.Simt)

# Tests with 1x1x1 clusters
add_test_tensorop(GemmF16Sm90, LayoutCombination.NNN, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], 3)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NTN, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NTT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNN, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
# Smaller / non-square threadblock shapes
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 64, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 64, 64], None)
# Reduced alignments and f16 accumulation/output variants
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [4, 4, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [4, 4, 8], cutlass_bindings.float16, cutlass_bindings.float16, cutlass_bindings.float16, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float16, cutlass_bindings.float16, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 64, 64], 5)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [2, 2, 2], cutlass_bindings.float16, cutlass_bindings.float16, cutlass_bindings.float16, [1, 1, 1], [128, 128, 32], None)

# Tests with different cluster shapes
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NNN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [1, 4, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 4, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [4, 1, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [4, 2, 1], [64, 128, 64], None)

# Tests for persistent warp-specialized threadblocks
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 1, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 1, 1], [128, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [1, 2, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 2, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [1, 4, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 4, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [4, 1, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [4, 4, 1], [64, 128, 64], None, persistent=True)

# Tests using SIMT
add_test_simt(GemmF16Sm90, LayoutCombination.NNN, [1, 1, 1], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.TNN, [1, 1, 1], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 128, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.NTN, [1, 1, 1], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 64, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.TTN, [1, 1, 1], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 64, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.NNT, [1, 1, 1], cutlass_bindings.float16, cutlass_bindings.float16, cutlass_bindings.float16, [1, 1, 1], [128, 128, 8], 2)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Allocate the backend memory pool before running the tests; the two
    # 2**30 arguments are pool sizes (exact semantics of each argument are
    # defined in cutlass.backend -- not visible here).
    cutlass.backend.get_memory_pool(2**30, 2**30)
    unittest.main()
|
||||
178
test/python/backend/gemm/gemm_f32_sm80.py
Normal file
178
test/python/backend/gemm/gemm_f32_sm80.py
Normal file
@ -0,0 +1,178 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.memory_manager import get_allocated_size
|
||||
from cutlass.backend.test import *
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmF32nF32nF32nTensorOpF32Sm80(unittest.TestCase):
    """
    Regression tests for single-precision GEMMs on SM80 tensor cores.

    The three tests differ only in math operation, threadblock shape, and the
    layout of the A operand, so the shared construction lives in _run_gemm.

    NOTE(review): the class name suggests all-column-major ("n") layouts, but
    the tests mix row- and column-major operands -- confirm the intended name.
    """

    def _run_gemm(self, math_operation, threadblock_shape, layout_a):
        """
        Construct and verify one F32 tensor-op GEMM on SM80.

        :param math_operation: MathOperation variant used for the MMA
        :param threadblock_shape: [M, N, K] threadblock tile dimensions
        :param layout_a: layout of the A operand (B is always column-major and
            C row-major, matching the original per-test definitions)
        """
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 8],
            element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=math_operation
        )

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape,
            stages=3, warp_count=[2, 2, 1],
            math_instruction=math_inst
        )

        A = TensorDescription(
            element=cutlass_bindings.float32, layout=layout_a,
            alignment=4
        )
        B = TensorDescription(
            element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
            alignment=4
        )
        C = TensorDescription(
            element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
            alignment=4
        )

        element_epilogue = cutlass_bindings.float32

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, element_epilogue)

        swizzling_functor = cutlass_bindings.IdentitySwizzle1

        operation = GemmOperationUniversal(
            arch=80, tile_description=tile_description,
            A=A, B=B, C=C,
            epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
        )

        # Run the full universal-GEMM verification suite for this operation.
        self.assertTrue(test_all_gemm(operation, "universal"))

    def test_SM80_Device_Gemm_f32t_f32n_f32t_tensor_op_bf16_f32_128x128x32_64x64x32(self):
        # Fast BF16 math with a row-major A operand.
        self._run_gemm(MathOperation.multiply_add_fast_bf16, [128, 128, 32], cutlass_bindings.RowMajor)

    def test_SM80_Device_Gemm_f32n_f32n_f32t_tensor_op_f32_128x128x32_64x64x32(self):
        # Standard multiply-add with a column-major A operand.
        self._run_gemm(MathOperation.multiply_add, [128, 128, 32], cutlass_bindings.ColumnMajor)

    def test_SM80_Device_Gemm_f32n_f32n_f32t_tensor_op_fast_accurate_f32_64x64x32_32x32x32(self):
        # Fast-accurate F32 math on a smaller (64x64x32) threadblock tile.
        self._run_gemm(MathOperation.multiply_add_fast_f32, [64, 64, 32], cutlass_bindings.ColumnMajor)
|
||||
|
||||
if __name__ == '__main__':
    # Smaller (2**24) memory pool than the other GEMM test modules; the
    # compiled-kernel cache is also pre-loaded, presumably to avoid
    # recompilation across runs -- confirm in cutlass.backend.compiler.
    cutlass.backend.get_memory_pool(2**24, 2**24)
    cutlass.backend.compiler.load_from_cache()
    unittest.main()
|
||||
134
test/python/backend/gemm/gemm_f64_sm80.py
Normal file
134
test/python/backend/gemm/gemm_f64_sm80.py
Normal file
@ -0,0 +1,134 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmF64TensorOpSm80(unittest.TestCase):
    """
    Regression tests for double-precision GEMMs on SM80 tensor cores.

    The two tests share everything except the threadblock shape and the
    layouts of the A and B operands, so the common construction lives in
    _run_gemm.
    """

    def _run_gemm(self, threadblock_shape, layout_a, layout_b):
        """
        Construct and verify one F64 tensor-op GEMM on SM80.

        :param threadblock_shape: [M, N, K] threadblock tile dimensions
        :param layout_a: layout of the A operand
        :param layout_b: layout of the B operand
        """
        math_inst = MathInstruction(
            instruction_shape=[8, 8, 4],
            element_a=cutlass_bindings.float64, element_b=cutlass_bindings.float64,
            element_accumulator=cutlass_bindings.float64, opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add
        )

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape,
            stages=4, warp_count=[2, 2, 1],
            math_instruction=math_inst
        )

        # alignment 1 restricted for double
        A = TensorDescription(
            element=cutlass_bindings.float64, layout=layout_a,
            alignment=1
        )
        B = TensorDescription(
            element=cutlass_bindings.float64, layout=layout_b,
            alignment=1
        )
        C = TensorDescription(
            element=cutlass_bindings.float64, layout=cutlass_bindings.RowMajor,
            alignment=1
        )

        element_epilogue = cutlass_bindings.float64

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, element_epilogue)

        swizzling_functor = cutlass_bindings.IdentitySwizzle1

        operation = GemmOperationUniversal(
            arch=80, tile_description=tile_description,
            A=A, B=B, C=C,
            epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
        )

        # Run the full universal-GEMM verification suite for this operation.
        self.assertTrue(test_all_gemm(operation, "universal"))

    def test_SM80_Device_Gemm_f64n_f64t_f64t_tensor_op_f64_32x32x16_16x16x16(self):
        # Column-major A, row-major B on a 32x32x16 threadblock tile.
        self._run_gemm([32, 32, 16], cutlass_bindings.ColumnMajor, cutlass_bindings.RowMajor)

    def test_SM80_Device_Gemm_f64t_f64n_f64t_tensor_op_f64_64x64x16_32x32x16(self):
        # Row-major A, column-major B on a 64x64x16 threadblock tile.
        self._run_gemm([64, 64, 16], cutlass_bindings.RowMajor, cutlass_bindings.ColumnMajor)
|
||||
|
||||
if __name__ == '__main__':
    # Allocate the backend memory pool (two 2**30-sized pools; exact argument
    # semantics defined in cutlass.backend -- not visible here) before running.
    cutlass.backend.get_memory_pool(2**30, 2**30)
    unittest.main()
|
||||
124
test/python/backend/gemm/gemm_f64_sm90.py
Normal file
124
test/python/backend/gemm/gemm_f64_sm90.py
Normal file
@ -0,0 +1,124 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
from functools import partial
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend import library
|
||||
from cutlass.backend.test import *
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.utils import LayoutCombination, get_name
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
|
||||
# Pre-bind the operand element types (F64) and target architecture (SM90) so
# each generated test name only needs the per-test configuration.
name_fn = partial(get_name, element_a=cutlass_bindings.float64, element_b=cutlass_bindings.float64, arch=90)
|
||||
|
||||
def add_test(cls, layouts, alignments, element_output, element_accumulator, element_epilogue,
             cluster_shape, threadblock_shape, stages, opclass, persistent=False):
    """
    Create a test-running function with the given specification and set it as a method of `cls`.

    :param cls: class to which the generated method will be added
    :type cls: type
    :param layouts: indexable container of layouts of A, B, and C operands
    :param alignments: indexable container of alignments of A, B, and C operands
    :param element_output: data type of the output element
    :param element_accumulator: data type used in accumulation
    :param element_epilogue: data type used in computing the epilogue
    :param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
    :param threadblock_shape: indexable container of dimensions of threadblock tiles
    :param stages: number of pipeline stages to use in the kernel
    :type stages: int
    :param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
    :type opclass: cutlass_bindings.OpClass
    :param persistent: whether to use persistent warp-specialized threadblocks
        (mirrors the F16 SM90 test generator; defaults to False for backward
        compatibility with existing registrations)
    :type persistent: bool

    :return: the generated test-running function (also set on `cls`)
    """

    def run(self):
        """
        Dynamically-generated function that constructs a GEMM operation and verifies it against
        multiple test cases.
        """
        element_A = cutlass_bindings.float64
        element_B = cutlass_bindings.float64
        # SIMT kernels use a 1x1x1 "instruction" shape and an explicit warp
        # layout; for tensor-op kernels both are left for the backend to pick.
        inst_shape = [1, 1, 1] if opclass == cutlass_bindings.OpClass.Simt else None
        warp_count = [2, 2, 1] if opclass == cutlass_bindings.OpClass.Simt else None
        math_inst = MathInstruction(
            instruction_shape=inst_shape,
            element_a=element_A, element_b=element_B, element_accumulator=element_accumulator,
            opcode_class=opclass, math_operation=MathOperation.multiply_add
        )

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape,
            cluster_shape=cluster_shape,
            stages=stages, warp_count=warp_count,
            math_instruction=math_inst,
            persistent=persistent
        )

        A = TensorDescription(element=element_A, layout=layouts[0], alignment=alignments[0])
        B = TensorDescription(element=element_B, layout=layouts[1], alignment=alignments[1])
        C = TensorDescription(element=element_output, layout=layouts[2], alignment=alignments[2])

        epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)

        swizzling_functor = cutlass_bindings.IdentitySwizzle1

        operation = GemmOperationUniversal(
            arch=90, tile_description=tile_description, A=A, B=B, C=C,
            epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)

        self.assertTrue(test_all_gemm(operation, "universal"))

    # Distinguish persistent variants in the generated test name, matching the
    # F16 SM90 generator's naming scheme.
    suffix = "_persistent" if persistent else ""
    name = name_fn(layouts, alignments, element_output, element_accumulator,
                   element_epilogue, cluster_shape, threadblock_shape, stages, opclass=opclass, suffix=suffix)
    setattr(cls, name, run)

    return run
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 90, "Device compute capability is insufficient for SM90 tests.")
class GemmF64Sm90(unittest.TestCase):
    """
    Wrapper class to which tests will be added dynamically in __main__
    """
    # Intentionally empty: add_test attaches generated test methods to this
    # class at module import time (see the add_test_simt call below).
    pass
|
||||
|
||||
|
||||
# Register the single F64 SIMT test: NNN layouts, alignment 1 for all
# operands, F64 output/accumulator/epilogue, 1x1x1 cluster, 64x64x32
# threadblock tile, 2 pipeline stages.
add_test_simt = partial(add_test, opclass=cutlass_bindings.OpClass.Simt)
add_test_simt(GemmF64Sm90, LayoutCombination.NNN, [1, 1, 1], cutlass_bindings.float64, cutlass_bindings.float64, cutlass_bindings.float64, [1, 1, 1], [64, 64, 32], 2)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Allocate the backend memory pool (two 2**30-sized pools; exact argument
    # semantics defined in cutlass.backend -- not visible here) before running.
    cutlass.backend.get_memory_pool(2**30, 2**30)
    unittest.main()
|
||||
235
test/python/backend/gemm/gemm_grouped_sm80.py
Normal file
235
test/python/backend/gemm/gemm_grouped_sm80.py
Normal file
@ -0,0 +1,235 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.gemm_grouped_testbed import TestbedGrouped
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
|
||||
class GemmGroupedSm80(unittest.TestCase):
|
||||
    def test_SM80_Device_GemmGrouped_f16n_f16t_f32n_tensor_op_f32_128x128x32_64x64x32(self):
        """
        Grouped F16 GEMM with F32 accumulation on SM80 tensor cores, run once
        per scheduler precompute mode (device and host).
        """
        # NOTE(review): the test name's "f16t" suggests a row-major B operand,
        # but B is declared ColumnMajor below -- confirm which is intended.
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16], element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16, element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add
        )

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 32],
            stages=3, warp_count=[2, 2, 1],
            math_instruction=math_inst
        )

        A = TensorDescription(
            element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
            alignment=8
        )

        B = TensorDescription(
            element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
            alignment=8
        )

        C = TensorDescription(
            element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
            alignment=4
        )

        element_epilogue = cutlass_bindings.float32
        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, element_epilogue)
        # Grouped/batched kernels use the batched identity swizzle rather than
        # the plain IdentitySwizzle1 used by the non-grouped tests.
        swizzling_functor = cutlass_bindings.BatchedIdentitySwizzle

        # Verify with both device-side and host-side scheduler precomputation.
        for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
            operation = GemmOperationGrouped(
                80,
                tile_description, A, B, C,
                epilogue_functor, swizzling_functor,
                precompute_mode=precompute_mode
            )

            testbed = TestbedGrouped(operation=operation)

            # 24 -- presumably the number of grouped problems to generate and
            # check; confirm against TestbedGrouped.run.
            self.assertTrue(testbed.run(24))
|
||||
|
||||
def test_SM80_Device_GemmGrouped_f64t_f64t_f64n_tensor_op_f64_64x64x16_32x32x16(self):
|
||||
math_inst = MathInstruction(
|
||||
instruction_shape=[8, 8, 4], element_a=cutlass_bindings.float64,
|
||||
element_b=cutlass_bindings.float64, element_accumulator=cutlass_bindings.float64,
|
||||
opcode_class=cutlass_bindings.OpClass.TensorOp,
|
||||
math_operation=MathOperation.multiply_add
|
||||
)
|
||||
|
||||
tile_description = TileDescription(
|
||||
threadblock_shape=[64, 64, 16],
|
||||
stages=4, warp_count=[2, 2, 1],
|
||||
math_instruction=math_inst
|
||||
)
|
||||
|
||||
A = TensorDescription(
|
||||
element=cutlass_bindings.float64, layout=cutlass_bindings.RowMajor,
|
||||
alignment=1
|
||||
)
|
||||
|
||||
B = TensorDescription(
|
||||
element=cutlass_bindings.float64, layout=cutlass_bindings.RowMajor,
|
||||
alignment=1
|
||||
)
|
||||
|
||||
C = TensorDescription(
|
||||
element=cutlass_bindings.float64, layout=cutlass_bindings.ColumnMajor,
|
||||
alignment=1
|
||||
)
|
||||
|
||||
element_epilogue = cutlass_bindings.float64
|
||||
epilogue_functor = LinearCombination(
|
||||
C.element, C.alignment,
|
||||
math_inst.element_accumulator, element_epilogue)
|
||||
swizzling_functor = cutlass_bindings.BatchedIdentitySwizzle
|
||||
|
||||
for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
|
||||
operation = GemmOperationGrouped(
|
||||
80,
|
||||
tile_description, A, B, C,
|
||||
epilogue_functor, swizzling_functor,
|
||||
precompute_mode=precompute_mode
|
||||
)
|
||||
|
||||
testbed = TestbedGrouped(operation=operation)
|
||||
|
||||
self.assertTrue(testbed.run(24))
|
||||
|
||||
def test_SM80_Device_GemmGrouped_f32t_f32t_f32t_simt_f32_128x64x8_64x32x1(self):
|
||||
math_inst = MathInstruction(
|
||||
instruction_shape=[1, 1, 1], element_a=cutlass_bindings.float32,
|
||||
element_b=cutlass_bindings.float32, element_accumulator=cutlass_bindings.float32,
|
||||
opcode_class=cutlass_bindings.OpClass.Simt,
|
||||
math_operation=MathOperation.multiply_add
|
||||
)
|
||||
|
||||
tile_description = TileDescription(
|
||||
threadblock_shape=[128, 64, 8],
|
||||
stages=4, warp_count=[2, 2, 1],
|
||||
math_instruction=math_inst
|
||||
)
|
||||
|
||||
A = TensorDescription(
|
||||
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
|
||||
alignment=1
|
||||
)
|
||||
|
||||
B = TensorDescription(
|
||||
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
|
||||
alignment=1
|
||||
)
|
||||
|
||||
C = TensorDescription(
|
||||
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
|
||||
alignment=1
|
||||
)
|
||||
|
||||
element_epilogue = cutlass_bindings.float32
|
||||
epilogue_functor = LinearCombination(
|
||||
C.element, C.alignment,
|
||||
math_inst.element_accumulator, element_epilogue)
|
||||
swizzling_functor = cutlass_bindings.BatchedIdentitySwizzle
|
||||
|
||||
for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
|
||||
operation = GemmOperationGrouped(
|
||||
80,
|
||||
tile_description, A, B, C,
|
||||
epilogue_functor, swizzling_functor,
|
||||
precompute_mode=precompute_mode
|
||||
)
|
||||
|
||||
testbed = TestbedGrouped(operation=operation)
|
||||
|
||||
self.assertTrue(testbed.run(27))
|
||||
|
||||
def test_SM80_Device_GemmGrouped_f16n_f16t_f32n_tensor_op_f32_128x128x32_64x64x32_cache(self):
|
||||
math_inst = MathInstruction(
|
||||
instruction_shape=[16, 8, 16], element_a=cutlass_bindings.float16,
|
||||
element_b=cutlass_bindings.float16, element_accumulator=cutlass_bindings.float32,
|
||||
opcode_class=cutlass_bindings.OpClass.TensorOp,
|
||||
math_operation=MathOperation.multiply_add
|
||||
)
|
||||
|
||||
tile_description = TileDescription(
|
||||
threadblock_shape=[128, 128, 32],
|
||||
stages=3, warp_count=[2, 2, 1],
|
||||
math_instruction=math_inst
|
||||
)
|
||||
|
||||
A = TensorDescription(
|
||||
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
|
||||
alignment=8
|
||||
)
|
||||
|
||||
B = TensorDescription(
|
||||
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
|
||||
alignment=8
|
||||
)
|
||||
|
||||
C = TensorDescription(
|
||||
element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
|
||||
alignment=4
|
||||
)
|
||||
|
||||
element_epilogue = cutlass_bindings.float32
|
||||
epilogue_functor = LinearCombination(
|
||||
C.element, C.alignment,
|
||||
math_inst.element_accumulator, element_epilogue)
|
||||
swizzling_functor = cutlass_bindings.BatchedIdentitySwizzle
|
||||
|
||||
for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
|
||||
operation = GemmOperationGrouped(
|
||||
80,
|
||||
tile_description, A, B, C,
|
||||
epilogue_functor, swizzling_functor,
|
||||
precompute_mode=precompute_mode
|
||||
)
|
||||
|
||||
testbed = TestbedGrouped(operation=operation)
|
||||
|
||||
self.assertTrue(testbed.run(5))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Pre-allocate memory pools before running kernels (2**30-byte sizes;
    # presumably device and host pool sizes — confirm against
    # cutlass.backend.get_memory_pool).
    cutlass.backend.get_memory_pool(2**30, 2**30)
    unittest.main()
|
||||
261
test/python/backend/gemm/gemm_s8_sm80.py
Normal file
261
test/python/backend/gemm/gemm_s8_sm80.py
Normal file
@ -0,0 +1,261 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.epilogue import LinearCombinationClamp
|
||||
from cutlass.backend.test import *
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmS8TensorOpF32Sm80(unittest.TestCase):
    """
    Functional tests for int8 Tensor Core GEMMs (s32 accumulation) on SM80.

    Each test constructs operand descriptions and an epilogue, then delegates
    to ``_run_gemm_test``, which assembles the universal GEMM operation and
    runs the shared testbed.
    """

    @staticmethod
    def _s8_math_inst(math_operation):
        """int8 x int8 -> int32 Tensor Core MathInstruction with 16x8x32 MMA shape."""
        return MathInstruction(
            instruction_shape=[16, 8, 32],
            element_a=cutlass_bindings.int8, element_b=cutlass_bindings.int8,
            element_accumulator=cutlass_bindings.int32, opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=math_operation
        )

    def _run_gemm_test(self, math_inst, threadblock_shape, stages, A, B, C, epilogue_functor, testbed_mode):
        """
        Build a universal GEMM operation and verify it with the shared testbed.

        :param math_inst: MathInstruction describing operand types and MMA shape
        :param threadblock_shape: threadblock tile dimensions [M, N, K]
        :param stages: number of software-pipeline stages
        :type stages: int
        :param A: TensorDescription of operand A
        :param B: TensorDescription of operand B
        :param C: TensorDescription of operand C
        :param epilogue_functor: epilogue applied to the accumulator
        :param testbed_mode: testbed selection string (e.g. "multistage", "interleaved")
        :type testbed_mode: str
        """
        tile_description = TileDescription(
            threadblock_shape=threadblock_shape,
            stages=stages, warp_count=[2, 2, 1],
            math_instruction=math_inst
        )

        operation = GemmOperationUniversal(
            arch=80, tile_description=tile_description,
            A=A, B=B, C=C,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1
        )

        self.assertTrue(test_all_gemm(operation, testbed_mode))

    def test_SM80_Device_Gemm_s8t_s8n_s8t_tensor_op_s32_64x64x64_32x32x64(self):
        """Interleaved-layout int8 GEMM with saturating multiply-add."""
        math_inst = self._s8_math_inst(MathOperation.multiply_add_saturate)

        A = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajorInterleaved32,
            alignment=16
        )
        B = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajorInterleaved32,
            alignment=16
        )
        C = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajorInterleaved32,
            alignment=8
        )

        epilogue_functor = FastLinearCombinationClamp(C.element, C.alignment)

        self._run_gemm_test(math_inst, [64, 64, 64], 6, A, B, C, epilogue_functor, "interleaved")

    def test_SM80_Device_Gemm_s8t_s8n_s8t_tensor_op_s32_256x128x128_64x64x128(self):
        """Multistage int8 GEMM, int8 row-major output with clamping epilogue."""
        math_inst = self._s8_math_inst(MathOperation.multiply_add)

        A = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajor,
            alignment=16
        )
        B = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajor,
            alignment=16
        )
        C = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajor,
            alignment=16
        )

        epilogue_functor = FastLinearCombinationClamp(C.element, C.alignment)

        self._run_gemm_test(math_inst, [128, 128, 128], 3, A, B, C, epilogue_functor, "multistage")

    def test_SM80_Device_Gemm_s8t_s8n_s8n_tensor_op_s32_128x128x128_64x64x128(self):
        """Multistage int8 GEMM, int8 column-major output with clamping epilogue."""
        math_inst = self._s8_math_inst(MathOperation.multiply_add)

        A = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajor,
            alignment=16
        )
        B = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajor,
            alignment=16
        )
        C = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajor,
            alignment=16
        )

        epilogue_functor = FastLinearCombinationClamp(C.element, C.alignment)

        self._run_gemm_test(math_inst, [128, 128, 128], 3, A, B, C, epilogue_functor, "multistage")

    def test_SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32_128x128x128_64x64x128(self):
        """Multistage int8 GEMM with int32 column-major output."""
        math_inst = self._s8_math_inst(MathOperation.multiply_add)

        A = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajor,
            alignment=16
        )
        B = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajor,
            alignment=16
        )
        C = TensorDescription(
            element=cutlass_bindings.int32, layout=cutlass_bindings.ColumnMajor,
            alignment=4
        )

        epilogue_functor = LinearCombinationClamp(
            C.element, C.alignment, math_inst.element_accumulator,
            cutlass_bindings.int32
        )

        self._run_gemm_test(math_inst, [128, 128, 128], 3, A, B, C, epilogue_functor, "multistage")

    def test_SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32_128x128x128_64x64x128(self):
        """Multistage int8 GEMM with int32 row-major output."""
        math_inst = self._s8_math_inst(MathOperation.multiply_add)

        A = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajor,
            alignment=16
        )
        B = TensorDescription(
            element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajor,
            alignment=16
        )
        C = TensorDescription(
            element=cutlass_bindings.int32, layout=cutlass_bindings.RowMajor,
            alignment=4
        )

        epilogue_functor = LinearCombinationClamp(
            C.element, C.alignment, math_inst.element_accumulator,
            cutlass_bindings.int32
        )

        self._run_gemm_test(math_inst, [128, 128, 128], 3, A, B, C, epilogue_functor, "multistage")
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Pre-allocate memory pools before running kernels (2**30-byte sizes;
    # presumably device and host pool sizes — confirm against
    # cutlass.backend.get_memory_pool).
    cutlass.backend.get_memory_pool(2**30, 2**30)
    unittest.main()
|
||||
154
test/python/backend/gemm/gemm_s8_sm90.py
Normal file
154
test/python/backend/gemm/gemm_s8_sm90.py
Normal file
@ -0,0 +1,154 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
from functools import partial
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend import library
|
||||
from cutlass.backend.test import *
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.utils import LayoutCombination, get_name
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
|
||||
# Partial specialization of get_name for this file's tests. Operands A and B
# are always int8 here (see `run` below), and every kernel targets SM90.
# (Previously specialized with float16 operands, which mislabeled the
# generated test names for this int8 test file.)
name_fn = partial(get_name, element_a=cutlass_bindings.int8, element_b=cutlass_bindings.int8, arch=90)


def add_test(cls, layouts, alignments, element_output, element_accumulator, element_epilogue,
             cluster_shape, threadblock_shape, stages, opclass, persistent=False):
    """
    Create a test-running function with the given specification and set it as a method of `cls`.

    :param cls: class to which the generated method will be added
    :type cls: type
    :param layouts: indexable container of layouts of A, B, and C operands
    :param alignments: indexable container of alignments of A, B, and C operands
    :param element_output: data type of the output element
    :param element_accumulator: data type used in accumulation
    :param element_epilogue: data type used in computing the epilogue
    :param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
    :param threadblock_shape: indexable container of dimensions of threadblock tiles
    :param stages: number of pipeline stages to use in the kernel
    :type stages: int
    :param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
    :type opclass: cutlass_bindings.OpClass
    :param persistent: whether this is a persistent warp-specialized kernel
    :type persistent: bool

    :return: the generated test method (also attached to `cls`)
    """

    def run(self):
        """
        Dynamically-generated function that constructs a GEMM operation and verifies it against
        multiple test cases.
        """
        # Operands A and B are always int8 in this test file.
        element_A = cutlass_bindings.int8
        element_B = cutlass_bindings.int8
        # SIMT kernels pin a 1x1x1 "instruction" shape and an explicit warp
        # count; Tensor Core kernels leave both unset for the backend to pick.
        inst_shape = [1, 1, 1] if opclass == cutlass_bindings.OpClass.Simt else None
        warp_count = [2, 2, 1] if opclass == cutlass_bindings.OpClass.Simt else None
        math_inst = MathInstruction(
            instruction_shape=inst_shape,
            element_a=element_A, element_b=element_B, element_accumulator=element_accumulator,
            opcode_class=opclass, math_operation=MathOperation.multiply_add
        )

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape,
            cluster_shape=cluster_shape,
            stages=stages, warp_count=warp_count,
            math_instruction=math_inst,
            persistent=persistent
        )

        A = TensorDescription(element=element_A, layout=layouts[0], alignment=alignments[0])
        B = TensorDescription(element=element_B, layout=layouts[1], alignment=alignments[1])
        C = TensorDescription(element=element_output, layout=layouts[2], alignment=alignments[2])

        # SIMT integer kernels use the clamping linear-combination epilogue;
        # Tensor Core kernels use the plain one.
        if opclass == cutlass_bindings.OpClass.Simt:
            epilogue_functor_cls = LinearCombinationClamp
        else:
            epilogue_functor_cls = LinearCombination
        epilogue_functor = epilogue_functor_cls(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)

        swizzling_functor = cutlass_bindings.IdentitySwizzle1

        operation = GemmOperationUniversal(
            arch=90, tile_description=tile_description, A=A, B=B, C=C,
            epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)

        self.assertTrue(test_all_gemm(operation, "universal"))

    if persistent:
        suffix = "_persistent"
    else:
        suffix = ""

    name = name_fn(layouts, alignments, element_output, element_accumulator,
                   element_epilogue, cluster_shape, threadblock_shape, stages, opclass=opclass, suffix=suffix)
    setattr(cls, name, run)

    return run
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 90, "Device compute capability is insufficient for SM90 tests.")
class GemmS8Sm90(unittest.TestCase):
    """Empty holder class; test methods are attached to it dynamically in __main__."""
|
||||
|
||||
|
||||
add_test_tensorop = partial(add_test, opclass=cutlass_bindings.OpClass.TensorOp)
add_test_simt = partial(add_test, opclass=cutlass_bindings.OpClass.Simt)

_i8 = cutlass_bindings.int8
_i32 = cutlass_bindings.int32

# Tensor Core cases: (layouts, alignments, cluster shape, threadblock shape, stages, persistent)
_tensorop_cases = [
    # Cases with 1x1x1 clusters
    (LayoutCombination.TNN, [16, 16, 16], [1, 1, 1], [128, 128, 128], 3,    False),
    (LayoutCombination.TNT, [16, 16, 16], [1, 1, 1], [128, 128, 128], None, False),
    (LayoutCombination.TNT, [16, 16, 8],  [1, 1, 1], [128, 128, 128], None, False),
    (LayoutCombination.TNT, [16, 16, 16], [1, 1, 1], [64, 128, 128],  None, False),
    (LayoutCombination.TNT, [16, 16, 16], [1, 1, 1], [128, 64, 32],   None, False),
    (LayoutCombination.TNT, [4, 4, 16],   [1, 1, 1], [128, 128, 128], None, False),
    # Cases with larger cluster shapes
    (LayoutCombination.TNT, [16, 16, 16], [2, 2, 1], [128, 128, 128], None, False),
    (LayoutCombination.TNT, [16, 16, 16], [1, 4, 1], [128, 128, 128], None, False),
    (LayoutCombination.TNT, [16, 16, 16], [4, 4, 1], [128, 128, 128], None, False),
    # Case with persistent warp-specialized threadblocks
    (LayoutCombination.TNT, [16, 16, 16], [2, 1, 1], [128, 128, 128], None, True),
]
for _layouts, _aligns, _cluster, _tb_shape, _stages, _persistent in _tensorop_cases:
    add_test_tensorop(GemmS8Sm90, _layouts, _aligns, _i8, _i32, _i32,
                      _cluster, _tb_shape, _stages, persistent=_persistent)

# Case for SIMT
add_test_simt(GemmS8Sm90, LayoutCombination.TNN, [1, 1, 1], _i8, _i32, _i32, [1, 1, 1], [64, 32, 8], 2)
|
||||
|
||||
if __name__ == '__main__':
    # Pre-allocate memory pools before running kernels (2**30-byte sizes;
    # presumably device and host pool sizes — confirm against
    # cutlass.backend.get_memory_pool).
    cutlass.backend.get_memory_pool(2**30, 2**30)
    unittest.main()
|
||||
41
test/python/backend/gemm/run_all_tests.py
Normal file
41
test/python/backend/gemm/run_all_tests.py
Normal file
@ -0,0 +1,41 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
import cutlass.backend
|
||||
import unittest
|
||||
|
||||
if __name__ == '__main__':
    # Pre-allocate memory pools before any kernels run.
    cutlass.backend.get_memory_pool(2**30, 2**30)
    # Discover every gemm_*.py test module beneath the current directory and
    # run the collected suite with the plain text runner.
    suite = unittest.TestLoader().discover('./', 'gemm_*.py')
    unittest.runner.TextTestRunner().run(suite)
|
||||
161
test/python/emit/pytorch.py
Normal file
161
test/python/emit/pytorch.py
Normal file
@ -0,0 +1,161 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
"""
|
||||
Tests emitting a CUTLASS kernel to a PyTorch CUDA extension
|
||||
"""
|
||||
|
||||
import random
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
import cutlass
|
||||
|
||||
if cutlass.utils.datatypes.torch_available:
|
||||
import torch
|
||||
|
||||
|
||||
def _initialize(dtype, M: int, N: int, K: int):
    """
    Utility function to initialize A, B, C, and D matrices corresponding to dimensions M, N, and K

    :param dtype: data type of tensors
    :param M: M dimension of GEMM problem
    :type M: int
    :param N: N dimension of GEMM problem
    :type N: int
    :param K: K dimension of GEMM problem
    :type K: int

    :return: initialized tensors A (M x K), B (K x N), C (M x N), and D (M x N)
    :rtype: list
    """
    sizes = [(M, K), (K, N), (M, N), (M, N)]
    # Small integer values in [-3, 3); presumably chosen so the half-precision
    # reference matmul matches the kernel exactly — confirm. NOTE(review):
    # assumes a CUDA device is available.
    return [torch.randint(-3, 3, size, device='cuda').to(dtype) for size in sizes]
|
||||
|
||||
|
||||
def _generate_problems(dtype, num):
    """
    Utility function to generate `num` GEMMs of random sizes

    :param dtype: data type of tensors
    :param num: number of GEMMs to generate
    :type num: int

    :return: lists of A, B, C, and D tensors
    :rtype: list
    """
    candidate_dims = [128, 256, 512, 1024]
    # One bucket per operand role; each problem contributes one tensor to each.
    buckets = ([], [], [], [])
    for _ in range(num):
        dims = [random.choice(candidate_dims) for _ in range(3)]
        for bucket, tensor in zip(buckets, _initialize(dtype, *dims)):
            bucket.append(tensor)
    return buckets
|
||||
|
||||
|
||||
@unittest.skipIf(not cutlass.utils.datatypes.torch_available, 'PyTorch must be available to run PyTorch extension tests')
class PyTorchExtensionTest(unittest.TestCase):
    """
    Tests that emit a CUTLASS kernel as a JIT-compiled PyTorch CUDA extension
    and compare its results against pure-PyTorch references.
    """

    def _assert_all_close(self, results, references):
        """
        Assert elementwise closeness for a single tensor pair or two
        equal-length lists of tensors.

        Uses unittest assertions rather than bare `assert` so failures are
        reported properly and are not stripped under `python -O`.
        """
        if isinstance(results, (list, tuple)):
            self.assertEqual(len(results), len(references))
            for result, reference in zip(results, references):
                self.assertTrue(torch.allclose(result, reference))
        else:
            self.assertTrue(torch.allclose(results, references))

    def test_gemm(self):
        """Emit a single relu-epilogue GEMM and verify several alpha/beta call forms."""
        random.seed(2023)

        dtype = torch.float16
        plan = cutlass.op.Gemm(element=dtype, layout=cutlass.LayoutType.RowMajor)
        plan.activation = cutlass.epilogue.relu
        op = plan.construct()

        with tempfile.TemporaryDirectory() as tmpdir:
            mod = cutlass.emit.pytorch(op, name='gemm_mod', cc=plan.cc, sourcedir=tmpdir, jit=True)

            A, B, C, _ = _initialize(dtype, 1024, 256, 512)

            # With default alpha=1 / beta=0, C does not contribute to the output,
            # so all four call forms below share one reference.
            D_ref = torch.nn.functional.relu(A @ B)
            for args in ((A, B), (A, B, C), (A, B, C, 1.0), (A, B, C, 1.0, 0.0)):
                self._assert_all_close(mod.run(*args), D_ref)

            # Non-trivial alpha/beta bring C into the computation.
            alpha = 2.0
            beta = -1.0
            D_ref = torch.nn.functional.relu((A @ B) * alpha + (beta * C))
            self._assert_all_close(mod.run(A, B, C, alpha, beta), D_ref)

    def test_grouped_gemm(self):
        """Emit a grouped GEMM and verify all problems for several alpha/beta call forms."""
        random.seed(2023)

        dtype = torch.float16
        plan = cutlass.op.GroupedGemm(element=dtype, layout=cutlass.LayoutType.RowMajor)
        op = plan.construct()

        with tempfile.TemporaryDirectory() as tmpdir:
            mod = cutlass.emit.pytorch(op, name='grouped_gemm_mod', cc=plan.cc, sourcedir=tmpdir, jit=True)

            As, Bs, Cs, _ = _generate_problems(dtype, 50)

            # With default alpha=1 / beta=0, the Cs do not contribute.
            Ds_ref = [a @ b for a, b in zip(As, Bs)]
            for args in ((As, Bs), (As, Bs, Cs), (As, Bs, Cs, 1.0), (As, Bs, Cs, 1.0, 0.0)):
                self._assert_all_close(mod.run(*args), Ds_ref)

            # Non-trivial alpha/beta bring the Cs into the computation.
            alpha = 2.0
            beta = -1.0
            Ds_ref = [(a @ b) * alpha + (beta * c) for a, b, c in zip(As, Bs, Cs)]
            self._assert_all_close(mod.run(As, Bs, Cs, alpha, beta), Ds_ref)
|
||||
|
||||
|
||||
# Allow running this test file directly
if __name__ == '__main__':
    unittest.main()
|
||||
167
test/python/gemm/gemm_f16_sm80.py
Normal file
167
test/python/gemm/gemm_f16_sm80.py
Normal file
@ -0,0 +1,167 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
"""
|
||||
Low-level functionality tests for GEMM with F16 operands on SM80
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
|
||||
import cutlass
|
||||
from cutlass.utils.datatypes import binding_opclass, binding_type
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.utils import LayoutCombination, get_name
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
cc = 80
|
||||
|
||||
# Partial specialization for naming tests
|
||||
bound_type = binding_type(cutlass.DataType.f16)
|
||||
name_fn = partial(get_name, element_a=bound_type, element_b=bound_type, arch=cc)
|
||||
|
||||
|
||||
def add_test(cls, layouts, alignments, element_output, element_accumulator,
             threadblock_shape, warp_count, stages, opclass, swizzle=None):
    """
    Create a test-running function with the given specification and set it as a method of `cls`.

    :param cls: class to which the generated method will be added
    :type cls: type
    :param layouts: layouts of A, B, and C operands
    :type layouts: list or tuple
    :param alignments: alignments of A, B, and C operands
    :type alignments: list or tuple
    :param element_output: data type of the output element
    :type element_output: cutlass.DataType
    :param element_accumulator: data type used in accumulation
    :type element_accumulator: cutlass.DataType
    :param threadblock_shape: dimensions of threadblock tiles
    :type threadblock_shape: list or tuple
    :param warp_count: warps to be launched per threadblock dimension
    :type warp_count: list or tuple
    :param stages: number of pipeline stages to use in the kernel
    :type stages: int
    :param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
    :type opclass: cutlass.OpClass
    :param swizzle: threadblock swizzling functor
    """
    # SM80 kernels do not use threadblock clusters, so the cluster shape is fixed
    cluster_shape = [1, 1, 1]

    def run(self):
        """
        Dynamically-generated function that constructs a GEMM operation and verifies it against
        multiple test cases.
        """
        element_A = cutlass.DataType.f16
        element_B = cutlass.DataType.f16
        layout_A, layout_B, layout_C = layouts
        alignment_A, alignment_B, alignment_C = alignments

        plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B,
                               element_C=element_output, element_D=element_output,
                               layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
                               element_accumulator=element_accumulator,
                               kernel_cc=cc)

        plan.opclass = opclass
        if swizzle is not None:
            plan.swizzling_functor = swizzle
        # Take the first available tile description and override the parameters
        # this test exercises
        td = plan.tile_descriptions()[0]
        td.threadblock_shape = threadblock_shape
        td.stages = stages
        td.warp_count = warp_count
        td.cluster_shape = cluster_shape
        op = plan.construct(tile_description=td, alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)
        self.assertTrue(test_all_gemm(op, 'universal'))

    element_epilogue = element_accumulator
    # Derive a unique, descriptive method name from the test specification
    name = name_fn(layouts, alignments, binding_type(element_output), binding_type(element_accumulator),
                   binding_type(element_epilogue), cluster_shape, threadblock_shape, stages, opclass=binding_opclass(opclass))
    setattr(cls, name, run)

    return run
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF16Sm80(unittest.TestCase):
    """
    Wrapper class to which tests will be added dynamically in __main__
    """
    # Test methods are attached via add_test(...) below
    pass
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF16Sm80StreamK(unittest.TestCase):
    """
    Wrapper class to which tests will be added dynamically in __main__
    """
    # Test methods using the stream-K swizzle are attached via add_test(...) below
    pass
|
||||
|
||||
|
||||
# Tests using TensorOp
add_test_tensorop = partial(add_test, opclass=cutlass.OpcodeClass.TensorOp)

# All eight layout combinations at the default 128x128x32 tile
add_test_tensorop(GemmF16Sm80, LayoutCombination.NNN, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.NNT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.NTN, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.NTT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.TNN, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.TNT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.TTN, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.TTT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
# Variations in tile shape, warp count, alignment, accumulator type, and stage count
add_test_tensorop(GemmF16Sm80, LayoutCombination.TNT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [64, 128, 32], [1, 2, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.TNT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [128, 64, 32], [2, 1, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.TNT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [64, 64, 64], [1, 1, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.TNT, [4, 4, 8], cutlass.DataType.f16, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.TNT, [4, 4, 8], cutlass.DataType.f16, cutlass.DataType.f16, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.TNT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f16, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF16Sm80, LayoutCombination.TNT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [64, 64, 64], [1, 1, 1], 5)
add_test_tensorop(GemmF16Sm80, LayoutCombination.TNT, [2, 2, 2], cutlass.DataType.f16, cutlass.DataType.f16, [128, 128, 32], [2, 2, 1], 3)

# Tests using SIMT
add_test_simt = partial(add_test, opclass=cutlass.OpcodeClass.Simt)

add_test_simt(GemmF16Sm80, LayoutCombination.NNN, [1, 1, 1], cutlass.DataType.f16, cutlass.DataType.f32, [128, 128, 8], [2, 2, 1], 2)
add_test_simt(GemmF16Sm80, LayoutCombination.TNN, [1, 1, 1], cutlass.DataType.f16, cutlass.DataType.f32, [64, 128, 8], [1, 2, 1], 2)
add_test_simt(GemmF16Sm80, LayoutCombination.NTN, [1, 1, 1], cutlass.DataType.f16, cutlass.DataType.f32, [128, 64, 8], [2, 1, 1], 2)
add_test_simt(GemmF16Sm80, LayoutCombination.TTN, [1, 1, 1], cutlass.DataType.f16, cutlass.DataType.f32, [64, 64, 8], [1, 1, 1], 2)
add_test_simt(GemmF16Sm80, LayoutCombination.NNT, [1, 1, 1], cutlass.DataType.f16, cutlass.DataType.f16, [128, 128, 8], [2, 2, 1], 2)

# Stream K tests
add_test_streamk = partial(add_test, opclass=cutlass.OpcodeClass.TensorOp, swizzle=cutlass.swizzle.ThreadblockSwizzleStreamK)
add_test_streamk(GemmF16Sm80StreamK, LayoutCombination.NNN, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
add_test_streamk(GemmF16Sm80StreamK, LayoutCombination.TNT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [64, 64, 64], [1, 1, 1], 5)

# Allow running this test file directly
if __name__ == '__main__':
    unittest.main()
|
||||
173
test/python/gemm/gemm_f16_sm90.py
Normal file
173
test/python/gemm/gemm_f16_sm90.py
Normal file
@ -0,0 +1,173 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
"""
|
||||
Low-level functionality tests for GEMM with F16 operands on SM90
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
|
||||
import cutlass
|
||||
from cutlass.utils.datatypes import binding_opclass, binding_type
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.utils import LayoutCombination, get_name
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
cc = 90
|
||||
|
||||
# Partial specialization for naming tests
|
||||
bound_type = binding_type(cutlass.DataType.f16)
|
||||
name_fn = partial(get_name, element_a=bound_type, element_b=bound_type, arch=cc)
|
||||
|
||||
|
||||
def add_test(cls, layouts, alignments, element_output, element_accumulator,
             cluster_shape, threadblock_shape, stages, opclass,
             kernel_schedule=cutlass.KernelScheduleType.ScheduleAuto,
             swizzle=None):
    """
    Create a test-running function with the given specification and set it as a method of `cls`.

    :param cls: class to which the generated method will be added
    :type cls: type
    :param layouts: layouts of A, B, and C operands
    :type layouts: list or tuple
    :param alignments: alignments of A, B, and C operands
    :type alignments: list or tuple
    :param element_output: data type of the output element
    :type element_output: cutlass.DataType
    :param element_accumulator: data type used in accumulation
    :type element_accumulator: cutlass.DataType
    :param cluster_shape: dimensions of threadblock cluster
    :type cluster_shape: list or tuple
    :param threadblock_shape: dimensions of threadblock tiles
    :type threadblock_shape: list or tuple
    :param stages: number of pipeline stages to use in the kernel
    :type stages: int
    :param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
    :type opclass: cutlass.OpClass
    :param kernel_schedule: kernel schedule type
    :type kernel_schedule: cutlass.KernelScheduleType
    :param swizzle: threadblock swizzling functor
    """

    def run(self):
        """
        Dynamically-generated function that constructs a GEMM operation and verifies it against
        multiple test cases.
        """
        element_A = cutlass.DataType.f16
        element_B = cutlass.DataType.f16
        layout_A, layout_B, layout_C = layouts
        alignment_A, alignment_B, alignment_C = alignments

        # No kernel_cc is passed here: the plan targets the current device's
        # compute capability (SM90 on the devices these tests run on)
        plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B,
                               element_C=element_output, element_D=element_output,
                               layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
                               element_accumulator=element_accumulator)

        plan.opclass = opclass
        if swizzle is not None:
            plan.swizzling_functor = swizzle
        # Take the first available tile description and override the parameters
        # this test exercises
        td = plan.tile_descriptions()[0]
        td.threadblock_shape = threadblock_shape
        td.stages = stages
        td.cluster_shape = cluster_shape
        td.kernel_schedule = kernel_schedule
        op = plan.construct(tile_description=td, alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)
        self.assertTrue(test_all_gemm(op, 'universal'))

    element_epilogue = element_accumulator
    # Derive a unique, descriptive method name from the test specification
    name = name_fn(layouts, alignments, binding_type(element_output), binding_type(element_accumulator),
                   binding_type(element_epilogue), cluster_shape, threadblock_shape, stages,
                   opclass=binding_opclass(opclass), kernel_schedule=kernel_schedule)
    setattr(cls, name, run)

    return run
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM90 tests.')
class GemmF16Sm90(unittest.TestCase):
    """
    Wrapper class to which tests will be added dynamically in __main__
    """
    # Test methods are attached via add_test(...) below
    pass
|
||||
|
||||
|
||||
add_test_tensorop = partial(add_test, opclass=cutlass.OpcodeClass.TensorOp)

# Tests with 1x1x1 clusters
add_test_tensorop(GemmF16Sm90, LayoutCombination.NNN, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [1, 1, 1], [128, 128, 32], 3)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NNT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NTN, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NTT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNN, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [4, 4, 8], cutlass.DataType.f16, cutlass.DataType.f32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [4, 4, 8], cutlass.DataType.f16, cutlass.DataType.f16, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f16, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f32, [1, 1, 1], [64, 64, 64], 5)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [2, 2, 2], cutlass.DataType.f16, cutlass.DataType.f16, [1, 1, 1], [128, 128, 32], None)

# Tests with different cluster shapes
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass.DataType.f16, cutlass.DataType.f16, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNN, [8, 8, 4], cutlass.DataType.f32, cutlass.DataType.f32, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NTN, [8, 8, 4], cutlass.DataType.f32, cutlass.DataType.f32, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NNN, [8, 8, 4], cutlass.DataType.f32, cutlass.DataType.f32, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 4], cutlass.DataType.f32, cutlass.DataType.f32, [1, 4, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 4], cutlass.DataType.f32, cutlass.DataType.f32, [2, 4, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 4], cutlass.DataType.f32, cutlass.DataType.f32, [4, 1, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 4], cutlass.DataType.f32, cutlass.DataType.f32, [4, 2, 1], [64, 128, 64], None)

# Tests for different schedule modes
add_test_schedule = partial(add_test, GemmF16Sm90, LayoutCombination.TTN, [8, 8, 4], cutlass.DataType.f32, cutlass.DataType.f32, opclass=cutlass.OpcodeClass.TensorOp)
add_test_schedule([1, 1, 1], [128, 128, 64], None, kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedPingpong)
add_test_schedule([1, 1, 1], [128, 128, 64], None, kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedCooperative)
add_test_schedule([2, 1, 1], [128, 128, 64], None, kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedPingpong)
add_test_schedule([2, 1, 1], [128, 128, 64], None, kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedCooperative)
add_test_schedule([2, 1, 1], [256, 128, 64], None, kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedCooperative)
add_test_schedule([2, 1, 1], [128, 128, 64], 5, kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedPingpong)
add_test_schedule([2, 1, 1], [128, 128, 64], 5, kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedCooperative)

# Tests using SIMT
add_test_simt = partial(add_test, opclass=cutlass.OpcodeClass.Simt)
add_test_simt(GemmF16Sm90, LayoutCombination.NNN, [1, 1, 1], cutlass.DataType.f16, cutlass.DataType.f32, [1, 1, 1], [128, 128, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.TNN, [1, 1, 1], cutlass.DataType.f16, cutlass.DataType.f32, [1, 1, 1], [64, 128, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.NTN, [1, 1, 1], cutlass.DataType.f16, cutlass.DataType.f32, [1, 1, 1], [128, 64, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.TTN, [1, 1, 1], cutlass.DataType.f16, cutlass.DataType.f32, [1, 1, 1], [64, 64, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.NNT, [1, 1, 1], cutlass.DataType.f16, cutlass.DataType.f16, [1, 1, 1], [128, 128, 8], 2)


# Allow running this test file directly
if __name__ == '__main__':
    unittest.main()
|
||||
155
test/python/gemm/gemm_f32_sm80.py
Normal file
155
test/python/gemm/gemm_f32_sm80.py
Normal file
@ -0,0 +1,155 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
"""
|
||||
Low-level functionality tests for GEMM with F32 operands on SM80
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
|
||||
import cutlass
|
||||
from cutlass.utils.datatypes import binding_opclass, binding_type
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.utils import LayoutCombination, get_name
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
cc = 80
|
||||
|
||||
# Partial specialization for naming tests
|
||||
bound_type = binding_type(cutlass.DataType.f32)
|
||||
name_fn = partial(get_name, element_a=bound_type, element_b=bound_type, arch=cc)
|
||||
|
||||
|
||||
def add_test(cls, layouts, alignments, element_output, element_accumulator,
             threadblock_shape, warp_count, stages, opclass, swizzle=None):
    """
    Create a test-running function with the given specification and set it as a method of `cls`.

    :param cls: class to which the generated method will be added
    :type cls: type
    :param layouts: layouts of A, B, and C operands
    :type layouts: list or tuple
    :param alignments: alignments of A, B, and C operands
    :type alignments: list or tuple
    :param element_output: data type of the output element
    :type element_output: cutlass.DataType
    :param element_accumulator: data type used in accumulation
    :type element_accumulator: cutlass.DataType
    :param threadblock_shape: dimensions of threadblock tiles
    :type threadblock_shape: list or tuple
    :param warp_count: warps to be launched per threadblock dimension
    :type warp_count: list or tuple
    :param stages: number of pipeline stages to use in the kernel
    :type stages: int
    :param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
    :type opclass: cutlass.OpClass
    :param swizzle: threadblock swizzling functor
    """

    # SM80 kernels do not use threadblock clusters, so the cluster shape is fixed
    cluster_shape = [1, 1, 1]

    def run(self):
        """
        Dynamically-generated function that constructs a GEMM operation and verifies it against
        multiple test cases.
        """
        element_A = cutlass.DataType.f32
        element_B = cutlass.DataType.f32
        layout_A, layout_B, layout_C = layouts
        alignment_A, alignment_B, alignment_C = alignments

        plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B,
                               element_C=element_output, element_D=element_output,
                               layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
                               element_accumulator=element_accumulator,
                               kernel_cc=cc)

        plan.opclass = opclass
        if swizzle is not None:
            plan.swizzling_functor = swizzle
        # Take the first available tile description and override the parameters
        # this test exercises
        td = plan.tile_descriptions()[0]
        td.threadblock_shape = threadblock_shape
        td.stages = stages
        td.warp_count = warp_count
        td.cluster_shape = cluster_shape
        op = plan.construct(tile_description=td, alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)

        self.assertTrue(test_all_gemm(op, 'universal'))

    element_epilogue = element_accumulator
    # Derive a unique, descriptive method name from the test specification
    name = name_fn(layouts, alignments, binding_type(element_output), binding_type(element_accumulator),
                   binding_type(element_epilogue), cluster_shape, threadblock_shape, stages, opclass=binding_opclass(opclass))
    setattr(cls, name, run)

    return run
|
||||
|
||||
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF32Sm80(unittest.TestCase):
    """
    Wrapper class to which tests will be added dynamically in __main__
    """
    # Test methods are attached via add_test(...) below
    pass
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF32Sm80StreamK(unittest.TestCase):
    """
    Wrapper class to which tests will be added dynamically in __main__
    """
    # Test methods using the stream-K swizzle are attached via add_test(...) below
    pass
|
||||
|
||||
|
||||
# Tests using TensorOp
add_test_tensorop = partial(add_test, opclass=cutlass.OpcodeClass.TensorOp)

add_test_tensorop(GemmF32Sm80, LayoutCombination.NNN, [4, 4, 4], cutlass.DataType.f32, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF32Sm80, LayoutCombination.NNT, [4, 4, 4], cutlass.DataType.f32, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)
add_test_tensorop(GemmF32Sm80, LayoutCombination.NTN, [4, 4, 4], cutlass.DataType.f32, cutlass.DataType.f32, [64, 128, 32], [1, 2, 1], 3)
add_test_tensorop(GemmF32Sm80, LayoutCombination.NTN, [4, 4, 4], cutlass.DataType.f32, cutlass.DataType.f32, [64, 64, 32], [1, 1, 1], 4)
# Tests using SIMT
add_test_simt = partial(add_test, opclass=cutlass.OpcodeClass.Simt)

add_test_simt(GemmF32Sm80, LayoutCombination.NNN, [1, 1, 1], cutlass.DataType.f32, cutlass.DataType.f32, [128, 128, 8], [2, 2, 1], 2)
add_test_simt(GemmF32Sm80, LayoutCombination.TNN, [1, 1, 1], cutlass.DataType.f32, cutlass.DataType.f32, [64, 128, 8], [1, 2, 1], 2)
add_test_simt(GemmF32Sm80, LayoutCombination.NTN, [1, 1, 1], cutlass.DataType.f32, cutlass.DataType.f32, [128, 64, 8], [2, 1, 1], 2)
add_test_simt(GemmF32Sm80, LayoutCombination.TTN, [1, 1, 1], cutlass.DataType.f32, cutlass.DataType.f32, [64, 64, 8], [1, 1, 1], 2)
add_test_simt(GemmF32Sm80, LayoutCombination.NNT, [1, 1, 1], cutlass.DataType.f32, cutlass.DataType.f32, [128, 128, 8], [2, 2, 1], 2)

# Stream K tests
add_test_streamk = partial(add_test, opclass=cutlass.OpcodeClass.TensorOp, swizzle=cutlass.swizzle.ThreadblockSwizzleStreamK)
add_test_streamk(GemmF32Sm80StreamK, LayoutCombination.TTN, [4, 4, 4], cutlass.DataType.f32, cutlass.DataType.f32, [128, 128, 32], [2, 2, 1], 3)


# Allow running this test file directly
if __name__ == '__main__':
    unittest.main()
|
||||
156
test/python/gemm/gemm_f64_sm80.py
Normal file
156
test/python/gemm/gemm_f64_sm80.py
Normal file
@ -0,0 +1,156 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
"""
|
||||
Low-level functionality tests for GEMM with F64 operands on SM80
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
|
||||
import cutlass
|
||||
from cutlass.utils.datatypes import binding_opclass, binding_type
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.utils import LayoutCombination, get_name
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
cc = 80
|
||||
|
||||
# Partial specialization for naming tests
|
||||
bound_type = binding_type(cutlass.DataType.f64)
|
||||
name_fn = partial(get_name, element_a=bound_type, element_b=bound_type, arch=cc)
|
||||
|
||||
|
||||
def add_test(cls, layouts, alignments, element_output, element_accumulator,
             threadblock_shape, warp_count, stages, opclass, swizzle=None):
    """
    Create a test-running function with the given specification and set it as a method of `cls`.

    :param cls: class to which the generated method will be added
    :type cls: type
    :param layouts: layouts of A, B, and C operands
    :type layouts: list or tuple
    :param alignments: alignments of A, B, and C operands
    :type alignments: list or tuple
    :param element_output: data type of the output element
    :type element_output: cutlass.DataType
    :param element_accumulator: data type used in accumulation
    :type element_accumulator: cutlass.DataType
    :param threadblock_shape: dimensions of threadblock tiles
    :type threadblock_shape: list or tuple
    :param warp_count: warps to be launched per threadblock dimension
    :type warp_count: list or tuple
    :param stages: number of pipeline stages to use in the kernel
    :type stages: int
    :param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
    :type opclass: cutlass.OpClass
    :param swizzle: threadblock swizzling functor

    :return: the generated test function
    """

    # SM80 kernels do not use threadblock clusters, so the cluster shape is
    # fixed to the trivial 1x1x1 here rather than exposed as a parameter.
    cluster_shape = [1, 1, 1]

    def run(self):
        """
        Dynamically-generated function that constructs a GEMM operation and verifies it against
        multiple test cases.
        """
        element_A = cutlass.DataType.f64
        element_B = cutlass.DataType.f64
        layout_A, layout_B, layout_C = layouts
        alignment_A, alignment_B, alignment_C = alignments

        plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B,
                               element_C=element_output, element_D=element_output,
                               layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
                               element_accumulator=element_accumulator,
                               kernel_cc=cc)

        plan.opclass = opclass
        if swizzle is not None:
            plan.swizzling_functor = swizzle
        td = plan.tile_descriptions()[0]
        td.threadblock_shape = threadblock_shape
        td.stages = stages
        td.warp_count = warp_count
        td.cluster_shape = cluster_shape
        op = plan.construct(tile_description=td, alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)

        self.assertTrue(test_all_gemm(op, 'universal'))

    # The epilogue element type mirrors the accumulator type for these tests.
    element_epilogue = element_accumulator
    name = name_fn(layouts, alignments, binding_type(element_output), binding_type(element_accumulator),
                   binding_type(element_epilogue), cluster_shape, threadblock_shape, stages, opclass=binding_opclass(opclass))
    setattr(cls, name, run)

    return run
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF64Sm80(unittest.TestCase):
    """Empty wrapper class; test methods are attached to it dynamically via `add_test`."""
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF64Sm80StreamK(unittest.TestCase):
    """Empty wrapper class; stream-K test methods are attached to it dynamically via `add_test`."""
|
||||
|
||||
|
||||
# Tests using TensorOp
f64 = cutlass.DataType.f64
add_test_tensorop = partial(add_test, opclass=cutlass.OpcodeClass.TensorOp)

for _layouts, _tb_shape, _warps, _stages in (
    (LayoutCombination.NNN, [128, 128, 16], [4, 2, 1], 3),
    (LayoutCombination.NTN, [64, 64, 16],   [2, 2, 1], 4),
    (LayoutCombination.TTN, [32, 32, 16],   [2, 1, 1], 5),
):
    add_test_tensorop(GemmF64Sm80, _layouts, [1, 1, 1], f64, f64, _tb_shape, _warps, _stages)

# Tests using SIMT
add_test_simt = partial(add_test, opclass=cutlass.OpcodeClass.Simt)

for _layouts, _tb_shape, _warps in (
    (LayoutCombination.NNN, [128, 128, 8], [2, 2, 1]),
    (LayoutCombination.TNN, [64, 128, 8],  [1, 2, 1]),
    (LayoutCombination.NTN, [128, 64, 8],  [2, 1, 1]),
    (LayoutCombination.TTN, [64, 64, 8],   [1, 1, 1]),
    (LayoutCombination.NNT, [128, 128, 8], [2, 2, 1]),
):
    add_test_simt(GemmF64Sm80, _layouts, [1, 1, 1], f64, f64, _tb_shape, _warps, 2)

# Stream K tests
add_test_streamk = partial(add_test, opclass=cutlass.OpcodeClass.TensorOp, swizzle=cutlass.swizzle.ThreadblockSwizzleStreamK)
add_test_streamk(GemmF64Sm80StreamK, LayoutCombination.NTT, [1, 1, 1], f64, f64, [128, 128, 16], [4, 2, 1], 3)


if __name__ == '__main__':
    unittest.main()
|
||||
142
test/python/gemm/gemm_f64_sm90.py
Normal file
142
test/python/gemm/gemm_f64_sm90.py
Normal file
@ -0,0 +1,142 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
"""
|
||||
Low-level functionality tests for GEMM with F64 operands on SM90
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
|
||||
import cutlass
|
||||
from cutlass.utils.datatypes import binding_opclass, binding_type
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.utils import LayoutCombination, get_name
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
cc = 90
|
||||
|
||||
# Partial specialziation for naming tests
|
||||
bound_type = binding_type(cutlass.DataType.f64)
|
||||
name_fn = partial(get_name, element_a=bound_type, element_b=bound_type, arch=cc)
|
||||
|
||||
|
||||
def add_test(cls, layouts, alignments, element_output, element_accumulator,
             cluster_shape, threadblock_shape, stages, opclass, persistent=False, swizzle=None):
    """
    Create a test-running function with the given specification and set it as a method of `cls`.

    :param cls: class to which the generated method will be added
    :type cls: type
    :param layouts: layouts of A, B, and C operands
    :type layouts: list or tuple
    :param alignments: alignments of A, B, and C operands
    :type alignments: list or tuple
    :param element_output: data type of the output element
    :type element_output: cutlass.DataType
    :param element_accumulator: data type used in accumulation
    :type element_accumulator: cutlass.DataType
    :param cluster_shape: dimensions of threadblock cluster
    :type cluster_shape: list or tuple
    :param threadblock_shape: dimensions of threadblock tiles
    :type threadblock_shape: list or tuple
    :param stages: number of pipeline stages to use in the kernel
    :type stages: int
    :param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
    :type opclass: cutlass.OpClass
    :param persistent: whether this is a persistent warp-specialized kernel
    :type persistent: bool
    :param swizzle: threadblock swizzling functor

    :return: the generated test function
    """

    def run(self):
        """
        Dynamically-generated function that constructs a GEMM operation and verifies it against
        multiple test cases.
        """
        element_A = cutlass.DataType.f64
        element_B = cutlass.DataType.f64
        layout_A, layout_B, layout_C = layouts
        alignment_A, alignment_B, alignment_C = alignments

        plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B,
                               element_C=element_output, element_D=element_output,
                               layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
                               element_accumulator=element_accumulator)

        plan.opclass = opclass
        if swizzle is not None:
            plan.swizzling_functor = swizzle
        td = plan.tile_descriptions()[0]
        td.threadblock_shape = threadblock_shape
        td.stages = stages
        td.cluster_shape = cluster_shape
        td.persistent = persistent
        op = plan.construct(tile_description=td, alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)
        self.assertTrue(test_all_gemm(op, 'universal'))

    # Distinguish persistent warp-specialized kernels in the generated test name.
    if persistent:
        suffix = "_persistent"
    else:
        suffix = ""

    # The epilogue element type mirrors the accumulator type for these tests.
    element_epilogue = element_accumulator
    name = name_fn(layouts, alignments, binding_type(element_output), binding_type(element_accumulator),
                   binding_type(element_epilogue), cluster_shape, threadblock_shape, stages,
                   opclass=binding_opclass(opclass), suffix=suffix)
    setattr(cls, name, run)

    return run
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM90 tests.')
class GemmF64Sm90(unittest.TestCase):
    """Empty wrapper class; test methods are attached to it dynamically via `add_test`."""
|
||||
|
||||
|
||||
add_test_tensorop = partial(add_test, opclass=cutlass.OpcodeClass.TensorOp)
add_test_simt = partial(add_test, opclass=cutlass.OpcodeClass.Simt)

f64 = cutlass.DataType.f64

# TensorOp kernels with a trivial 1x1x1 cluster
for _layouts in (LayoutCombination.NNT, LayoutCombination.TNN):
    add_test_tensorop(GemmF64Sm90, _layouts, [1, 1, 1], f64, f64, [1, 1, 1], [128, 128, 32], 3)

# SIMT kernels
for _layouts, _tb_shape in (
    (LayoutCombination.NNN, [128, 128, 8]),
    (LayoutCombination.TTT, [64, 128, 8]),
):
    add_test_simt(GemmF64Sm90, _layouts, [1, 1, 1], f64, f64, [1, 1, 1], _tb_shape, 2)


if __name__ == '__main__':
    unittest.main()
|
||||
156
test/python/gemm/gemm_s8_sm80.py
Normal file
156
test/python/gemm/gemm_s8_sm80.py
Normal file
@ -0,0 +1,156 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
"""
|
||||
Low-level functionality tests for GEMM with S8 operands on SM80
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
|
||||
import cutlass
|
||||
from cutlass.utils.datatypes import binding_opclass, binding_type
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.utils import LayoutCombination, get_name
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
cc = 80
|
||||
|
||||
# Partial specialization for naming tests
|
||||
bound_type = binding_type(cutlass.DataType.s8)
|
||||
name_fn = partial(get_name, element_a=bound_type, element_b=bound_type, arch=cc)
|
||||
|
||||
|
||||
def add_test(cls, layouts, alignments, element_output, element_accumulator,
             threadblock_shape, warp_count, stages, opclass, swizzle=None):
    """
    Create a test-running function with the given specification and set it as a method of `cls`.

    :param cls: class to which the generated method will be added
    :type cls: type
    :param layouts: layouts of A, B, and C operands
    :type layouts: list or tuple
    :param alignments: alignments of A, B, and C operands
    :type alignments: list or tuple
    :param element_output: data type of the output element
    :type element_output: cutlass.DataType
    :param element_accumulator: data type used in accumulation
    :type element_accumulator: cutlass.DataType
    :param threadblock_shape: dimensions of threadblock tiles
    :type threadblock_shape: list or tuple
    :param warp_count: warps to be launched per threadblock dimension
    :type warp_count: list or tuple
    :param stages: number of pipeline stages to use in the kernel
    :type stages: int
    :param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
    :type opclass: cutlass.OpClass
    :param swizzle: threadblock swizzling functor

    :return: the generated test function
    """

    # SM80 kernels do not use threadblock clusters, so the cluster shape is
    # fixed to the trivial 1x1x1 here rather than exposed as a parameter.
    cluster_shape = [1, 1, 1]

    def run(self):
        """
        Dynamically-generated function that constructs a GEMM operation and verifies it against
        multiple test cases.
        """
        element_A = cutlass.DataType.s8
        element_B = cutlass.DataType.s8
        layout_A, layout_B, layout_C = layouts
        alignment_A, alignment_B, alignment_C = alignments

        plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B,
                               element_C=element_output, element_D=element_output,
                               layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
                               element_accumulator=element_accumulator,
                               kernel_cc=cc)

        plan.opclass = opclass
        if swizzle is not None:
            plan.swizzling_functor = swizzle
        td = plan.tile_descriptions()[0]
        td.threadblock_shape = threadblock_shape
        td.stages = stages
        td.warp_count = warp_count
        td.cluster_shape = cluster_shape
        op = plan.construct(tile_description=td, alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)

        self.assertTrue(test_all_gemm(op, 'universal'))

    # The epilogue element type mirrors the accumulator type for these tests.
    element_epilogue = element_accumulator
    name = name_fn(layouts, alignments, binding_type(element_output), binding_type(element_accumulator),
                   binding_type(element_epilogue), cluster_shape, threadblock_shape, stages, opclass=binding_opclass(opclass))
    setattr(cls, name, run)

    return run
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmS8Sm80(unittest.TestCase):
    """Empty wrapper class; test methods are attached to it dynamically via `add_test`."""
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmS8Sm80StreamK(unittest.TestCase):
    """Empty wrapper class; stream-K test methods are attached to it dynamically via `add_test`."""
|
||||
|
||||
|
||||
# Tests using TensorOp
s8 = cutlass.DataType.s8
s32 = cutlass.DataType.s32
add_test_tensorop = partial(add_test, opclass=cutlass.OpcodeClass.TensorOp)

for _layouts, _align, _out, _tb_shape, _warps, _stages in (
    (LayoutCombination.TNN, [16, 16, 16], s8,  [256, 128, 64], [4, 2, 1], 3),
    (LayoutCombination.TNT, [16, 16, 16], s8,  [128, 256, 64], [2, 4, 1], 3),
    (LayoutCombination.TNN, [16, 16, 4],  s32, [64, 64, 64],   [1, 1, 1], 4),
):
    add_test_tensorop(GemmS8Sm80, _layouts, _align, _out, s32, _tb_shape, _warps, _stages)

# Tests using SIMT
add_test_simt = partial(add_test, opclass=cutlass.OpcodeClass.Simt)

for _layouts, _out, _tb_shape, _warps in (
    (LayoutCombination.NNN, s8,  [128, 128, 8], [2, 2, 1]),
    (LayoutCombination.TNN, s8,  [64, 128, 8],  [1, 2, 1]),
    (LayoutCombination.NTN, s8,  [128, 64, 8],  [2, 1, 1]),
    (LayoutCombination.TTN, s32, [64, 64, 8],   [1, 1, 1]),
    (LayoutCombination.NNT, s32, [128, 128, 8], [2, 2, 1]),
):
    add_test_simt(GemmS8Sm80, _layouts, [1, 1, 1], _out, s32, _tb_shape, _warps, 2)

# Stream K tests
add_test_streamk = partial(add_test, opclass=cutlass.OpcodeClass.TensorOp, swizzle=cutlass.swizzle.ThreadblockSwizzleStreamK)
add_test_streamk(GemmS8Sm80StreamK, LayoutCombination.TNT, [16, 16, 16], s8, s32, [128, 256, 64], [2, 4, 1], 3)


if __name__ == '__main__':
    unittest.main()
|
||||
155
test/python/gemm/gemm_s8_sm90.py
Normal file
155
test/python/gemm/gemm_s8_sm90.py
Normal file
@ -0,0 +1,155 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
"""
|
||||
Low-level functionality tests for GEMM with S8 operands on SM90
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
|
||||
import cutlass
|
||||
from cutlass.utils.datatypes import binding_opclass, binding_type
|
||||
from cutlass.backend.test.gemm_testbed import test_all_gemm
|
||||
import unittest
|
||||
|
||||
from cutlass.backend.test.utils import LayoutCombination, get_name
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
cc = 90
|
||||
|
||||
# Partial specialization for naming tests
|
||||
bound_type = binding_type(cutlass.DataType.s8)
|
||||
name_fn = partial(get_name, element_a=bound_type, element_b=bound_type, arch=cc)
|
||||
|
||||
|
||||
def add_test(cls, layouts, alignments, element_output, element_accumulator,
             cluster_shape, threadblock_shape, stages, opclass, persistent=False, swizzle=None):
    """
    Create a test-running function with the given specification and set it as a method of `cls`.

    :param cls: class to which the generated method will be added
    :type cls: type
    :param layouts: layouts of A, B, and C operands
    :type layouts: list or tuple
    :param alignments: alignments of A, B, and C operands
    :type alignments: list or tuple
    :param element_output: data type of the output element
    :type element_output: cutlass.DataType
    :param element_accumulator: data type used in accumulation
    :type element_accumulator: cutlass.DataType
    :param cluster_shape: dimensions of threadblock cluster
    :type cluster_shape: list or tuple
    :param threadblock_shape: dimensions of threadblock tiles
    :type threadblock_shape: list or tuple
    :param stages: number of pipeline stages to use in the kernel
    :type stages: int
    :param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
    :type opclass: cutlass.OpClass
    :param persistent: whether this is a persistent warp-specialized kernel
    :type persistent: bool
    :param swizzle: threadblock swizzling functor

    :return: the generated test function
    """

    def run(self):
        """
        Dynamically-generated function that constructs a GEMM operation and verifies it against
        multiple test cases.
        """
        element_A = cutlass.DataType.s8
        element_B = cutlass.DataType.s8
        layout_A, layout_B, layout_C = layouts
        alignment_A, alignment_B, alignment_C = alignments

        plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B,
                               element_C=element_output, element_D=element_output,
                               layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
                               element_accumulator=element_accumulator)

        plan.opclass = opclass
        if swizzle is not None:
            plan.swizzling_functor = swizzle
        td = plan.tile_descriptions()[0]
        td.threadblock_shape = threadblock_shape
        td.stages = stages
        td.cluster_shape = cluster_shape
        td.persistent = persistent
        op = plan.construct(tile_description=td, alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)
        self.assertTrue(test_all_gemm(op, 'universal'))

    # Distinguish persistent warp-specialized kernels in the generated test name.
    if persistent:
        suffix = "_persistent"
    else:
        suffix = ""

    # The epilogue element type mirrors the accumulator type for these tests.
    element_epilogue = element_accumulator
    name = name_fn(layouts, alignments, binding_type(element_output), binding_type(element_accumulator),
                   binding_type(element_epilogue), cluster_shape, threadblock_shape, stages,
                   opclass=binding_opclass(opclass), suffix=suffix)
    setattr(cls, name, run)

    return run
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM90 tests.')
class GemmS8Sm90(unittest.TestCase):
    """Empty wrapper class; test methods are attached to it dynamically via `add_test`."""
|
||||
|
||||
|
||||
add_test_tensorop = partial(add_test, opclass=cutlass.OpcodeClass.TensorOp)

s8 = cutlass.DataType.s8
s32 = cutlass.DataType.s32

# Tests with 1x1x1 clusters
for _layouts, _align, _tb_shape, _stages in (
    (LayoutCombination.TNN, [16, 16, 16], [128, 128, 128], 3),
    (LayoutCombination.TNT, [16, 16, 16], [128, 128, 128], None),
    (LayoutCombination.TNT, [16, 16, 8],  [128, 128, 128], None),
    (LayoutCombination.TNT, [16, 16, 16], [64, 128, 128],  None),
    (LayoutCombination.TNT, [16, 16, 16], [128, 64, 32],   None),
    (LayoutCombination.TNT, [4, 4, 16],   [128, 128, 128], None),
):
    add_test_tensorop(GemmS8Sm90, _layouts, _align, s8, s32, [1, 1, 1], _tb_shape, _stages)

# Tests with different cluster shapes
for _cluster in ([2, 2, 1], [1, 4, 1]):
    add_test_tensorop(GemmS8Sm90, LayoutCombination.TNT, [16, 16, 16], s8, s32, _cluster, [128, 128, 128], None)

# Tests with persistent warp-specialized threadblocks
add_test_tensorop(GemmS8Sm90, LayoutCombination.TNT, [16, 16, 16], s8, s32, [2, 1, 1], [128, 128, 128], None, persistent=True)

# Tests for SIMT
add_test_simt = partial(add_test, opclass=cutlass.OpcodeClass.Simt)
add_test_simt(GemmS8Sm90, LayoutCombination.TNN, [1, 1, 1], s8, s32, [1, 1, 1], [64, 32, 8], 2)


if __name__ == '__main__':
    unittest.main()
|
||||
42
test/python/gemm/run_all_tests.py
Normal file
42
test/python/gemm/run_all_tests.py
Normal file
@ -0,0 +1,42 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
import unittest
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Discover every gemm_*.py test module in this directory and run them all.
    loader = unittest.TestLoader()
    tests = loader.discover('./', 'gemm_*.py')
    testRunner = unittest.runner.TextTestRunner()
    results = testRunner.run(tests)
    if not results.wasSuccessful():
        # Exit with a nonzero status so CI detects the failure; SystemExit
        # avoids the noisy traceback a bare `raise Exception(...)` would print.
        raise SystemExit('Test cases failed')
|
||||
354
test/python/interface/gemm_interface.py
Normal file
354
test/python/interface/gemm_interface.py
Normal file
@ -0,0 +1,354 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
"""
|
||||
Tests the high-level GEMM interface
|
||||
"""
|
||||
|
||||
from math import ceil
|
||||
import unittest
|
||||
|
||||
import cutlass
|
||||
import cutlass_bindings
|
||||
import cutlass.utils.datatypes as datatypes
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
|
||||
|
||||
class GemmEquivalence:
    """
    Helper for verifying that different ways of constructing a ``cutlass.op.Gemm``
    yield equivalent operations.

    A reference plan is built in the constructor from fully-specified data types
    and layouts. Each ``*_test`` method builds alternative plans (e.g., via the
    generic ``element``/``layout`` parameters or numpy tensors) and asserts that
    they emit the same C++ kernel as the reference.
    """
    def __init__(self, element_A, element_B, element_C, element_D, element_accumulator,
                 layout_A, layout_B, layout_C, alignment_A, alignment_B, alignment_C):
        # Per-operand data types
        self.element_A = element_A
        self.element_B = element_B
        self.element_C = element_C
        self.element_D = element_D
        self.element_accumulator = element_accumulator

        # Per-operand layouts
        self.layout_A = layout_A
        self.layout_B = layout_B
        self.layout_C = layout_C

        # Per-operand alignments
        self.alignment_A = alignment_A
        self.alignment_B = alignment_B
        self.alignment_C = alignment_C

        # Reference plan and operation against which all alternatives are compared
        self.plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B, element_C=element_C,
                                    element_D=element_D, element_accumulator=element_accumulator,
                                    layout_A=layout_A, layout_B=layout_B, layout_C=layout_C)
        self.op = self.plan.construct(alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)

    def _plans_equal(self, other_plan) -> bool:
        """
        Returns whether ``other_plan`` constructs an operation equivalent to the reference.

        :param other_plan: plan to compare against the reference GEMM
        :type other_plan: cutlass.op.Gemm

        :return: whether ``other_plan`` is equivalent to ``self.plan``
        :rtype: bool
        """
        other_op = other_plan.construct(alignment_A=self.alignment_A, alignment_B=self.alignment_B, alignment_C=self.alignment_C)

        # Equivalence is judged by comparing the C++ source each operation would emit
        return self.op.rt_module.emit() == other_op.rt_module.emit()

    def generic_test(self):
        """
        Checks equivalence of constructions that use CUTLASS data types and
        layouts directly when building the Gemm interface.
        """
        # The datatype conversion utilities used throughout require numpy
        if not datatypes.numpy_available:
            return

        # Fully-specified construction
        candidate = cutlass.op.Gemm(element_A=self.element_A, element_B=self.element_B, element_C=self.element_C,
                                    element_D=self.element_D, element_accumulator=self.element_accumulator,
                                    layout_A=self.layout_A, layout_B=self.layout_B, layout_C=self.layout_C)
        assert self._plans_equal(candidate)

        # Construction in which A is described via the generic element/layout parameters
        candidate = cutlass.op.Gemm(element_B=self.element_B, element_C=self.element_C,
                                    element_D=self.element_D, element_accumulator=self.element_accumulator,
                                    layout_B=self.layout_B, layout_C=self.layout_C,
                                    element=self.element_A, layout=self.layout_A)
        assert self._plans_equal(candidate)

        # Construction in which both A and B are described via the generic
        # parameters. Only valid when A and B share a data type and layout.
        if self.element_A == self.element_B and self.layout_A == self.layout_B:
            candidate = cutlass.op.Gemm(element_C=self.element_C, element_D=self.element_D, element_accumulator=self.element_accumulator,
                                        layout_C=self.layout_C, element=self.element_A, layout=self.layout_A)
            assert self._plans_equal(candidate)

        # Construction without an explicit accumulator type. Only valid when C
        # and the accumulator share a data type.
        if self.element_C == self.element_accumulator:
            candidate = cutlass.op.Gemm(element_A=self.element_A, element_B=self.element_B, element_C=self.element_C,
                                        element_D=self.element_D, layout_A=self.layout_A, layout_B=self.layout_B,
                                        layout_C=self.layout_C)
            assert self._plans_equal(candidate)

        # Construction from only the generic element/layout parameters. Only
        # valid when every operand shares a data type and layout.
        if (self.element_A == self.element_B and self.element_A == self.element_C and self.element_A == self.element_D
                and self.element_A == self.element_accumulator and
                self.layout_A == self.layout_B and self.layout_A == self.layout_C):
            candidate = cutlass.op.Gemm(element=self.element_A, layout=self.layout_A)
            assert self._plans_equal(candidate)

    def numpy_test(self):
        """
        Checks equivalence of constructions that use numpy arrays and dtypes
        as a frontend to the Gemm interface.
        """
        if not datatypes.numpy_available:
            return

        import numpy as np
        dtype_A = datatypes.numpy_type(self.element_A)
        dtype_B = datatypes.numpy_type(self.element_B)
        dtype_C = datatypes.numpy_type(self.element_C)
        dtype_D = datatypes.numpy_type(self.element_D)
        dtype_accum = datatypes.numpy_type(self.element_accumulator)

        # Map CUTLASS layouts onto numpy memory orders
        order_for = {
            cutlass.LayoutType.RowMajor: 'C',
            cutlass.LayoutType.ColumnMajor: 'F'
        }
        shape = (2, 2)
        A = np.zeros(shape, order=order_for[self.layout_A], dtype=dtype_A)
        B = np.zeros(shape, order=order_for[self.layout_B], dtype=dtype_B)
        C = np.zeros(shape, order=order_for[self.layout_C], dtype=dtype_C)
        D = np.zeros(shape, order=order_for[self.layout_C], dtype=dtype_D)

        # Construction entirely from example tensors
        candidate = cutlass.op.Gemm(A=A, B=B, C=C, D=D, element_accumulator=dtype_accum)
        assert self._plans_equal(candidate)

        # Construction with every operand but A given as a tensor
        candidate = cutlass.op.Gemm(B=B, C=C, D=D, element_accumulator=dtype_accum, element_A=dtype_A, layout_A=self.layout_A)
        assert self._plans_equal(candidate)

        # Construction with A and B described via the generic parameters. Only
        # valid when A and B share a data type and layout.
        if dtype_A == dtype_B and self.layout_A == self.layout_B:
            candidate = cutlass.op.Gemm(C=C, D=D, element_accumulator=dtype_accum, element=dtype_A, layout=self.layout_A)
            assert self._plans_equal(candidate)

        # Construction without an explicit accumulator type. Only valid when C
        # and the accumulator share a data type.
        if dtype_C == dtype_accum:
            candidate = cutlass.op.Gemm(A=A, B=B, C=C, D=D)
            assert self._plans_equal(candidate)

        # Construction from only the generic element/layout parameters. Only
        # valid when every operand shares a data type and layout.
        if (dtype_A == dtype_B and dtype_A == dtype_C and dtype_A == dtype_D and dtype_A == dtype_accum and
                self.layout_A == self.layout_B and self.layout_A == self.layout_C):
            candidate = cutlass.op.Gemm(element=dtype_A, layout=self.layout_A)
            assert self._plans_equal(candidate)

    def test_all(self):
        """
        Runs every equivalence check.
        """
        self.generic_test()
        self.numpy_test()
|
||||
class GemmEquivalenceTest(unittest.TestCase):
    """
    Tests the equivalence of different constructions of the Gemm interface.

    Each test name encodes the configuration exercised:
    ``test_gemm_equivalence_<elemA>_<elemB>_<elemC>_<elemD>_<accum>_<layouts>_<alignA>_<alignB>_<alignC>``
    where layouts use t = RowMajor, n = ColumnMajor for A, B, C.
    """
    @unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for FP16 Tensor Core tests.")
    def test_gemm_equivalence_f16_f16_f16_f16_f16_ttt_8_8_8(self):
        # FP16 operands and accumulator, all row-major, alignment 8 throughout
        gemm_eq = GemmEquivalence(
            element_A=cutlass.DataType.f16, element_B=cutlass.DataType.f16, element_C=cutlass.DataType.f16,
            element_D=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16,
            layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor,
            alignment_A=8, alignment_B=8, alignment_C=8)
        gemm_eq.test_all()

    @unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for FP16 Tensor Core tests.")
    def test_gemm_equivalence_f16_f16_f16_f16_f32_ntn_8_8_8(self):
        # FP16 operands with FP32 accumulation, mixed layouts, alignment 8 throughout
        gemm_eq = GemmEquivalence(
            element_A=cutlass.DataType.f16, element_B=cutlass.DataType.f16, element_C=cutlass.DataType.f16,
            element_D=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32,
            layout_A=cutlass.LayoutType.ColumnMajor, layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.ColumnMajor,
            alignment_A=8, alignment_B=8, alignment_C=8)
        gemm_eq.test_all()

    @unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for FP16 Tensor Core tests.")
    def test_gemm_equivalence_f16_f16_f16_f16_f16_ttt_4_4_4(self):
        # FP16 operands and accumulator, all row-major, alignment 4 throughout.
        # NOTE: previously this passed alignment 8, contradicting the _4_4_4
        # test name and duplicating the 8_8_8 case above; it now uses 4 as named.
        gemm_eq = GemmEquivalence(
            element_A=cutlass.DataType.f16, element_B=cutlass.DataType.f16, element_C=cutlass.DataType.f16,
            element_D=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16,
            layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor,
            alignment_A=4, alignment_B=4, alignment_C=4)
        gemm_eq.test_all()

    @unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for F64 Tensor Core tests.")
    def test_gemm_equivalence_f64_f64_f64_f64_f64_tnt_1_1_1(self):
        # FP64 operands and accumulator, mixed layouts, alignment 1 throughout
        gemm_eq = GemmEquivalence(
            element_A=cutlass.DataType.f64, element_B=cutlass.DataType.f64, element_C=cutlass.DataType.f64,
            element_D=cutlass.DataType.f64, element_accumulator=cutlass.DataType.f64,
            layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.ColumnMajor, layout_C=cutlass.LayoutType.RowMajor,
            alignment_A=1, alignment_B=1, alignment_C=1)
        gemm_eq.test_all()
|
||||
|
||||
class ExpectException:
    """
    Utility class to assert that an exception was raised when expected

    Example:

    .. highlight:: python
    .. code-block:: python

        with ExpectException(True, 'Division by zero'):
            x = 1.0 / 0.0

    :param exception_expected: whether an exception is expected to be raised
    :type exception_expected: bool
    :param message: message to print if an exception is raised when not expected or vice versa
    :type message: str
    """
    def __init__(self, exception_expected: bool, message: str = ''):
        self.exception_expected = exception_expected
        self.message = message

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, traceback):
        # An exception escaped the `with` body iff exc_type is non-None
        exception_raised = exc_type is not None
        assert self.exception_expected == exception_raised, self.message

        # Suppress the exception: if we reach this point, the outcome matched
        # the expectation, so an anticipated error should not propagate
        return True
|
||||
|
||||
class GemmErrorTests(unittest.TestCase):
    """
    Tests various error scenarios that arise with the high-level Gemm interface
    """

    def test_alignment(self):
        """
        Tests the case in which an unsupported alignment is specified
        """
        plan = cutlass.op.Gemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)

        with ExpectException(True, 'Alignment 16 is not supported for F16. The construction should fail.'):
            op = plan.construct(alignment_A=16, alignment_B=16, alignment_C=16)

    def test_tensorop_availability(self):
        """
        Tests the case in which only SIMT operations are available but TensorOp is requested
        """
        cc = device_cc()

        # F64 Tensor Core operations are only available on devices with CC >= 80
        supports_tensorop_f64 = cc >= 80
        plan = cutlass.op.Gemm(cc=cc, element=cutlass.DataType.f64, layout=cutlass.LayoutType.RowMajor)

        error_msg = f'Incorrectly raised an exception for availability of TensorOp with F64 operands on SM{cc}'
        with ExpectException(not supports_tensorop_f64, error_msg):
            plan.opclass = cutlass.OpcodeClass.TensorOp

        expected_opclass = cutlass.OpcodeClass.TensorOp if supports_tensorop_f64 else cutlass.OpcodeClass.Simt
        assert plan.opclass == expected_opclass, f'Expected opclass to be {expected_opclass}, but received {plan.opclass} for SM{cc}'

    @unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for F16 Tensor Core tests.")
    def test_opclass_switch(self):
        """
        Tests cases in which the opcode class in question is switched (e.g., from TensorOp to SIMT)
        """
        plan = cutlass.op.Gemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
        assert plan.opclass == cutlass.OpcodeClass.TensorOp

        # Ensure that all tile descriptions have opclass of TensorOp
        for td in plan.tile_descriptions():
            assert td.math_instruction.opcode_class == cutlass_bindings.OpClass.TensorOp

        plan.opclass = cutlass.OpcodeClass.Simt

        # Ensure that all tile descriptions have opclass of Simt
        for td in plan.tile_descriptions():
            assert td.math_instruction.opcode_class == cutlass_bindings.OpClass.Simt

    def test_invalid_tile_description(self):
        """
        Tests scenarios in which an invalid tile description is provided for a given CC
        """
        cc = device_cc()
        plan = cutlass.op.Gemm(cc=cc, element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
        td = plan.tile_descriptions()[0]
        stages = td.stages

        # Zero stage count is valid for SM90+, as this is used to indicate that the builder's auto stage
        # count should be used
        with ExpectException(cc < 90, 'Requested zero stages'):
            td.stages = 0
            plan.construct(td)

        with ExpectException(cc < 80, f'Requested more than 2 stages on SM{cc}'):
            td.stages = 3
            plan.construct(td)

        with ExpectException(True, 'Requested too many stages'):
            td.stages = 100
            plan.construct(td)

        # Reset stage count
        td.stages = stages

        cluster_shape = td.cluster_shape
        with ExpectException(cc < 90, f'Requested non-unit cluster shape on SM{cc}'):
            td.cluster_shape = [2, 1, 1]
            plan.construct(td)

        # Reset cluster shape
        td.cluster_shape = cluster_shape

        kernel_schedule = td.kernel_schedule
        with ExpectException(cc < 90, f'Requested a persistent kernel on SM{cc}'):
            td.kernel_schedule = cutlass.KernelScheduleType.TmaWarpSpecializedPingpong
            plan.construct(td)

        # Reset kernel schedule (previously saved but never restored)
        td.kernel_schedule = kernel_schedule

        # Ensure that all returned tile descriptions are unique
        ops = {}
        for td in plan.tile_descriptions():
            op = plan.construct(td)
            code_str = op.rt_module.emit()
            if code_str in ops:
                conflicting_td = ops[code_str]
                assert False, f'Multiple tile descriptions emitted {code_str}\nTile descriptions are:\n{td}\n{conflicting_td}'
            # Record the emitted code so later duplicates are detected. The
            # original never inserted into `ops`, so the uniqueness check above
            # could never fire.
            ops[code_str] = td
|
||||
|
||||
if __name__ == '__main__':
    # Discover and run every TestCase defined in this module
    unittest.main()
||||
Reference in New Issue
Block a user