0
test/python/backend/conv/__init__.py
Normal file
0
test/python/backend/conv/__init__.py
Normal file
@ -0,0 +1,233 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
|
||||
from cutlass.backend.conv2d_operation import *
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
    """SM80 implicit-GEMM dgrad tests: f16 NHWC operands, tensor-op, f16 accumulation.

    Each test builds the same tile/math configuration and varies only the
    iterator algorithm (analytic vs. optimized) and the A/B/C alignment
    (8 for the default tests, 4 for the ``_align4`` variants).
    """

    def _build_operation(self, iterator_algorithm, alignment):
        """Construct an SM80 f16 dgrad Conv2dOperation.

        iterator_algorithm: cutlass_bindings.conv.IteratorAlgorithm member.
        alignment: element alignment applied uniformly to A, B and C.
        """
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float16,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        nhwc = cutlass_bindings.TensorNHWC
        A = TensorDescription(element=math_inst.element_a, layout=nhwc, alignment=alignment)
        B = TensorDescription(element=math_inst.element_b, layout=nhwc, alignment=alignment)
        C = TensorDescription(element=cutlass_bindings.float16, layout=nhwc, alignment=alignment)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 64],
            stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float16)

        return Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.dgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            stride_support=StrideSupport.Unity,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

    @staticmethod
    def _align4_problem_sizes():
        # A single small problem with C=K channels = 12, which is divisible by
        # 4 but not by 8 — this is what forces the alignment-4 code path.
        return [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
                cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
                cutlass_bindings.MatrixCoord(3, 3),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1,
            ),
        ]

    def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        operation = self._build_operation(
            cutlass_bindings.conv.IteratorAlgorithm.analytic, 8)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        operation = self._build_operation(
            cutlass_bindings.conv.IteratorAlgorithm.optimized, 8)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
        operation = self._build_operation(
            cutlass_bindings.conv.IteratorAlgorithm.analytic, 4)
        self.assertTrue(test_all_conv2d(operation, self._align4_problem_sizes()))

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
        operation = self._build_operation(
            cutlass_bindings.conv.IteratorAlgorithm.optimized, 4)
        self.assertTrue(test_all_conv2d(operation, self._align4_problem_sizes()))
|
||||
|
||||
if __name__ == '__main__':
    # Bug fix: this file only does `from cutlass.backend import *`, which does
    # NOT bind the package name `cutlass` in this module's namespace, so the
    # attribute access below would raise NameError. The sibling test files all
    # have a top-level `import cutlass.backend`; do the equivalent here.
    import cutlass.backend

    # Reserve the backend memory pools (2**26 bytes per argument; exact
    # per-argument semantics per cutlass.backend.get_memory_pool — confirm)
    # before launching the unittest runner.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,209 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
    """SM80 implicit-GEMM dgrad tests: f16 NHWC inputs, f32 NHWC output, f32 accumulation.

    All four tests use the optimized iterator algorithm with unity stride
    support; they sweep the threadblock K extent (32 vs. 64) and the pipeline
    stage count (3 vs. 4).
    """

    def _run(self, threadblock_k, stages):
        """Build the dgrad operation for the given K extent / stage count and run it."""
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        nhwc = cutlass_bindings.TensorNHWC
        A = TensorDescription(element=math_inst.element_a, layout=nhwc, alignment=8)
        B = TensorDescription(element=math_inst.element_b, layout=nhwc, alignment=8)
        # Output is f32, so alignment 4 gives the same 16-byte access width as
        # the alignment-8 f16 operands.
        C = TensorDescription(element=cutlass_bindings.float32, layout=nhwc, alignment=4)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, threadblock_k],
            stages=stages,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.dgrad,
            iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            stride_support=StrideSupport.Unity,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage3(self):
        self._run(threadblock_k=32, stages=3)

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage4(self):
        self._run(threadblock_k=32, stages=4)

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage3_64(self):
        self._run(threadblock_k=64, stages=3)

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage4_64(self):
        self._run(threadblock_k=64, stages=4)
|
||||
|
||||
if __name__ == '__main__':
    # Reserve the backend memory pools (2**26 bytes per argument; exact
    # per-argument semantics per cutlass.backend.get_memory_pool — confirm)
    # before launching the unittest runner.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,130 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend.conv2d_operation import *
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
    """SM80 implicit-GEMM dgrad tests: f32 NHWC operands on the SIMT (non-tensor-op) path.

    NOTE(review): the first test method is named ``..._Fprop_Analytic_...`` but
    it constructs a *dgrad* operation like everything else in this class — the
    name looks copied from the fprop suite; confirm before relying on it.
    """

    def _run(self, iterator_algorithm, warp_count):
        """Build the SIMT f32 dgrad operation with the given iterator algorithm / warp layout and run it."""
        math_inst = MathInstruction(
            instruction_shape=[1, 1, 1],  # scalar FMA per instruction on the SIMT path
            element_a=cutlass_bindings.float32,
            element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.Simt,
            math_operation=MathOperation.multiply_add,
        )

        nhwc = cutlass_bindings.TensorNHWC
        A = TensorDescription(element=math_inst.element_a, layout=nhwc, alignment=4)
        B = TensorDescription(element=math_inst.element_b, layout=nhwc, alignment=4)
        C = TensorDescription(element=cutlass_bindings.float32, layout=nhwc, alignment=1)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 8],
            stages=4,
            warp_count=warp_count,
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.dgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            stride_support=StrideSupport.Unity,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        self._run(cutlass_bindings.conv.IteratorAlgorithm.analytic, [4, 2, 1])

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        self._run(cutlass_bindings.conv.IteratorAlgorithm.optimized, [2, 4, 1])
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Reserve the backend memory pools (2**26 bytes per argument; exact
    # per-argument semantics per cutlass.backend.get_memory_pool — confirm)
    # before launching the unittest runner.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,127 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmTF32nhwcTF32nhwcTF32nhwcTensorOpF32SM80(unittest.TestCase):
    """SM80 implicit-GEMM dgrad tests: f32 NHWC operands on the TF32 tensor-op path.

    Both tests share one configuration (16x8x8 MMA, 128x128x16 threadblock,
    3 stages) and differ only in the iterator algorithm.
    """

    def _run(self, iterator_algorithm):
        """Build the TF32 tensor-op dgrad operation with the given iterator algorithm and run it."""
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 8],
            element_a=cutlass_bindings.float32,
            element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        nhwc = cutlass_bindings.TensorNHWC
        A = TensorDescription(element=math_inst.element_a, layout=nhwc, alignment=4)
        B = TensorDescription(element=math_inst.element_b, layout=nhwc, alignment=4)
        C = TensorDescription(element=cutlass_bindings.float32, layout=nhwc, alignment=8)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 16],
            stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.dgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80,
            tile_description=tile_description,
            A=A, B=B, C=C,
            stride_support=StrideSupport.Unity,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
        self._run(cutlass_bindings.conv.IteratorAlgorithm.analytic)

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
        self._run(cutlass_bindings.conv.IteratorAlgorithm.optimized)
|
||||
|
||||
if __name__ == '__main__':
    # Reserve the backend memory pools (2**26 bytes per argument; exact
    # per-argument semantics per cutlass.backend.get_memory_pool — confirm)
    # before launching the unittest runner.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,196 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_few_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
def conv2d_few_channel_problemsizes(channels):
    """Return Conv2d fprop problem sizes exercising the few-channels iterator.

    Every problem keeps the input/filter channel count equal to *channels*
    and unit padding/dilation, while varying the spatial extent, filter
    size, and stride.
    """
    # NOTE(review): unittest.skipIf on a plain helper (not a TestCase) is
    # unusual; on pre-SM80 devices calling the wrapped helper raises
    # SkipTest, which skips the calling test -- confirm this is intended.
    # (input N/H/W, filter K/R/S, stride)
    configs = [
        ((1, 8, 8), (16, 3, 3), (2, 2)),
        ((1, 16, 16), (16, 3, 3), (2, 2)),
        ((1, 16, 16), (16, 7, 7), (1, 1)),
        ((1, 224, 224), (32, 7, 7), (1, 1)),
        ((1, 224, 224), (64, 7, 7), (2, 2)),
        ((1, 224, 224), (64, 5, 5), (1, 1)),
        ((1, 224, 224), (64, 5, 5), (2, 2)),
    ]
    return [
        cutlass_bindings.conv.Conv2dProblemSize(
            cutlass_bindings.Tensor4DCoord(n, h, w, channels),
            cutlass_bindings.Tensor4DCoord(k, r, s, channels),
            cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
            cutlass_bindings.MatrixCoord(stride_h, stride_w),
            cutlass_bindings.MatrixCoord(1, 1),
            cutlass_bindings.conv.Mode.cross_correlation,
            1, 1
        )
        for (n, h, w), (k, r, s), (stride_h, stride_w) in configs
    ]
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropFewChannelsF16NHWCF16NHWCF16HNWCTensorOpF32SM80(unittest.TestCase):
    """Conv2d fprop tests for the few_channels iterator algorithm on SM80
    (f16 NHWC A/B, f16 NHWC output, f32 accumulation).

    The two test methods were near-identical copies; the shared setup now
    lives in _run_few_channels_test. The skipIf guard is applied at class
    level so the tests themselves are skipped on pre-SM80 devices (before,
    only a helper function carried the guard).
    """

    def _run_few_channels_test(self, channels, instruction_shape,
                               threadblock_shape, stages):
        """Build and run a few_channels fprop operation and assert it passes.

        Args:
            channels: input/filter channel count; also used as the A/B
                operand alignment, since the channel count bounds the widest
                contiguous vector access.
            instruction_shape: tensor-op MMA instruction shape [m, n, k].
            threadblock_shape: threadblock tile shape [M, N, K].
            stages: number of software-pipeline stages.
        """
        math_inst = MathInstruction(
            instruction_shape=instruction_shape,
            element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=channels)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=channels)
        # The f16 output keeps the full 8-element (128-bit) alignment.
        C = TensorDescription(
            element=cutlass_bindings.float16,
            layout=cutlass_bindings.TensorNHWC,
            alignment=8)

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape, stages=stages,
            warp_count=[2, 2, 1],
            math_instruction=math_inst
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.few_channels,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1
        )

        self.assertTrue(test_all_conv2d(operation, conv2d_few_channel_problemsizes(channels)))

    def test_SM80_Device_Conv2d_Fprop_Few_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_2(self):
        self._run_few_channels_test(2, [16, 8, 16], [128, 128, 64], 3)

    def test_SM80_Device_Conv2d_Fprop_Few_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_1(self):
        # channels=1 narrows the MMA K-extent and the pipeline depth.
        self._run_few_channels_test(1, [16, 8, 8], [128, 128, 32], 2)
|
||||
|
||||
if __name__ == '__main__':
    # Pre-allocate the CUTLASS backend memory pool before the tests run;
    # both arguments are 2**26 (64 MiB) -- presumably an initial and a
    # maximum size, TODO confirm against get_memory_pool's signature.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,220 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_fixed_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
def conv2d_fixed_channel_problemsizes(channels):
    """Return Conv2d fprop problem sizes exercising the fixed-channels iterator.

    Every problem uses *channels* input/filter channels and unit
    padding/dilation, varying the spatial extent, filter size, and stride.
    """
    # NOTE(review): unittest.skipIf on a plain helper (not a TestCase) is
    # unusual; on pre-SM80 devices calling the wrapped helper raises
    # SkipTest, which skips the calling test -- confirm this is intended.
    # (input N/H/W, filter K/R/S, stride)
    configs = [
        ((1, 8, 8), (16, 3, 3), (2, 2)),
        ((1, 224, 224), (32, 7, 7), (1, 1)),
        ((1, 224, 224), (64, 7, 7), (2, 2)),
        ((1, 224, 224), (64, 5, 5), (1, 1)),
        ((1, 224, 224), (64, 5, 5), (2, 2)),
    ]
    return [
        cutlass_bindings.conv.Conv2dProblemSize(
            cutlass_bindings.Tensor4DCoord(n, h, w, channels),
            cutlass_bindings.Tensor4DCoord(k, r, s, channels),
            cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
            cutlass_bindings.MatrixCoord(stride_h, stride_w),
            cutlass_bindings.MatrixCoord(1, 1),
            cutlass_bindings.conv.Mode.cross_correlation,
            1, 1
        )
        for (n, h, w), (k, r, s), (stride_h, stride_w) in configs
    ]
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropFixedChannelsF16NHWCF16NHWCF16HNWCTensorOpF32SM80(unittest.TestCase):
    """Conv2d fprop tests for the fixed_channels iterator algorithm on SM80
    (f16 NHWC A/B, f16 NHWC output, f32 accumulation).

    The three test methods were identical except for the channel count; the
    shared setup now lives in _run_fixed_channels_test. The skipIf guard is
    applied at class level so the tests themselves are skipped on pre-SM80
    devices (before, only a helper function carried the guard).
    """

    def _run_fixed_channels_test(self, channels):
        """Build and run a fixed_channels fprop operation and assert it passes.

        Args:
            channels: input/filter channel count; also used as the A/B
                operand alignment, since the channel count bounds the widest
                contiguous vector access.
        """
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=channels)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=channels)
        # The f16 output keeps the full 8-element (128-bit) alignment.
        C = TensorDescription(
            element=cutlass_bindings.float16,
            layout=cutlass_bindings.TensorNHWC,
            alignment=8)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 64], stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.fixed_channels,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1
        )

        self.assertTrue(test_all_conv2d(operation, conv2d_fixed_channel_problemsizes(channels)))

    def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_8(self):
        self._run_fixed_channels_test(8)

    def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_4(self):
        self._run_fixed_channels_test(4)

    def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_2(self):
        self._run_fixed_channels_test(2)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Pre-allocate the CUTLASS backend memory pool before the tests run;
    # both arguments are 2**26 (64 MiB) -- presumably an initial and a
    # maximum size, TODO confirm against get_memory_pool's signature.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,341 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
    """Conv2d fprop implicit-GEMM tests on SM80
    (f16 NHWC A/B, f16 NHWC output, f16 accumulation).

    The five test methods shared an identical operation-construction
    sequence and duplicated problem-size lists; those now live in
    _make_operation and _misaligned_problem_sizes.
    """

    def _make_operation(self, iterator_algorithm, alignment):
        """Build a conv2d fprop operation with f16 in/out and f16 accumulator.

        Args:
            iterator_algorithm: a cutlass_bindings.conv.IteratorAlgorithm
                member (analytic or optimized in these tests).
            alignment: vector access width (elements) for the A and B
                operands; the f16 output always keeps the full 8-element
                (128-bit) alignment.
        """
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float16,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=alignment)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=alignment)
        C = TensorDescription(
            element=cutlass_bindings.float16,
            layout=cutlass_bindings.TensorNHWC,
            alignment=8)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 64], stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float16)

        return Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1
        )

    def _misaligned_problem_sizes(self, channel_counts):
        """Problem sizes whose channel counts are not a multiple of 8.

        Args:
            channel_counts: three channel counts; the first two go to small
                4x4 inputs with zero padding, the third to a larger 23x56
                input with asymmetric padding. All use stride (3, 3).
        """
        c0, c1, c2 = channel_counts
        configs = [
            ((1, 4, 4, c0), (8, 3, 3, c0), (0, 0, 0, 0)),
            ((1, 4, 4, c1), (8, 3, 3, c1), (0, 0, 0, 0)),
            ((1, 23, 56, c2), (128, 3, 3, c2), (4, 0, 5, 0)),
        ]
        return [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(*input_size),
                cutlass_bindings.Tensor4DCoord(*filter_size),
                cutlass_bindings.Tensor4DCoord(*padding),
                cutlass_bindings.MatrixCoord(3, 3),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1
            )
            for input_size, filter_size, padding in configs
        ]

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        operation = self._make_operation(
            cutlass_bindings.conv.IteratorAlgorithm.analytic, 8)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        operation = self._make_operation(
            cutlass_bindings.conv.IteratorAlgorithm.optimized, 8)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align2(self):
        operation = self._make_operation(
            cutlass_bindings.conv.IteratorAlgorithm.analytic, 2)
        self.assertTrue(
            test_all_conv2d(operation, self._misaligned_problem_sizes((12, 14, 98))))

    def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align2(self):
        operation = self._make_operation(
            cutlass_bindings.conv.IteratorAlgorithm.optimized, 2)
        self.assertTrue(
            test_all_conv2d(operation, self._misaligned_problem_sizes((12, 14, 98))))

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
        # NOTE(review): despite "Analytic" in the name, the original test
        # constructed this operation with the *optimized* iterator
        # algorithm; that behavior is preserved here -- confirm whether the
        # name or the algorithm is the intended one.
        operation = self._make_operation(
            cutlass_bindings.conv.IteratorAlgorithm.optimized, 4)
        self.assertTrue(
            test_all_conv2d(operation, self._misaligned_problem_sizes((12, 28, 100))))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Pre-allocate the CUTLASS backend memory pool before the tests run;
    # both arguments are 2**26 (64 MiB) -- presumably an initial and a
    # maximum size, TODO confirm against get_memory_pool's signature.
    cutlass.backend.get_memory_pool(2**26, 2**26)
    unittest.main()
|
||||
@ -0,0 +1,86 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
    """Conv2d fprop test: f16 NHWC operands, f32 NHWC output, tensor-op f32 accumulation (SM80)."""

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
        """Run the analytic-iterator fprop kernel through the full conv2d test harness."""
        # Tensor-op MMA: 16x8x16 instruction, f16 operands accumulated in f32.
        mma = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        nhwc = cutlass_bindings.TensorNHWC
        tensor_a = TensorDescription(element=mma.element_a, layout=nhwc, alignment=8)
        tensor_b = TensorDescription(element=mma.element_b, layout=nhwc, alignment=8)
        tensor_c = TensorDescription(element=cutlass_bindings.float32, layout=nhwc, alignment=4)

        # 128x128x64 threadblock tile, 3 pipeline stages, 2x2x1 warp arrangement.
        tile = TileDescription(
            threadblock_shape=[128, 128, 64],
            stages=3,
            warp_count=[2, 2, 1],
            math_instruction=mma,
        )

        epilogue = LinearCombination(
            tensor_c.element, tensor_c.alignment,
            mma.element_accumulator, cutlass_bindings.float32)

        op = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
            arch=80,
            tile_description=tile,
            A=tensor_a,
            B=tensor_b,
            C=tensor_c,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_conv2d(op))
|
||||
|
||||
if __name__ == '__main__':
    # Reserve 64 MiB memory pools for the test harness before running
    # (pool-size argument semantics live in cutlass.backend — confirm there).
    pool_bytes = 2 ** 26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,128 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend.conv2d_operation import *
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
    """Conv2d fprop tests: f32 NHWC operands/output on SIMT cores (SM80).

    The analytic and optimized variants are identical except for the iterator
    algorithm, warp layout, and threadblock swizzle; the shared construction
    lives in ``_run_fprop``.
    """

    def _run_fprop(self, iterator_algorithm, warp_count, swizzling_functor):
        """Build one f32 SIMT fprop operation and run the full conv2d harness.

        :param iterator_algorithm: conv iterator algorithm (analytic or optimized)
        :param warp_count: [m, n, k] warp arrangement within the threadblock
        :param swizzling_functor: threadblock swizzling functor
        """
        # SIMT math: scalar 1x1x1 "instruction", f32 operands, f32 accumulation.
        math_inst = MathInstruction(
            instruction_shape=[1, 1, 1],
            element_a=cutlass_bindings.float32,
            element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.Simt,
            math_operation=MathOperation.multiply_add,
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4)
        C = TensorDescription(
            element=cutlass_bindings.float32,
            layout=cutlass_bindings.TensorNHWC,
            alignment=1)

        # 128x128x8 threadblock tile with 4 pipeline stages.
        tile_description = TileDescription(
            threadblock_shape=[128, 128, 8], stages=4,
            warp_count=warp_count,
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=swizzling_functor,
        )

        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        """Analytic iterator, 4x2x1 warps, IdentitySwizzle2."""
        self._run_fprop(
            cutlass_bindings.conv.IteratorAlgorithm.analytic,
            [4, 2, 1],
            cutlass_bindings.IdentitySwizzle2)

    def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        """Optimized iterator, 2x4x1 warps, IdentitySwizzle1."""
        self._run_fprop(
            cutlass_bindings.conv.IteratorAlgorithm.optimized,
            [2, 4, 1],
            cutlass_bindings.IdentitySwizzle1)
|
||||
|
||||
if __name__ == '__main__':
    # Reserve 64 MiB memory pools for the test harness before running
    # (pool-size argument semantics live in cutlass.backend — confirm there).
    pool_bytes = 2 ** 26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,139 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmTF32nhwcTF32nhwcTF32nhwcTensorOpF32SM80(unittest.TestCase):
    """Conv2d fprop tests: tf32 NHWC operands, f32 NHWC output, tensor-op f32 (SM80).

    The two variants share everything except operand alignment, iterator
    algorithm, and an optional explicit problem-size list; the shared
    construction lives in ``_run_fprop``.
    """

    def _run_fprop(self, iterator_algorithm, alignment_ab, problem_sizes=None):
        """Build one tf32 tensor-op fprop operation and run the conv2d harness.

        :param iterator_algorithm: conv iterator algorithm (analytic or optimized)
        :param alignment_ab: vector-access alignment for the A and B operands
        :param problem_sizes: optional explicit Conv2dProblemSize list; when
            omitted the harness uses its default problem set
        """
        # Tensor-op MMA: 16x8x8 instruction, f32 (tf32) operands, f32 accumulation.
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 8],
            element_a=cutlass_bindings.float32,
            element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=alignment_ab)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=alignment_ab)
        C = TensorDescription(
            element=cutlass_bindings.float32,
            layout=cutlass_bindings.TensorNHWC,
            alignment=8)

        # 128x128x16 threadblock tile, 3 stages, 2x2x1 warp arrangement.
        tile_description = TileDescription(
            threadblock_shape=[128, 128, 16], stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.fprop,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        if problem_sizes is None:
            self.assertTrue(test_all_conv2d(operation))
        else:
            self.assertTrue(test_all_conv2d(operation, problem_sizes))

    def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
        """Analytic iterator with alignment-4 operands over the default problem set."""
        self._run_fprop(cutlass_bindings.conv.IteratorAlgorithm.analytic, 4)

    def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_align2(self):
        """Optimized iterator with alignment-2 operands on a channel count (12) not divisible by 4."""
        problem_sizes = [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
                cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
                cutlass_bindings.MatrixCoord(3, 3),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1
            )
        ]
        self._run_fprop(
            cutlass_bindings.conv.IteratorAlgorithm.optimized, 2,
            problem_sizes=problem_sizes)
|
||||
|
||||
if __name__ == '__main__':
    # Reserve 64 MiB memory pools for the test harness before running
    # (pool-size argument semantics live in cutlass.backend — confirm there).
    pool_bytes = 2 ** 26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,285 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_strided_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dStridedDgradImplicitGemmF16NHWCF16NHWCF32NHWCTensorOpF32SM80(unittest.TestCase):
    """Strided-dgrad conv2d tests: f16 NHWC operands, f32 NHWC output, tensor-op f32 (SM80).

    All variants share the math instruction, epilogue, stride support, and
    swizzle; only the iterator algorithm, threadblock tile, warp layout,
    operand alignment, and optional explicit problem sizes differ. The shared
    construction lives in ``_run_dgrad``.
    """

    def _run_dgrad(self, iterator_algorithm, alignment_ab,
                   threadblock_shape=None, warp_count=None, problem_sizes=None):
        """Build one strided-dgrad operation and run the full conv2d harness.

        :param iterator_algorithm: conv iterator algorithm (analytic or optimized)
        :param alignment_ab: vector-access alignment for the A and B operands
        :param threadblock_shape: [m, n, k] threadblock tile (default 128x128x32)
        :param warp_count: [m, n, k] warp arrangement (default 2x2x1)
        :param problem_sizes: optional explicit Conv2dProblemSize list; when
            omitted the harness uses its default problem set
        """
        if threadblock_shape is None:
            threadblock_shape = [128, 128, 32]
        if warp_count is None:
            warp_count = [2, 2, 1]

        # Tensor-op MMA: 16x8x16 instruction, f16 operands accumulated in f32.
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=alignment_ab)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=alignment_ab)
        C = TensorDescription(
            element=cutlass_bindings.float32,
            layout=cutlass_bindings.TensorNHWC,
            alignment=4)

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape, stages=3,
            warp_count=warp_count,
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        # Strided dgrad requires the strided-dgrad-specific swizzle.
        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.dgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.StridedDgradIdentitySwizzle1,
        )

        if problem_sizes is None:
            self.assertTrue(test_all_conv2d(operation))
        else:
            self.assertTrue(test_all_conv2d(operation, problem_sizes))

    def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32(self):
        """Analytic iterator, 128x128x32 tile, alignment-8 operands."""
        self._run_dgrad(cutlass_bindings.conv.IteratorAlgorithm.analytic, 8)

    def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x256_64x3_64x64x64(self):
        """Analytic iterator, larger 128x256x64 tile with 2x4x1 warps."""
        self._run_dgrad(
            cutlass_bindings.conv.IteratorAlgorithm.analytic, 8,
            threadblock_shape=[128, 256, 64], warp_count=[2, 4, 1])

    def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4_128x128_32x3_64x64x32(self):
        """Analytic iterator, alignment-4 operands on a channel count (12) not divisible by 8."""
        problem_sizes = [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
                cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
                cutlass_bindings.MatrixCoord(3, 3),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1
            ),
        ]
        self._run_dgrad(
            cutlass_bindings.conv.IteratorAlgorithm.analytic, 4,
            problem_sizes=problem_sizes)

    def test_SM80_Device_Conv2d_Strided_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32(self):
        """Optimized iterator, 128x128x32 tile, alignment-8 operands."""
        self._run_dgrad(cutlass_bindings.conv.IteratorAlgorithm.optimized, 8)

    def test_SM80_Device_Conv2d_Strided_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32_align4(self):
        """Optimized iterator, alignment-4 operands, stride-2 problems with even/odd input extents."""
        problem_sizes = [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 56, 56, 12),
                cutlass_bindings.Tensor4DCoord(8, 1, 1, 12),
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
                cutlass_bindings.MatrixCoord(2, 2),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1
            ),
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 55, 55, 12),
                cutlass_bindings.Tensor4DCoord(8, 1, 1, 12),
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
                cutlass_bindings.MatrixCoord(2, 2),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1
            ),
        ]
        self._run_dgrad(
            cutlass_bindings.conv.IteratorAlgorithm.optimized, 4,
            problem_sizes=problem_sizes)
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Reserve 64 MiB memory pools for the test harness before running
    # (pool-size argument semantics live in cutlass.backend — confirm there).
    pool_bytes = 2 ** 26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,129 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
    """Wgrad conv2d tests: f16 NHWC operands/output, tensor-op f16 accumulation (SM80).

    The analytic and optimized variants are identical except for the iterator
    algorithm; the shared construction lives in ``_run_wgrad``.
    """

    def _run_wgrad(self, iterator_algorithm):
        """Build one f16 tensor-op wgrad operation and run the conv2d harness.

        :param iterator_algorithm: conv iterator algorithm (analytic or optimized)
        """
        # Tensor-op MMA: 16x8x16 instruction, f16 operands, f16 accumulation.
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass_bindings.float16,
            element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float16,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass_bindings.TensorNHWC,
            alignment=8)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass_bindings.TensorNHWC,
            alignment=8)
        C = TensorDescription(
            element=cutlass_bindings.float16,
            layout=cutlass_bindings.TensorNHWC,
            alignment=8)

        # 128x128x64 threadblock tile, 3 stages, 2x2x1 warp arrangement.
        tile_description = TileDescription(
            threadblock_shape=[128, 128, 64], stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment, math_inst.element_accumulator,
            cutlass_bindings.float16,
        )

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.wgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1,
        )

        self.assertTrue(test_all_conv2d(operation))

    def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        """Analytic iterator over the harness's default problem set."""
        self._run_wgrad(cutlass_bindings.conv.IteratorAlgorithm.analytic)

    def test_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        """Optimized iterator over the harness's default problem set."""
        self._run_wgrad(cutlass_bindings.conv.IteratorAlgorithm.optimized)
|
||||
|
||||
if __name__ == '__main__':
    # Reserve 64 MiB memory pools for the test harness before running
    # (pool-size argument semantics live in cutlass.backend — confirm there).
    pool_bytes = 2 ** 26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,274 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
    """Wgrad conv2d: f16 NHWC inputs, f32 NHWC output, SM80 tensor-op f32 accumulation.

    Every test builds the same kind of Conv2dOperation and only varies the
    iterator algorithm, MMA instruction shape, input alignment, and
    threadblock tile; the shared plumbing lives in _run_wgrad_conv2d.
    """

    def _run_wgrad_conv2d(self, iterator_algorithm, instruction_shape,
                          ab_alignment, threadblock_shape, warp_count,
                          problem_sizes=None):
        """Construct a wgrad Conv2dOperation from the given knobs and verify it.

        Args:
            iterator_algorithm: a cutlass_bindings.conv.IteratorAlgorithm member.
            instruction_shape: [m, n, k] of the tensor-op MMA instruction.
            ab_alignment: element alignment of the A and B operands.
            threadblock_shape: [M, N, K] threadblock tile.
            warp_count: warps per threadblock along [M, N, K].
            problem_sizes: optional explicit problem list; when None the
                default sweep inside test_all_conv2d is used.
        """
        math_inst = MathInstruction(
            instruction_shape=instruction_shape,
            element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add
        )

        nhwc = cutlass_bindings.TensorNHWC
        A = TensorDescription(element=math_inst.element_a, layout=nhwc,
                              alignment=ab_alignment)
        B = TensorDescription(element=math_inst.element_b, layout=nhwc,
                              alignment=ab_alignment)
        # The f32 output keeps 4-element alignment in every variant.
        C = TensorDescription(element=cutlass_bindings.float32, layout=nhwc,
                              alignment=4)

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape, stages=3,
            warp_count=warp_count,
            math_instruction=math_inst
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.wgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1
        )

        if problem_sizes is None:
            self.assertTrue(test_all_conv2d(operation))
        else:
            self.assertTrue(test_all_conv2d(operation, problem_sizes))

    @staticmethod
    def _align4_problem_sizes():
        """One small 12-channel problem that exercises 4-element alignment."""
        return [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
                cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
                cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
                cutlass_bindings.MatrixCoord(3, 3),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1
            ),
        ]

    def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
        self._run_wgrad_conv2d(
            cutlass_bindings.conv.IteratorAlgorithm.analytic,
            instruction_shape=[16, 8, 8], ab_alignment=8,
            threadblock_shape=[128, 128, 16], warp_count=[2, 2, 1])

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
        self._run_wgrad_conv2d(
            cutlass_bindings.conv.IteratorAlgorithm.optimized,
            instruction_shape=[16, 8, 8], ab_alignment=8,
            threadblock_shape=[128, 128, 16], warp_count=[2, 2, 1])

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_64x256_32x4_64x64x32(self):
        # NOTE(review): the "32x4" in the test name suggests 4 pipeline stages,
        # but the original configuration used stages=3 — preserved as-is; confirm
        # against the mirrored C++ unit test.
        self._run_wgrad_conv2d(
            cutlass_bindings.conv.IteratorAlgorithm.optimized,
            instruction_shape=[16, 8, 16], ab_alignment=8,
            threadblock_shape=[64, 256, 32], warp_count=[1, 4, 1])

    def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
        self._run_wgrad_conv2d(
            cutlass_bindings.conv.IteratorAlgorithm.analytic,
            instruction_shape=[16, 8, 8], ab_alignment=4,
            threadblock_shape=[128, 128, 16], warp_count=[2, 2, 1],
            problem_sizes=self._align4_problem_sizes())

    def test_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
        self._run_wgrad_conv2d(
            cutlass_bindings.conv.IteratorAlgorithm.optimized,
            instruction_shape=[16, 8, 8], ab_alignment=4,
            threadblock_shape=[128, 128, 16], warp_count=[2, 2, 1],
            problem_sizes=self._align4_problem_sizes())
|
||||
|
||||
if __name__ == '__main__':
    # Reserve the backend memory pools (64 MiB each) before running the tests.
    pool_bytes = 2**26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,128 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend.conv2d_operation import *
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
    """Wgrad conv2d: all-f32 NHWC operands on SM80 SIMT cores.

    The analytic and optimized variants share every other knob, so both
    delegate to _run_simt_wgrad.
    """

    def _run_simt_wgrad(self, iterator_algorithm):
        """Build the SIMT f32 wgrad operation and run the standard sweep."""
        # SIMT "MMA" is a scalar multiply-add, hence the 1x1x1 instruction shape.
        math_inst = MathInstruction(
            instruction_shape=[1, 1, 1],
            element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.Simt,
            math_operation=MathOperation.multiply_add
        )

        nhwc = cutlass_bindings.TensorNHWC
        A = TensorDescription(element=math_inst.element_a, layout=nhwc, alignment=4)
        B = TensorDescription(element=math_inst.element_b, layout=nhwc, alignment=4)
        # Output alignment of 1 keeps the epilogue fully general.
        C = TensorDescription(element=cutlass_bindings.float32, layout=nhwc, alignment=1)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 8], stages=4,
            warp_count=[2, 4, 1],
            math_instruction=math_inst
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.wgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1
        )

        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        self._run_simt_wgrad(cutlass_bindings.conv.IteratorAlgorithm.analytic)

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
        self._run_simt_wgrad(cutlass_bindings.conv.IteratorAlgorithm.optimized)
|
||||
|
||||
if __name__ == '__main__':
    # Reserve the backend memory pools (64 MiB each) before running the tests.
    pool_bytes = 2**26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
@ -0,0 +1,139 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
|
||||
import cutlass.backend
|
||||
from cutlass.backend import *
|
||||
from cutlass.backend.test import *
|
||||
from cutlass.backend.utils.device import device_cc
|
||||
import unittest
|
||||
|
||||
|
||||
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmTF32nhwcTF32nhwcTF32nhwcTensorOpF32SM80(unittest.TestCase):
    """Wgrad conv2d: tf32 NHWC inputs, f32 NHWC output, SM80 tensor-op f32 accumulation.

    NOTE(review): the class name reads "TF32nhwc" for the output, but C is
    declared as float32 (and the mirrored C++ test is named ..._f32nhwc_...);
    confirm whether the class should be named ...F32nhwcTensorOpF32SM80.
    """

    def _run_tf32_wgrad(self, ab_alignment, c_alignment, threadblock_shape,
                        problem_sizes=None):
        """Build the optimized-iterator tf32 wgrad operation and verify it.

        Args:
            ab_alignment: element alignment of the A and B operands.
            c_alignment: element alignment of the output operand.
            threadblock_shape: [M, N, K] threadblock tile.
            problem_sizes: optional explicit problem list; default sweep if None.
        """
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 8],
            element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
            element_accumulator=cutlass_bindings.float32,
            opcode_class=cutlass_bindings.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add
        )

        nhwc = cutlass_bindings.TensorNHWC
        A = TensorDescription(element=math_inst.element_a, layout=nhwc,
                              alignment=ab_alignment)
        B = TensorDescription(element=math_inst.element_b, layout=nhwc,
                              alignment=ab_alignment)
        C = TensorDescription(element=cutlass_bindings.float32, layout=nhwc,
                              alignment=c_alignment)

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape, stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst
        )

        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass_bindings.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass_bindings.conv.Operator.wgrad,
            iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass_bindings.IdentitySwizzle1
        )

        if problem_sizes is None:
            self.assertTrue(test_all_conv2d(operation))
        else:
            self.assertTrue(test_all_conv2d(operation, problem_sizes))

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
        self._run_tf32_wgrad(ab_alignment=4, c_alignment=8,
                             threadblock_shape=[128, 128, 16])

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_align1(self):
        # Single unit-channel problem so the alignment-1 iterators are exercised.
        problem_sizes = [
            cutlass_bindings.conv.Conv2dProblemSize(
                cutlass_bindings.Tensor4DCoord(1, 8, 8, 1),
                cutlass_bindings.Tensor4DCoord(1, 3, 3, 1),
                cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.MatrixCoord(1, 1),
                cutlass_bindings.conv.Mode.cross_correlation,
                1, 1
            ),
        ]
        self._run_tf32_wgrad(ab_alignment=1, c_alignment=4,
                             threadblock_shape=[128, 128, 32],
                             problem_sizes=problem_sizes)
|
||||
|
||||
if __name__ == '__main__':
    # Reserve the backend memory pools (64 MiB each) before running the tests.
    pool_bytes = 2**26
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
|
||||
42
test/python/backend/conv/run_all_tests.py
Normal file
42
test/python/backend/conv/run_all_tests.py
Normal file
@ -0,0 +1,42 @@
|
||||
#################################################################################################
|
||||
#
|
||||
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
#################################################################################################
|
||||
|
||||
import cutlass.backend
|
||||
import unittest
|
||||
from cutlass.backend.memory_manager import *
|
||||
|
||||
if __name__ == '__main__':
    # Reserve larger (4 GiB) memory pools, then discover and run every
    # conv2d_*.py test module under the current directory.
    pool_bytes = 2**32
    cutlass.backend.get_memory_pool(pool_bytes, pool_bytes)
    suite = unittest.TestLoader().discover('./', 'conv2d_*.py')
    unittest.runner.TextTestRunner().run(suite)
|
||||
Reference in New Issue
Block a user