/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeConvolutionLayerDataset.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/datasets/TinyConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float>  absolute_tolerance_float(0.0001f);    /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<float>            tolerance_f32(0.1f);                  /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
constexpr AbsoluteTolerance<float>  tolerance_qasymm8(1);                 /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float                     tolerance_num = 0.07f;                /**< Tolerance number */

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32,
    DataType::QASYMM8,
    DataType::QASYMM8_SIGNED,
});

/** Grouped CNN data types */
const auto GroupedCNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32
});

const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
const auto ActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
} // namespace

TEST_SUITE(CL)
TEST_SUITE(ConvolutionLayer)

// *INDENT-OFF*
// clang-format off
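// Checks that CLConvolutionLayer::get_convolution_method() returns the expected convolution method
// (GEMM or Winograd) for each combination of tensor infos, conv info, GPU target, dilation and fast-math flag below.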
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
                   framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),             // Select GEMM
                                                           TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),             // Select GEMM
                                                           TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),         // Select GEMM
                                                           TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32),        // Select WINOGRAD
                                                           TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),           // Select GEMM
                                                           TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32),         // Select GEMM
                                                           TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32),            // Select WINOGRAD
                                                           TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),             // Select GEMM
                                                           TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::QASYMM8_SIGNED),  // Select GEMM
                                                         }),
                   framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16),
                                                             TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::QASYMM8_SIGNED),
                                                           })),
                   framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::QASYMM8_SIGNED),
                                                          })),
                   framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
                                                          PadStrideInfo(1, 2, 1, 1),
                                                          PadStrideInfo(1, 1, 0, 0),
                                                          PadStrideInfo(1, 1, 0, 0),
                                                          PadStrideInfo(2, 1, 0, 0),
                                                          PadStrideInfo(3, 2, 1, 0),
                                                          PadStrideInfo(1, 1, 2, 2),
                                                          PadStrideInfo(1, 1, 2, 2),
                                                          PadStrideInfo(1, 1, 2, 2),
                                                        })),
                   framework::dataset::make("GpuTarget", { GPUTarget::BIFROST,
                                                           GPUTarget::MIDGARD,
                                                           GPUTarget::G71,
                                                           GPUTarget::G71,
                                                           GPUTarget::MIDGARD,
                                                           GPUTarget::BIFROST,
                                                           GPUTarget::BIFROST,
                                                           GPUTarget::BIFROST,
                                                           GPUTarget::BIFROST,
                                                         })),
                   framework::dataset::make("Dilation", { Size2D(1U, 1U),
                                                          Size2D(1U, 1U),
                                                          Size2D(1U, 1U),
                                                          Size2D(1U, 1U),
                                                          Size2D(1U, 1U),
                                                          Size2D(1U, 1U),
                                                          Size2D(1U, 1U),
                                                          Size2D(2U, 1U),
                                                          Size2D(2U, 1U),
                                                        })),
                   framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true, true })),
                   framework::dataset::make("Expected", { ConvolutionMethod::GEMM,
                                                          ConvolutionMethod::GEMM,
                                                          ConvolutionMethod::GEMM,
                                                          ConvolutionMethod::WINOGRAD,
                                                          ConvolutionMethod::GEMM,
                                                          ConvolutionMethod::GEMM,
                                                          ConvolutionMethod::WINOGRAD,
                                                          ConvolutionMethod::GEMM,
                                                          ConvolutionMethod::GEMM,
                                                        })),
               input_info, weights_info, output_info, conv_info, gpu_target, dilation, enable_fast_math, expected)
{
    ConvolutionMethod is_valid = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
                                                                            &weights_info.clone()->set_is_resizable(true),
                                                                            &output_info.clone()->set_is_resizable(true), conv_info,
                                                                            WeightsInfo(),
                                                                            ActivationLayerInfo(),
                                                                            gpu_target,
                                                                            dilation,
                                                                            enable_fast_math);
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
TEST_SUITE_END() // ConvolutionLayer

TEST_SUITE(GEMMConvolutionLayer)

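// Configures CLGEMMConvolutionLayer on the small dataset for every CNN data type and activation, then checks
// that the tensors report the expected valid regions and that their quantization info is left untouched.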
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                           CNNDataTypes),
                                                                   ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

    // Create tensors
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate QuantizationInfo
    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);

    // Validate padding
    //TODO(COMPMID-415) Need to validate padding?
}

template <typename T>
using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

TEST_SUITE(Float)
TEST_SUITE(FP16)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
TEST_SUITE_END() // FP16

TEST_SUITE(FP32)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, absolute_tolerance_float);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

template <typename T>
using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
template <typename T>
using CLGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, int8_t>;

const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
const auto QuantizedActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});

TEST_SUITE(Quantized)

const auto QuantizationData = framework::dataset::make("QuantizationInfo",
{
    QuantizationInfo(0.5f, 10),
    QuantizationInfo(0.3f, 3),
    QuantizationInfo(1.1f, 10),
});
TEST_SUITE(QASYMM8)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       QuantizationData),
                               QuantizedActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       QuantizationData),
                               QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       QuantizationData),
                               QuantizedActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE(QSYMM8_PER_CHANNEL)

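// Per-channel quantization: the activations use QASYMM8 while the weights use QSYMM8_PER_CHANNEL.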
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                                       framework::dataset::make("ReshapeWeights", { true })),
                                                               framework::dataset::make("DataType", { DataType::QASYMM8 })),
                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                               QuantizationData),
                                       QuantizedActivationFunctionsSmallDataset),
                               framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                       framework::dataset::make("ReshapeWeights", { true })),
                                                               framework::dataset::make("DataType", { DataType::QASYMM8 })),
                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                               QuantizationData),
                                       QuantizedActivationFunctionsDataset),
                               framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QSYMM8_PER_CHANNEL
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // GEMMConvolutionLayer

template <typename T>
using CLGEMMGroupedConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

TEST_SUITE(GroupedGEMMConvolutionLayer)

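// Configures a grouped convolution through CLGEMMConvolutionLayer, deriving the number of groups from the
// ratio of input channels to weights channels, and checks the resulting valid regions.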
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                                           GroupedCNNDataTypes),
                                                                   ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

    // The number of groups is calculated by dividing the number of input channels of the input tensor by the number of input channels of the weights shape
    const int num_groups = input_shape[2] / weights_shape[2];

    // Create tensors
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type);
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1);
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, data_type, 1);
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1);

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info, num_groups);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate padding
    //TODO(COMPMID-415) Need to validate padding?
}

TEST_SUITE(Float)
TEST_SUITE(FP32)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallGroupedConvolutionLayerDataset(), datasets::LargeGroupedConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP32

TEST_SUITE(FP16)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallGroupedConvolutionLayerDataset(), datasets::LargeGroupedConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float

TEST_SUITE_END() // GroupedGEMMConvolutionLayer
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute