blob: b435744cdc1e18658f5d6798878ad35c627a1184 [file] [log] [blame]
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01001/*
Sheri Zhangac6499a2021-02-10 15:32:38 +00002 * Copyright (c) 2017-2021 Arm Limited.
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/core/Types.h"
25#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
Georgios Pinitasc0b6f762020-11-02 01:37:17 +000026#include "arm_compute/runtime/NEON/functions/NEGEMMConv2d.h"
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +000027#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
Georgios Pinitas9fb11592018-04-26 20:34:58 +010028#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010029#include "arm_compute/runtime/Tensor.h"
30#include "arm_compute/runtime/TensorAllocator.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010031#include "tests/NEON/Accessor.h"
32#include "tests/PaddingCalculator.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010033#include "tests/datasets/LargeConvolutionLayerDataset.h"
34#include "tests/datasets/SmallConvolutionLayerDataset.h"
Anthony Barbier1c0d0ff2018-01-31 13:05:09 +000035#include "tests/datasets/TinyConvolutionLayerDataset.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010036#include "tests/framework/Asserts.h"
37#include "tests/framework/Macros.h"
38#include "tests/framework/datasets/Datasets.h"
39#include "tests/validation/Validation.h"
40#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
Georgios Pinitas9fb11592018-04-26 20:34:58 +010041#include "tests/validation/fixtures/WinogradConvolutionLayerFixture.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010042
43namespace arm_compute
44{
45namespace test
46{
47namespace validation
48{
Georgios Pinitasc0b6f762020-11-02 01:37:17 +000049namespace detail
50{
51template <>
52void configure_conv_function<NEGEMMConv2d, Tensor>(NEGEMMConv2d &func,
53 Tensor *src, const Tensor *weights, const Tensor *bias, Tensor *dst,
54 const PadStrideInfo &info, const WeightsInfo &weights_info,
55 const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
56{
57 ARM_COMPUTE_UNUSED(weights_info);
58
59 Conv2dInfo conv_info(info, dilation, act_info, false, num_groups);
60 func.configure(src, weights, bias, dst, conv_info);
61}
62} // namespace detail
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010063namespace
64{
Pablo Telloaf7e6002018-10-08 15:53:14 +010065const RelativeTolerance<float> rel_tolerance_f32(0.01f); /**< Relative tolerance for FP32 types */
66const RelativeTolerance<float> rel_tolerance_winograd_3x3_f32(0.05f); /**< Relative tolerance for FP32 types */
67const AbsoluteTolerance<float> abs_tolerance_f32(0.002f); /**< Absolute tolerance for FP32 types */
68const AbsoluteTolerance<float> abs_tolerance_1xN_f32(0.0041f); /**< Absolute tolerance for FP32 types */
Pablo Tello952aeb12018-09-12 09:47:25 +010069
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000070#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Georgios Pinitas5ce897f2020-04-29 11:44:10 +010071const AbsoluteTolerance<half> tolerance_convolution_layer_f16(half(0.4f));
72constexpr float tolerance_num_f16 = 0.15f;
73#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
74
75#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Gian Marco Iodice41acb762018-08-23 10:25:06 +010076const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for FP16 types */
77const AbsoluteTolerance<float> abs_tolerance_f16(0.2f); /**< Absolute tolerance for FP16 types */
78constexpr float tolerance_num = 0.07f; /**< Tolerance number for the FP16 implementation */
79#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
80constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010081
82/** CNN data types */
83const auto CNNDataTypes = framework::dataset::make("DataType",
84{
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000085#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010086 DataType::F16,
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000087#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010088 DataType::F32,
Isabella Gottardie6630e42018-01-18 15:50:39 +000089 DataType::QASYMM8,
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010090});
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000091const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
92{
93 ActivationLayerInfo(),
94 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
95 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f)
96});
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +010097
98const auto QuantizationData = framework::dataset::make("QuantizationInfo",
99{
100 QuantizationInfo(0.5f, 10),
101 QuantizationInfo(0.3f, 3),
102 QuantizationInfo(1.f, 10),
Michele Di Giorgiof29d1b72019-10-29 10:58:13 +0000103 QuantizationInfo(1.1f, 10),
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100104});
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100105} // namespace
106
107TEST_SUITE(NEON)
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000108TEST_SUITE(ConvolutionLayer)
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000109
110// *INDENT-OFF*
111// clang-format off
Giorgio Arenaa3221e62018-05-03 15:57:48 +0100112DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000113 framework::dataset::make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32),
114 TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32),
115 TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
116 TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32)
117 }),
118 framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32),
119 TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32),
120 TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
121 TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16)
122 })),
123 framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32),
124 TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32),
125 TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
126 TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32)
127 })),
128 framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
129 PadStrideInfo(1, 1, 0, 0),
130 PadStrideInfo(2, 1, 0, 0),
131 PadStrideInfo(3, 2, 1, 0)
132 })),
133 framework::dataset::make("FastMath", { true,
134 true,
135 false,
136 false
137 })),
Giorgio Arenaa3221e62018-05-03 15:57:48 +0100138 framework::dataset::make("Expected", { ConvolutionMethod::WINOGRAD, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM })),
139 input_info, weights_info, output_info, conv_info, fast_math, expected)
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000140{
Giorgio Arenaa3221e62018-05-03 15:57:48 +0100141 ConvolutionMethod is_valid = NEConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
142 &weights_info.clone()->set_is_resizable(true),
143 &output_info.clone()->set_is_resizable(true), conv_info, WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), fast_math);
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000144 ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
145}
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000146// clang-format on
147// *INDENT-ON*
148TEST_SUITE_END() // ConvolutionLayer
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000149
TEST_SUITE(WinogradLayer)
// Validation fixture running NEWinogradConvolutionLayer against the reference implementation.
template <typename T>
using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;
// Same fixture with the mixed-data-layout template flags enabled (last two bool parameters).
template <typename T>
using NEWinogradConvolutionLayerMixedDataLayoutFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, T, true, true>;

// Variant that runs the layer without a bias tensor (use_bias template parameter = false).
template <typename T>
using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, T, false>;
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000158
TEST_SUITE(FP32)

TEST_SUITE(Conv1x3)
// Small 1x3 Winograd shapes, run at precommit, both NCHW and NHWC layouts.
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x3Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
// Single hand-picked 1x3 configuration run through the mixed-data-layout fixture.
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEWinogradConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(combine(combine(combine(
                                                                                   framework::dataset::make("Input", TensorShape(8U, 8U, 32U)),
                                                                                   framework::dataset::make("Weight", TensorShape(1U, 3U, 32U, 1U))),
                                                                               framework::dataset::make("Bias", TensorShape(1U))),
                                                                       framework::dataset::make("Output", TensorShape(8U, 6U, 1U))),
                                                               framework::dataset::make("PadStrideInfo", PadStrideInfo(1, 1, 0, 0))),
                                                       framework::dataset::make("Dilation", Size2D(1U, 1U))),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
// Large 1x3 shapes, nightly only; 1xN kernels use the looser 1xN absolute tolerance.
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x3Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
}

TEST_SUITE_END() // Conv1x3
197
TEST_SUITE(Conv3x1)
// Small 3x1 Winograd shapes, precommit, both data layouts.
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
// Large 3x1 shapes, nightly; Nx1 kernels use the looser 1xN absolute tolerance.
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
}

TEST_SUITE_END() // Conv3x1
219
TEST_SUITE(Conv1x5)
// Small 1x5 Winograd shapes, precommit, both data layouts.
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x5Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
// Large 1x5 shapes, nightly; looser 1xN tolerance.
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x5Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
}

TEST_SUITE_END() // Conv1x5
241
TEST_SUITE(Conv5x1)
// Small 5x1 Winograd shapes, precommit, both data layouts.
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
// Large 5x1 shapes, nightly; looser 1xN tolerance.
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
}

TEST_SUITE_END() // Conv5x1
263
TEST_SUITE(Conv7x1)
// Small 7x1 Winograd shapes, precommit, both data layouts.
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer7x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}

// Large 7x1 shapes, nightly; looser 1xN tolerance.
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer7x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
}
TEST_SUITE_END() // Conv7x1
285
286TEST_SUITE(Conv1x7)
287FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
288 combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x7Dataset(),
289 framework::dataset::make("DataType", { DataType::F32 })),
290 ActivationFunctionsDataset),
291 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
292{
293 // Validate output
294 validate(Accessor(_target), _reference, abs_tolerance_f32);
295}
296
297FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
298 combine(combine(combine(datasets::LargeWinogradConvolutionLayer7x1Dataset(),
299 framework::dataset::make("DataType", { DataType::F32 })),
300 ActivationFunctionsDataset),
301 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
302{
303 // Validate output
304 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
305}
306TEST_SUITE_END() // Conv1x7
307
TEST_SUITE(Conv3x3)
// Small 3x3 Winograd shapes, precommit, both data layouts.
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))

{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))

{
    // Validate output
    // floating point arithmetic the Winograd results will not be exactly the same as direct convolution, especially for big shapes
    validate(Accessor(_target), _reference, rel_tolerance_winograd_3x3_f32, 0.f, float(abs_tolerance_f32));
}
TEST_SUITE_END() // Conv3x3
331
TEST_SUITE(Conv5x5)
// Small 5x5 Winograd shapes, precommit, both data layouts.
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x5Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))

{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
// Large 5x5 shapes, nightly only.
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x5Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))

{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}

TEST_SUITE_END() // Conv5x5
Pablo Tello89519332017-11-17 11:52:36 +0000355
// Exercises the bias-less path (no-bias fixture) over the 3x3 and 5x5 small datasets.
FIXTURE_DATA_TEST_CASE(RunSmallNoBias, NEWinogradConvolutionLayerNoBiasFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
                                                                          datasets::SmallWinogradConvolutionLayer5x5Dataset()),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),

                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}

TEST_SUITE_END() // FP32
Georgios Pinitas5ce897f2020-04-29 11:44:10 +0100369
370#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
371TEST_SUITE(FP16)
372using CLWinogradConvolutionLayerFastMathFixture16 = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, half, float>;
373
374TEST_SUITE(Conv3x3)
375FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture16, framework::DatasetMode::PRECOMMIT,
376 combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
377 framework::dataset::make("DataType", { DataType::F16 })),
378 ActivationFunctionsDataset),
379 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
380
381{
382 // Validate output
383 validate(Accessor(_target), _reference, tolerance_convolution_layer_f16, tolerance_num_f16);
384}
385
386FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture16, framework::DatasetMode::NIGHTLY,
387 combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(),
388 framework::dataset::make("DataType", { DataType::F16 })),
389 ActivationFunctionsDataset),
390 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
391
392{
393 // Validate output
394 validate(Accessor(_target), _reference, tolerance_convolution_layer_f16, tolerance_num_f16);
395}
396TEST_SUITE_END() // Conv3x3
397TEST_SUITE_END() // FP16
398#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000399TEST_SUITE_END() // WinogradLayer
Pablo Tello89519332017-11-17 11:52:36 +0000400
TEST_SUITE(GEMMConvolutionLayer)
// Fixtures drive the public NEConvolutionLayer entry point.
template <typename T>
using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T>;
// Same fixture with the mixed-data-layout template flag enabled (trailing bool parameter).
template <typename T>
using NEGEMMConvolutionLayerMixedDataLayoutFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T, true>;
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100406
TEST_SUITE(Float)
#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
TEST_SUITE(BFLOAT16)
// BFLOAT16 convolution; NHWC only. Validated against the FP32 tolerances.
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::BFLOAT16)),
                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
TEST_SUITE_END() // BFLOAT16
#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
421
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +0000422#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100423TEST_SUITE(FP16)
Michele Di Giorgioe37662a2020-04-29 15:14:18 +0100424FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
425 framework::dataset::make("ReshapeWeights", { true })),
426 framework::dataset::make("DataType", DataType::F16)),
427 framework::dataset::make("DataLayout", { DataLayout::NCHW })),
428 ActivationFunctionsDataset))
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100429{
430 // Validate output
Gian Marco Iodice41acb762018-08-23 10:25:06 +0100431 validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100432}
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000433TEST_SUITE_END() // FP16
434#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100435
TEST_SUITE(FP32)
// FP32 convolution over the small dataset, both data layouts, fused activations.
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
// Single hand-picked configuration run through the mixed-data-layout fixture.
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
                           framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
                           framework::dataset::make("Bias", TensorShape(2U))),
                           framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
                           framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
                           framework::dataset::make("Dilation", Size2D(1, 1))),
                           framework::dataset::make("ReshapeWeights", { true })),
                           framework::dataset::make("DataType", DataType::F32)),
                           framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                           ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100464
// Quantized validation fixtures, all driving the public NEConvolutionLayer entry point.
template <typename T>
using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T>;
// Same fixture with the mixed-data-layout template flag enabled.
template <typename T>
using NEGEMMConvolutionLayerQuantizedMixedDataLayoutFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T, true>;

// Per-channel quantized weights (int8_t weight storage type).
template <typename T>
using NEGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<Tensor, Accessor, NEConvolutionLayer, T, int8_t>;

// Activations fused into the quantized test cases (LU_BOUNDED_RELU instead of BOUNDED_RELU).
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
Isabella Gottardie6630e42018-01-18 15:50:39 +0000479TEST_SUITE(Quantized)
480TEST_SUITE(QASYMM8)
Michele Di Giorgioe37662a2020-04-29 15:14:18 +0100481FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
482 framework::dataset::make("ReshapeWeights", { true })),
483 framework::dataset::make("DataType", DataType::QASYMM8)),
484 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
485 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
486 QuantizedActivationFunctionsDataset))
Isabella Gottardie6630e42018-01-18 15:50:39 +0000487{
488 // Validate output
489 validate(Accessor(_target), _reference, tolerance_qasymm8);
490}
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000491FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
492 combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
493 framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
494 framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
495 framework::dataset::make("Bias", TensorShape(2U))),
496 framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
497 framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
498 framework::dataset::make("Dilation", Size2D(1, 1))),
499 framework::dataset::make("ReshapeWeights", { true })),
500 framework::dataset::make("DataType", DataType::QASYMM8)),
501 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
502 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
503 QuantizedActivationFunctionsDataset))
504{
505 // Validate output
506 validate(Accessor(_target), _reference, tolerance_qasymm8);
507}
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000508TEST_SUITE_END() // QASYMM8
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100509
Georgios Pinitas6e1791b2019-12-02 19:01:25 +0000510TEST_SUITE(QASYMM8_SIGNED)
Michele Di Giorgioe37662a2020-04-29 15:14:18 +0100511FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
512 framework::dataset::make("ReshapeWeights", { true })),
513 framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
514 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
515 framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.01f, -10) })),
516 QuantizedActivationFunctionsDataset))
Georgios Pinitas6e1791b2019-12-02 19:01:25 +0000517{
518 // Validate output
519 validate(Accessor(_target), _reference, tolerance_qasymm8);
520}
// Single fixed-shape case: 23x27x5 input, 3x3x5x2 weights, stride (2,1) with
// no padding, producing an 11x25x2 output, run in both NCHW and NHWC.
// NOTE(review): the "MixedDataLayout" name suggests the fixture exercises a
// layout change between configure and run — confirm against the fixture's
// definition, which is not visible here.
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                       framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
                       framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
                       framework::dataset::make("Bias", TensorShape(2U))),
                       framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
                       framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
                       framework::dataset::make("Dilation", Size2D(1, 1))),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
                       QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
Georgios Pinitas6e1791b2019-12-02 19:01:25 +0000538TEST_SUITE_END() // QASYMM8_SIGNED
539
TEST_SUITE(QSYMM8_PER_CHANNEL)
// Per-channel weight quantization with an unsigned (QASYMM8) input/output
// tensor: small shapes, both layouts, quantization parameters drawn from
// QuantizationData (defined earlier in this file), each quantized
// activation, and QSYMM8_PER_CHANNEL weights.
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", { DataType::QASYMM8 })),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       QuantizationData),
                       QuantizedActivationFunctionsDataset),
                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
// Same per-channel-weights coverage as RunSmall above, but with a signed
// (QASYMM8_SIGNED) input/output tensor.
FIXTURE_DATA_TEST_CASE(RunSmallSigned, NEGEMMConvolutionLayerQuantizedPerChannelFixture<int8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       QuantizationData),
                       QuantizedActivationFunctionsDataset),
                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100565TEST_SUITE_END() // QSYMM8_PER_CHANNEL
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000566TEST_SUITE_END() // Quantized
Isabella Gottardie6630e42018-01-18 15:50:39 +0000567
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000568TEST_SUITE_END() // GEMMConvolutionLayer
Georgios Pinitasc0b6f762020-11-02 01:37:17 +0000569
TEST_SUITE(DirectGEMMConv2d)
// Fixture alias: validates the NEGEMMConv2d function (GEMM-based conv2d
// without the full NEConvolutionLayer dispatch) against the reference
// convolution, for element type T.
template <typename T>
using NEDirectGEMMConv2dLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEGEMMConv2d, T>;
573
TEST_SUITE(Float)
TEST_SUITE(FP32)
// FP32 coverage for NEGEMMConv2d: small convolution shapes, NHWC only
// (the sole layout listed below), each activation in
// ActivationFunctionsDataset. Checked with both a relative and an absolute
// FP32 tolerance.
FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectGEMMConv2dLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
587
#ifdef __aarch64__
// The quantized DirectGEMMConv2d tests below are compiled for AArch64 builds
// only. NOTE(review): presumably the quantized direct-GEMM kernels are
// aarch64-specific — confirm against NEGEMMConv2d::validate().

// Fixture alias for asymmetric per-tensor quantization with element type T.
template <typename T>
using NEDirectGEMMConv2dLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConv2d, T>;

// Per-channel variant: weights are stored as int8_t (QSYMM8_PER_CHANNEL)
// while the input/output element type is T.
template <typename T>
using NEDirectGEMMConv2dLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<Tensor, Accessor, NEGEMMConv2d, T, int8_t>;

// Activations exercised by the quantized DirectGEMMConv2d tests: identity,
// RELU, and upper-bounded RELU at 6. NOTE(review): same name as the dataset
// used by the GEMMConvolutionLayer suites earlier in this file — this relies
// on TEST_SUITE introducing a nested scope; confirm there is no ODR clash.
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
// Unsigned 8-bit asymmetric quantization for NEGEMMConv2d: small shapes,
// NHWC only, one input quantization (scale 2/255, zero point 10), each
// quantized activation. Output compared within tolerance_qasymm8.
FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectGEMMConv2dLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::QASYMM8)),
                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
                       QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8
614
TEST_SUITE(QASYMM8_SIGNED)
// Signed 8-bit asymmetric quantization for NEGEMMConv2d: small shapes, NHWC
// only, input quantization (scale 0.01, zero point -10), each quantized
// activation. Output compared within tolerance_qasymm8.
FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectGEMMConv2dLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.01f, -10) })),
                       QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8_SIGNED
627
TEST_SUITE(QSYMM8_PER_CHANNEL)
// Per-channel weight quantization for NEGEMMConv2d with a signed
// (QASYMM8_SIGNED) input/output tensor: small shapes, NHWC only,
// quantization parameters from QuantizationData, each quantized activation,
// and QSYMM8_PER_CHANNEL weights.
FIXTURE_DATA_TEST_CASE(RunSmallSigned, NEDirectGEMMConv2dLayerQuantizedPerChannelFixture<int8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })),
                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                       QuantizationData),
                       QuantizedActivationFunctionsDataset),
                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QSYMM8_PER_CHANNEL
TEST_SUITE_END() // Quantized
Georgios Pinitas61ffda42020-11-13 14:03:07 +0000643#endif // __aarch64__
Georgios Pinitasc0b6f762020-11-02 01:37:17 +0000644
645TEST_SUITE_END() // DirectGEMMConv2d
646
Sheri Zhangac6499a2021-02-10 15:32:38 +0000647TEST_SUITE_END() // Neon
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100648} // namespace validation
649} // namespace test
650} // namespace arm_compute