blob: 9dff36b13924d76c7423be4ea325f6710ee7c9df [file] [log] [blame]
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01001/*
Michele Di Giorgiod9eaf612020-07-08 11:12:57 +01002 * Copyright (c) 2017-2020 Arm Limited.
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/core/Types.h"
25#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
Georgios Pinitasc0b6f762020-11-02 01:37:17 +000026#include "arm_compute/runtime/NEON/functions/NEGEMMConv2d.h"
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +000027#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
Georgios Pinitas9fb11592018-04-26 20:34:58 +010028#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010029#include "arm_compute/runtime/Tensor.h"
30#include "arm_compute/runtime/TensorAllocator.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010031#include "tests/NEON/Accessor.h"
32#include "tests/PaddingCalculator.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010033#include "tests/datasets/LargeConvolutionLayerDataset.h"
34#include "tests/datasets/SmallConvolutionLayerDataset.h"
Anthony Barbier1c0d0ff2018-01-31 13:05:09 +000035#include "tests/datasets/TinyConvolutionLayerDataset.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010036#include "tests/framework/Asserts.h"
37#include "tests/framework/Macros.h"
38#include "tests/framework/datasets/Datasets.h"
39#include "tests/validation/Validation.h"
40#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
Georgios Pinitas9fb11592018-04-26 20:34:58 +010041#include "tests/validation/fixtures/WinogradConvolutionLayerFixture.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010042
43namespace arm_compute
44{
45namespace test
46{
47namespace validation
48{
namespace detail
{
/** Specialization of the test-harness hook that configures a convolution function.
 *
 * NEGEMMConv2d does not follow the common configure() signature used by the
 * generic fixture: it takes no WeightsInfo and bundles pad/stride, dilation,
 * activation and group count into a single Conv2dInfo aggregate, so it needs
 * this dedicated overload.
 */
template <>
void configure_conv_function<NEGEMMConv2d, Tensor>(NEGEMMConv2d &func,
                                                   Tensor *src, const Tensor *weights, const Tensor *bias, Tensor *dst,
                                                   const PadStrideInfo &info, const WeightsInfo &weights_info,
                                                   const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    // NEGEMMConv2d's API has no WeightsInfo parameter; silence the unused-argument warning.
    ARM_COMPUTE_UNUSED(weights_info);

    // Pack the remaining parameters into the aggregate expected by NEGEMMConv2d
    // (the 'false' argument is Conv2dInfo's fast-math flag).
    Conv2dInfo conv_info(info, dilation, act_info, false, num_groups);
    func.configure(src, weights, bias, dst, conv_info);
}
} // namespace detail
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010063namespace
64{
Pablo Telloaf7e6002018-10-08 15:53:14 +010065const RelativeTolerance<float> rel_tolerance_f32(0.01f); /**< Relative tolerance for FP32 types */
66const RelativeTolerance<float> rel_tolerance_winograd_3x3_f32(0.05f); /**< Relative tolerance for FP32 types */
67const AbsoluteTolerance<float> abs_tolerance_f32(0.002f); /**< Absolute tolerance for FP32 types */
68const AbsoluteTolerance<float> abs_tolerance_1xN_f32(0.0041f); /**< Absolute tolerance for FP32 types */
Pablo Tello952aeb12018-09-12 09:47:25 +010069
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000070#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Georgios Pinitas5ce897f2020-04-29 11:44:10 +010071const AbsoluteTolerance<half> tolerance_convolution_layer_f16(half(0.4f));
72constexpr float tolerance_num_f16 = 0.15f;
73#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
74
75#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Gian Marco Iodice41acb762018-08-23 10:25:06 +010076const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for FP16 types */
77const AbsoluteTolerance<float> abs_tolerance_f16(0.2f); /**< Absolute tolerance for FP16 types */
78constexpr float tolerance_num = 0.07f; /**< Tolerance number for the FP16 implementation */
79#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
80constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010081
82/** CNN data types */
83const auto CNNDataTypes = framework::dataset::make("DataType",
84{
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000085#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010086 DataType::F16,
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000087#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010088 DataType::F32,
Isabella Gottardie6630e42018-01-18 15:50:39 +000089 DataType::QASYMM8,
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010090});
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000091const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
92{
93 ActivationLayerInfo(),
94 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
95 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f)
96});
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +010097
98const auto QuantizationData = framework::dataset::make("QuantizationInfo",
99{
100 QuantizationInfo(0.5f, 10),
101 QuantizationInfo(0.3f, 3),
102 QuantizationInfo(1.f, 10),
Michele Di Giorgiof29d1b72019-10-29 10:58:13 +0000103 QuantizationInfo(1.1f, 10),
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100104});
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100105} // namespace
106
TEST_SUITE(NEON)
TEST_SUITE(ConvolutionLayer)

// *INDENT-OFF*
// clang-format off
// Checks that NEConvolutionLayer::get_convolution_method() picks the expected
// backend (Winograd vs. GEMM) for a set of shape/stride/fast-math combinations.
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
                                          framework::dataset::make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32),
                                                                                  TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32),
                                                                                  TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
                                                                                  TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32)
                                          }),
                                          framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32),
                                                                                    TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32),
                                                                                    TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                                                    TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16)
                                          })),
                                          framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32),
                                                                                   TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32),
                                                                                   TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
                                                                                   TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32)
                                          })),
                                          framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
                                                                                 PadStrideInfo(1, 1, 0, 0),
                                                                                 PadStrideInfo(2, 1, 0, 0),
                                                                                 PadStrideInfo(3, 2, 1, 0)
                                          })),
                                          framework::dataset::make("FastMath", { true,   // 3x3 stride-1 + fast-math -> Winograd
                                                                                 true,   // 5x5 stride-1 + fast-math -> Winograd
                                                                                 false,  // stride 2 -> GEMM
                                                                                 false   // F16 weights / stride 3x2 -> GEMM
                                          })),
                                          framework::dataset::make("Expected", { ConvolutionMethod::WINOGRAD, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM })),
                                          input_info, weights_info, output_info, conv_info, fast_math, expected)
{
    // Query the method-selection heuristic with cloned, resizable tensor infos
    // (default WeightsInfo / no dilation / no fused activation).
    ConvolutionMethod is_valid = NEConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
                                                                            &weights_info.clone()->set_is_resizable(true),
                                                                            &output_info.clone()->set_is_resizable(true), conv_info, WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), fast_math);
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
TEST_SUITE_END() // ConvolutionLayer

TEST_SUITE(WinogradLayer)
// Fixture validating NEWinogradConvolutionLayer (fast-math Winograd path).
template <typename T>
using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;

// Same fixture with the bias disabled (last template argument = false).
template <typename T>
using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, T, false>;

TEST_SUITE(FP32)

TEST_SUITE(Conv1x3)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x3Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x3Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output (looser absolute tolerance for the 1xN kernels on large shapes)
    validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
}

TEST_SUITE_END() // Conv1x3

TEST_SUITE(Conv3x1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output (looser absolute tolerance for the Nx1 kernels on large shapes)
    validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
}

TEST_SUITE_END() // Conv3x1

TEST_SUITE(Conv1x5)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x5Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x5Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output (looser absolute tolerance for the 1xN kernels on large shapes)
    validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
}

TEST_SUITE_END() // Conv1x5

TEST_SUITE(Conv5x1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output (looser absolute tolerance for the Nx1 kernels on large shapes)
    validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
}

TEST_SUITE_END() // Conv5x1

TEST_SUITE(Conv7x1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer7x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer7x1Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output (looser absolute tolerance for the Nx1 kernels on large shapes)
    validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
}
TEST_SUITE_END() // Conv7x1

269TEST_SUITE(Conv1x7)
270FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
271 combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x7Dataset(),
272 framework::dataset::make("DataType", { DataType::F32 })),
273 ActivationFunctionsDataset),
274 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
275{
276 // Validate output
277 validate(Accessor(_target), _reference, abs_tolerance_f32);
278}
279
280FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
281 combine(combine(combine(datasets::LargeWinogradConvolutionLayer7x1Dataset(),
282 framework::dataset::make("DataType", { DataType::F32 })),
283 ActivationFunctionsDataset),
284 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
285{
286 // Validate output
287 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
288}
289TEST_SUITE_END() // Conv1x7
290
TEST_SUITE(Conv3x3)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))

{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))

{
    // Validate output
    // floating point arithmetic the Winograd results will not be exactly the same as direct convolution, especially for big shapes
    validate(Accessor(_target), _reference, rel_tolerance_winograd_3x3_f32, 0.f, float(abs_tolerance_f32));
}
TEST_SUITE_END() // Conv3x3

TEST_SUITE(Conv5x5)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x5Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))

{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x5Dataset(),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))

{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}

TEST_SUITE_END() // Conv5x5

// Bias-less variant: runs the 3x3 and 5x5 small datasets through the no-bias fixture.
FIXTURE_DATA_TEST_CASE(RunSmallNoBias, NEWinogradConvolutionLayerNoBiasFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
                                                                          datasets::SmallWinogradConvolutionLayer5x5Dataset()),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),

                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}

TEST_SUITE_END() // FP32
Georgios Pinitas5ce897f2020-04-29 11:44:10 +0100352
353#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
354TEST_SUITE(FP16)
355using CLWinogradConvolutionLayerFastMathFixture16 = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, half, float>;
356
357TEST_SUITE(Conv3x3)
358FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture16, framework::DatasetMode::PRECOMMIT,
359 combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
360 framework::dataset::make("DataType", { DataType::F16 })),
361 ActivationFunctionsDataset),
362 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
363
364{
365 // Validate output
366 validate(Accessor(_target), _reference, tolerance_convolution_layer_f16, tolerance_num_f16);
367}
368
369FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture16, framework::DatasetMode::NIGHTLY,
370 combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(),
371 framework::dataset::make("DataType", { DataType::F16 })),
372 ActivationFunctionsDataset),
373 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
374
375{
376 // Validate output
377 validate(Accessor(_target), _reference, tolerance_convolution_layer_f16, tolerance_num_f16);
378}
379TEST_SUITE_END() // Conv3x3
380TEST_SUITE_END() // FP16
381#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000382TEST_SUITE_END() // WinogradLayer
Pablo Tello89519332017-11-17 11:52:36 +0000383
TEST_SUITE(GEMMConvolutionLayer)
// Float fixture: exercises the GEMM path through the NEConvolutionLayer entry point.
template <typename T>
using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T>;

TEST_SUITE(Float)
#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
TEST_SUITE(BFLOAT16)
// BFLOAT16 data with an FP32 fixture: only NHWC is exercised here.
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                    framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                    framework::dataset::make("DataType", DataType::BFLOAT16)),
                                                                                                                    framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                                                                                                                    ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
TEST_SUITE_END() // BFLOAT16
#endif           /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                   framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                   framework::dataset::make("DataType", DataType::F16)),
                                                                                                                   framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                                                                                                                   ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
}
TEST_SUITE_END() // FP16
#endif           /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                    framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                    framework::dataset::make("DataType", DataType::F32)),
                                                                                                                    framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                    ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

// Quantized fixtures for the GEMM convolution path.
template <typename T>
using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T>;

// Per-channel quantized weights (weights quantized as int8 per output channel).
template <typename T>
using NEGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<Tensor, Accessor, NEConvolutionLayer, T, int8_t>;

// Activations fused into the quantized convolution tests (LU_BOUNDED_RELU
// instead of BOUNDED_RELU used by the float tests).
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                       framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                                                                                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
                                                                                                                       QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                      framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                      framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                                                                                                                      framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                                                                                                      framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.01f, -10) })),
                                                                                                                      QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE(QSYMM8_PER_CHANNEL)
// Unsigned activations (QASYMM8) with per-channel QSYMM8 weights.
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                       framework::dataset::make("ReshapeWeights", { true })),
                                                               framework::dataset::make("DataType", { DataType::QASYMM8 })),
                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                               QuantizationData),
                                       QuantizedActivationFunctionsDataset),
                               framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
// Signed activations (QASYMM8_SIGNED) with per-channel QSYMM8 weights.
FIXTURE_DATA_TEST_CASE(RunSmallSigned, NEGEMMConvolutionLayerQuantizedPerChannelFixture<int8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                       framework::dataset::make("ReshapeWeights", { true })),
                                                               framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })),
                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                               QuantizationData),
                                       QuantizedActivationFunctionsDataset),
                               framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QSYMM8_PER_CHANNEL
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // GEMMConvolutionLayer
Georgios Pinitasc0b6f762020-11-02 01:37:17 +0000498
TEST_SUITE(DirectGEMMConv2d)
// Fixture driving NEGEMMConv2d directly (not through NEConvolutionLayer);
// this backend only supports NHWC, hence the single-layout datasets below.
template <typename T>
using NEDirectGEMMConv2dLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEGEMMConv2d, T>;

TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectGEMMConv2dLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                     framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                     framework::dataset::make("DataType", DataType::F32)),
                                                                                                                     framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                                                                                                                     ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

// Quantized NEGEMMConv2d tests are aarch64-only.
#ifdef __aarch64__
template <typename T>
using NEDirectGEMMConv2dLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConv2d, T>;

template <typename T>
using NEDirectGEMMConv2dLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<Tensor, Accessor, NEGEMMConv2d, T, int8_t>;

// Local to the DirectGEMMConv2d suite (TEST_SUITE opens its own scope, so this
// does not clash with the dataset of the same name in GEMMConvolutionLayer).
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectGEMMConv2dLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                        framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                        framework::dataset::make("DataType", DataType::QASYMM8)),
                                                                                                                        framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                                                                                                                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
                                                                                                                        QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectGEMMConv2dLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                       framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                                                                                                                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                                                                                                                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.01f, -10) })),
                                                                                                                       QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE(QSYMM8_PER_CHANNEL)
FIXTURE_DATA_TEST_CASE(RunSmallSigned, NEDirectGEMMConv2dLayerQuantizedPerChannelFixture<int8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                       framework::dataset::make("ReshapeWeights", { true })),
                                                               framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })),
                                                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                                               QuantizationData),
                                       QuantizedActivationFunctionsDataset),
                               framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QSYMM8_PER_CHANNEL
TEST_SUITE_END() // Quantized
#endif // __aarch64__

TEST_SUITE_END() // DirectGEMMConv2d

TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute