/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeConvolutionLayerDataset.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/datasets/TinyConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
#include "tests/validation/fixtures/WinogradConvolutionLayerFixture.h"

42namespace arm_compute
43{
44namespace test
45{
46namespace validation
47{
48namespace
49{
Pablo Telloaf7e6002018-10-08 15:53:14 +010050const RelativeTolerance<float> rel_tolerance_f32(0.01f); /**< Relative tolerance for FP32 types */
51const RelativeTolerance<float> rel_tolerance_winograd_3x3_f32(0.05f); /**< Relative tolerance for FP32 types */
52const AbsoluteTolerance<float> abs_tolerance_f32(0.002f); /**< Absolute tolerance for FP32 types */
53const AbsoluteTolerance<float> abs_tolerance_1xN_f32(0.0041f); /**< Absolute tolerance for FP32 types */
Pablo Tello952aeb12018-09-12 09:47:25 +010054
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000055#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Georgios Pinitas5ce897f2020-04-29 11:44:10 +010056const AbsoluteTolerance<half> tolerance_convolution_layer_f16(half(0.4f));
57constexpr float tolerance_num_f16 = 0.15f;
58#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
59
60#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Gian Marco Iodice41acb762018-08-23 10:25:06 +010061const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for FP16 types */
62const AbsoluteTolerance<float> abs_tolerance_f16(0.2f); /**< Absolute tolerance for FP16 types */
63constexpr float tolerance_num = 0.07f; /**< Tolerance number for the FP16 implementation */
64#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
65constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010066
67/** CNN data types */
68const auto CNNDataTypes = framework::dataset::make("DataType",
69{
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000070#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010071 DataType::F16,
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000072#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010073 DataType::F32,
Isabella Gottardie6630e42018-01-18 15:50:39 +000074 DataType::QASYMM8,
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010075});
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000076const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
77{
78 ActivationLayerInfo(),
79 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
80 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f)
81});
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +010082
83const auto QuantizationData = framework::dataset::make("QuantizationInfo",
84{
85 QuantizationInfo(0.5f, 10),
86 QuantizationInfo(0.3f, 3),
87 QuantizationInfo(1.f, 10),
Michele Di Giorgiof29d1b72019-10-29 10:58:13 +000088 QuantizationInfo(1.1f, 10),
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +010089});
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010090} // namespace
91
92TEST_SUITE(NEON)
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +000093TEST_SUITE(ConvolutionLayer)
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +000094
95// *INDENT-OFF*
96// clang-format off
Giorgio Arenaa3221e62018-05-03 15:57:48 +010097DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +000098 framework::dataset::make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32),
99 TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32),
100 TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
101 TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32)
102 }),
103 framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32),
104 TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32),
105 TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
106 TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16)
107 })),
108 framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32),
109 TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32),
110 TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
111 TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32)
112 })),
113 framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
114 PadStrideInfo(1, 1, 0, 0),
115 PadStrideInfo(2, 1, 0, 0),
116 PadStrideInfo(3, 2, 1, 0)
117 })),
118 framework::dataset::make("FastMath", { true,
119 true,
120 false,
121 false
122 })),
Giorgio Arenaa3221e62018-05-03 15:57:48 +0100123 framework::dataset::make("Expected", { ConvolutionMethod::WINOGRAD, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM })),
124 input_info, weights_info, output_info, conv_info, fast_math, expected)
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000125{
Giorgio Arenaa3221e62018-05-03 15:57:48 +0100126 ConvolutionMethod is_valid = NEConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
127 &weights_info.clone()->set_is_resizable(true),
128 &output_info.clone()->set_is_resizable(true), conv_info, WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), fast_math);
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000129 ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
130}
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000131// clang-format on
132// *INDENT-ON*
133TEST_SUITE_END() // ConvolutionLayer
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000134
Pablo Tello89519332017-11-17 11:52:36 +0000135TEST_SUITE(WinogradLayer)
136template <typename T>
Giorgio Arenaa3221e62018-05-03 15:57:48 +0100137using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;
Pablo Tello89519332017-11-17 11:52:36 +0000138
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000139template <typename T>
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +0000140using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, T, false>;
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000141
Pablo Tello89519332017-11-17 11:52:36 +0000142TEST_SUITE(FP32)
Pablo Tello7282d562018-06-14 15:35:49 +0100143
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100144TEST_SUITE(Conv1x3)
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000145FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100146 combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x3Dataset(),
147 framework::dataset::make("DataType", { DataType::F32 })),
148 ActivationFunctionsDataset),
149 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
150{
151 // Validate output
152 validate(Accessor(_target), _reference, abs_tolerance_f32);
153}
154FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
155 combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x3Dataset(),
156 framework::dataset::make("DataType", { DataType::F32 })),
157 ActivationFunctionsDataset),
158 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
159{
160 // Validate output
Pablo Tello952aeb12018-09-12 09:47:25 +0100161 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100162}
163
164TEST_SUITE_END() // Conv1x3
165
166TEST_SUITE(Conv3x1)
167FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
168 combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x1Dataset(),
169 framework::dataset::make("DataType", { DataType::F32 })),
170 ActivationFunctionsDataset),
171 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
172{
173 // Validate output
174 validate(Accessor(_target), _reference, abs_tolerance_f32);
175}
176FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
177 combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x1Dataset(),
178 framework::dataset::make("DataType", { DataType::F32 })),
179 ActivationFunctionsDataset),
180 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
181{
182 // Validate output
Pablo Tello952aeb12018-09-12 09:47:25 +0100183 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100184}
185
186TEST_SUITE_END() // Conv3x1
187
Pablo Tello000d33a2018-09-03 16:59:20 +0100188TEST_SUITE(Conv1x5)
189FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
190 combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x5Dataset(),
191 framework::dataset::make("DataType", { DataType::F32 })),
192 ActivationFunctionsDataset),
193 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
194{
195 // Validate output
196 validate(Accessor(_target), _reference, abs_tolerance_f32);
197}
198FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
199 combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x5Dataset(),
200 framework::dataset::make("DataType", { DataType::F32 })),
201 ActivationFunctionsDataset),
202 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
203{
204 // Validate output
Pablo Tello952aeb12018-09-12 09:47:25 +0100205 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
Pablo Tello000d33a2018-09-03 16:59:20 +0100206}
207
208TEST_SUITE_END() // Conv1x5
209
210TEST_SUITE(Conv5x1)
211FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
212 combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x1Dataset(),
213 framework::dataset::make("DataType", { DataType::F32 })),
214 ActivationFunctionsDataset),
215 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
216{
217 // Validate output
218 validate(Accessor(_target), _reference, abs_tolerance_f32);
219}
220FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
221 combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x1Dataset(),
222 framework::dataset::make("DataType", { DataType::F32 })),
223 ActivationFunctionsDataset),
224 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
225{
226 // Validate output
Pablo Tello952aeb12018-09-12 09:47:25 +0100227 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
Pablo Tello000d33a2018-09-03 16:59:20 +0100228}
229
230TEST_SUITE_END() // Conv5x1
231
Pablo Tello96e922e2018-09-26 11:25:15 +0100232TEST_SUITE(Conv7x1)
233FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
234 combine(combine(combine(datasets::SmallWinogradConvolutionLayer7x1Dataset(),
235 framework::dataset::make("DataType", { DataType::F32 })),
236 ActivationFunctionsDataset),
237 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
238{
239 // Validate output
240 validate(Accessor(_target), _reference, abs_tolerance_f32);
241}
242
243FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
244 combine(combine(combine(datasets::LargeWinogradConvolutionLayer7x1Dataset(),
245 framework::dataset::make("DataType", { DataType::F32 })),
246 ActivationFunctionsDataset),
247 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
248{
249 // Validate output
250 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
251}
252TEST_SUITE_END() // Conv7x1
253
254TEST_SUITE(Conv1x7)
255FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
256 combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x7Dataset(),
257 framework::dataset::make("DataType", { DataType::F32 })),
258 ActivationFunctionsDataset),
259 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
260{
261 // Validate output
262 validate(Accessor(_target), _reference, abs_tolerance_f32);
263}
264
265FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
266 combine(combine(combine(datasets::LargeWinogradConvolutionLayer7x1Dataset(),
267 framework::dataset::make("DataType", { DataType::F32 })),
268 ActivationFunctionsDataset),
269 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
270{
271 // Validate output
272 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
273}
274TEST_SUITE_END() // Conv1x7
275
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100276TEST_SUITE(Conv3x3)
277FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
278 combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
Pablo Tello7282d562018-06-14 15:35:49 +0100279 framework::dataset::make("DataType", { DataType::F32 })),
280 ActivationFunctionsDataset),
281 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
282
Pablo Tello89519332017-11-17 11:52:36 +0000283{
284 // Validate output
Georgios Pinitas8dea6022018-06-08 18:33:31 +0100285 validate(Accessor(_target), _reference, abs_tolerance_f32);
Pablo Tello89519332017-11-17 11:52:36 +0000286}
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100287FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
288 combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(),
289 framework::dataset::make("DataType", { DataType::F32 })),
290 ActivationFunctionsDataset),
291 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
292
293{
294 // Validate output
Pablo Telloaf7e6002018-10-08 15:53:14 +0100295 // floating point arithmetic the Winograd results will not be exactly the same as direct convolution, especially for big shapes
296 validate(Accessor(_target), _reference, rel_tolerance_winograd_3x3_f32, 0.f, float(abs_tolerance_f32));
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100297}
298TEST_SUITE_END() // Conv3x3
299
300TEST_SUITE(Conv5x5)
301FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
302 combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x5Dataset(),
303 framework::dataset::make("DataType", { DataType::F32 })),
304 ActivationFunctionsDataset),
305 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
306
307{
308 // Validate output
309 validate(Accessor(_target), _reference, abs_tolerance_f32);
310}
311FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
312 combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x5Dataset(),
313 framework::dataset::make("DataType", { DataType::F32 })),
314 ActivationFunctionsDataset),
315 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
316
317{
318 // Validate output
319 validate(Accessor(_target), _reference, abs_tolerance_f32);
320}
321
322TEST_SUITE_END() // Conv5x5
Pablo Tello89519332017-11-17 11:52:36 +0000323
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000324FIXTURE_DATA_TEST_CASE(RunSmallNoBias, NEWinogradConvolutionLayerNoBiasFixture<float>, framework::DatasetMode::PRECOMMIT,
Pablo Tello7282d562018-06-14 15:35:49 +0100325 combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
326 datasets::SmallWinogradConvolutionLayer5x5Dataset()),
327 framework::dataset::make("DataType", { DataType::F32 })),
328 ActivationFunctionsDataset),
329
330 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000331{
332 // Validate output
Georgios Pinitas8dea6022018-06-08 18:33:31 +0100333 validate(Accessor(_target), _reference, abs_tolerance_f32);
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000334}
335
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000336TEST_SUITE_END() // FP32
Georgios Pinitas5ce897f2020-04-29 11:44:10 +0100337
338#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
339TEST_SUITE(FP16)
340using CLWinogradConvolutionLayerFastMathFixture16 = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, half, float>;
341
342TEST_SUITE(Conv3x3)
343FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture16, framework::DatasetMode::PRECOMMIT,
344 combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
345 framework::dataset::make("DataType", { DataType::F16 })),
346 ActivationFunctionsDataset),
347 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
348
349{
350 // Validate output
351 validate(Accessor(_target), _reference, tolerance_convolution_layer_f16, tolerance_num_f16);
352}
353
354FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture16, framework::DatasetMode::NIGHTLY,
355 combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(),
356 framework::dataset::make("DataType", { DataType::F16 })),
357 ActivationFunctionsDataset),
358 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
359
360{
361 // Validate output
362 validate(Accessor(_target), _reference, tolerance_convolution_layer_f16, tolerance_num_f16);
363}
364TEST_SUITE_END() // Conv3x3
365TEST_SUITE_END() // FP16
366#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000367TEST_SUITE_END() // WinogradLayer
Pablo Tello89519332017-11-17 11:52:36 +0000368
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000369TEST_SUITE(GEMMConvolutionLayer)
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100370template <typename T>
Anthony Barbierc8e84b52018-07-17 16:48:42 +0100371using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100372
373TEST_SUITE(Float)
Georgios Pinitasc7b183a2020-03-06 18:12:09 +0000374#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
375TEST_SUITE(BFLOAT16)
Michele Di Giorgioe37662a2020-04-29 15:14:18 +0100376FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
377 framework::dataset::make("ReshapeWeights", { true })),
378 framework::dataset::make("DataType", DataType::BFLOAT16)),
379 framework::dataset::make("DataLayout", { DataLayout::NHWC })),
380 ActivationFunctionsDataset))
Georgios Pinitasc7b183a2020-03-06 18:12:09 +0000381{
382 // Validate output
383 validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
384}
385TEST_SUITE_END() // BFLOAT16
386#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
387
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +0000388#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100389TEST_SUITE(FP16)
Michele Di Giorgioe37662a2020-04-29 15:14:18 +0100390FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
391 framework::dataset::make("ReshapeWeights", { true })),
392 framework::dataset::make("DataType", DataType::F16)),
393 framework::dataset::make("DataLayout", { DataLayout::NCHW })),
394 ActivationFunctionsDataset))
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100395{
396 // Validate output
Gian Marco Iodice41acb762018-08-23 10:25:06 +0100397 validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100398}
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000399TEST_SUITE_END() // FP16
400#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100401
402TEST_SUITE(FP32)
Michele Di Giorgioe37662a2020-04-29 15:14:18 +0100403FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
404 framework::dataset::make("ReshapeWeights", { true })),
405 framework::dataset::make("DataType", DataType::F32)),
406 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
407 ActivationFunctionsDataset))
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100408{
409 // Validate output
Georgios Pinitas8dea6022018-06-08 18:33:31 +0100410 validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100411}
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000412TEST_SUITE_END() // FP32
413TEST_SUITE_END() // Float
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100414
415template <typename T>
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000416using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
Isabella Gottardie6630e42018-01-18 15:50:39 +0000417
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100418template <typename T>
419using NEGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T, int8_t>;
420
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000421const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
422{
423 ActivationLayerInfo(),
424 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
425 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
426});
Isabella Gottardie6630e42018-01-18 15:50:39 +0000427TEST_SUITE(Quantized)
428TEST_SUITE(QASYMM8)
Michele Di Giorgioe37662a2020-04-29 15:14:18 +0100429FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
430 framework::dataset::make("ReshapeWeights", { true })),
431 framework::dataset::make("DataType", DataType::QASYMM8)),
432 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
433 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
434 QuantizedActivationFunctionsDataset))
Isabella Gottardie6630e42018-01-18 15:50:39 +0000435{
436 // Validate output
437 validate(Accessor(_target), _reference, tolerance_qasymm8);
438}
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000439TEST_SUITE_END() // QASYMM8
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100440
Georgios Pinitas6e1791b2019-12-02 19:01:25 +0000441TEST_SUITE(QASYMM8_SIGNED)
Michele Di Giorgioe37662a2020-04-29 15:14:18 +0100442FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
443 framework::dataset::make("ReshapeWeights", { true })),
444 framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
445 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
446 framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.01f, -10) })),
447 QuantizedActivationFunctionsDataset))
Georgios Pinitas6e1791b2019-12-02 19:01:25 +0000448{
449 // Validate output
450 validate(Accessor(_target), _reference, tolerance_qasymm8);
451}
452TEST_SUITE_END() // QASYMM8_SIGNED
453
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100454TEST_SUITE(QSYMM8_PER_CHANNEL)
Michele Di Giorgioe37662a2020-04-29 15:14:18 +0100455FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::ALL,
Georgios Pinitas63d4dbd2019-11-08 11:51:56 +0000456 combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100457 framework::dataset::make("ReshapeWeights", { true })),
458 framework::dataset::make("DataType", { DataType::QASYMM8 })),
459 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
460 QuantizationData),
461 QuantizedActivationFunctionsDataset),
462 framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
463{
464 // Validate output
465 validate(Accessor(_target), _reference, tolerance_qasymm8);
466}
Sang-Hoon Park1fad8142020-07-03 13:07:35 +0100467FIXTURE_DATA_TEST_CASE(RunSmallSigned, NEGEMMConvolutionLayerQuantizedPerChannelFixture<int8_t>, framework::DatasetMode::ALL,
468 combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
469 framework::dataset::make("ReshapeWeights", { true })),
470 framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })),
471 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
472 QuantizationData),
473 QuantizedActivationFunctionsDataset),
474 framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
475{
476 // Validate output
477 validate(Accessor(_target), _reference, tolerance_qasymm8);
478}
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100479TEST_SUITE_END() // QSYMM8_PER_CHANNEL
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000480TEST_SUITE_END() // Quantized
Isabella Gottardie6630e42018-01-18 15:50:39 +0000481
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000482TEST_SUITE_END() // GEMMConvolutionLayer
483TEST_SUITE_END() // NEON
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100484} // namespace validation
485} // namespace test
486} // namespace arm_compute