blob: fbc5a830a9eb14d21cce1625d495094f80d22407 [file] [log] [blame]
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01001/*
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +00002 * Copyright (c) 2017-2019 ARM Limited.
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/core/Types.h"
25#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +000026#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
Georgios Pinitas9fb11592018-04-26 20:34:58 +010027#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010028#include "arm_compute/runtime/Tensor.h"
29#include "arm_compute/runtime/TensorAllocator.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010030#include "tests/NEON/Accessor.h"
31#include "tests/PaddingCalculator.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010032#include "tests/datasets/LargeConvolutionLayerDataset.h"
33#include "tests/datasets/SmallConvolutionLayerDataset.h"
Anthony Barbier1c0d0ff2018-01-31 13:05:09 +000034#include "tests/datasets/TinyConvolutionLayerDataset.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010035#include "tests/framework/Asserts.h"
36#include "tests/framework/Macros.h"
37#include "tests/framework/datasets/Datasets.h"
38#include "tests/validation/Validation.h"
39#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
Georgios Pinitas9fb11592018-04-26 20:34:58 +010040#include "tests/validation/fixtures/WinogradConvolutionLayerFixture.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010041
42namespace arm_compute
43{
44namespace test
45{
46namespace validation
47{
48namespace
49{
Pablo Telloaf7e6002018-10-08 15:53:14 +010050const RelativeTolerance<float> rel_tolerance_f32(0.01f); /**< Relative tolerance for FP32 types */
51const RelativeTolerance<float> rel_tolerance_winograd_3x3_f32(0.05f); /**< Relative tolerance for FP32 types */
52const AbsoluteTolerance<float> abs_tolerance_f32(0.002f); /**< Absolute tolerance for FP32 types */
53const AbsoluteTolerance<float> abs_tolerance_1xN_f32(0.0041f); /**< Absolute tolerance for FP32 types */
Pablo Tello952aeb12018-09-12 09:47:25 +010054
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000055#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Gian Marco Iodice41acb762018-08-23 10:25:06 +010056const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for FP16 types */
57const AbsoluteTolerance<float> abs_tolerance_f16(0.2f); /**< Absolute tolerance for FP16 types */
58constexpr float tolerance_num = 0.07f; /**< Tolerance number for the FP16 implementation */
59#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
60constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010061
62/** CNN data types */
63const auto CNNDataTypes = framework::dataset::make("DataType",
64{
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000065#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010066 DataType::F16,
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000067#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010068 DataType::F32,
Isabella Gottardie6630e42018-01-18 15:50:39 +000069 DataType::QASYMM8,
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010070});
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000071const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
72{
73 ActivationLayerInfo(),
74 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
75 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f)
76});
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +010077
78const auto QuantizationData = framework::dataset::make("QuantizationInfo",
79{
80 QuantizationInfo(0.5f, 10),
81 QuantizationInfo(0.3f, 3),
82 QuantizationInfo(1.f, 10),
Michele Di Giorgiof29d1b72019-10-29 10:58:13 +000083 QuantizationInfo(1.1f, 10),
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +010084});
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010085} // namespace
86
87TEST_SUITE(NEON)
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +000088TEST_SUITE(ConvolutionLayer)
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +000089
90// *INDENT-OFF*
91// clang-format off
Giorgio Arenaa3221e62018-05-03 15:57:48 +010092DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +000093 framework::dataset::make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32),
94 TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32),
95 TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
96 TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32)
97 }),
98 framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32),
99 TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32),
100 TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
101 TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16)
102 })),
103 framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32),
104 TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32),
105 TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
106 TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32)
107 })),
108 framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
109 PadStrideInfo(1, 1, 0, 0),
110 PadStrideInfo(2, 1, 0, 0),
111 PadStrideInfo(3, 2, 1, 0)
112 })),
113 framework::dataset::make("FastMath", { true,
114 true,
115 false,
116 false
117 })),
Giorgio Arenaa3221e62018-05-03 15:57:48 +0100118 framework::dataset::make("Expected", { ConvolutionMethod::WINOGRAD, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM })),
119 input_info, weights_info, output_info, conv_info, fast_math, expected)
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000120{
Giorgio Arenaa3221e62018-05-03 15:57:48 +0100121 ConvolutionMethod is_valid = NEConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
122 &weights_info.clone()->set_is_resizable(true),
123 &output_info.clone()->set_is_resizable(true), conv_info, WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), fast_math);
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000124 ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
125}
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000126// clang-format on
127// *INDENT-ON*
128TEST_SUITE_END() // ConvolutionLayer
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000129
Pablo Tello89519332017-11-17 11:52:36 +0000130TEST_SUITE(WinogradLayer)
131template <typename T>
Giorgio Arenaa3221e62018-05-03 15:57:48 +0100132using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;
Pablo Tello89519332017-11-17 11:52:36 +0000133
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000134template <typename T>
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +0000135using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, T, false>;
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000136
Pablo Tello89519332017-11-17 11:52:36 +0000137TEST_SUITE(FP32)
Pablo Tello7282d562018-06-14 15:35:49 +0100138
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100139TEST_SUITE(Conv1x3)
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000140FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100141 combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x3Dataset(),
142 framework::dataset::make("DataType", { DataType::F32 })),
143 ActivationFunctionsDataset),
144 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
145{
146 // Validate output
147 validate(Accessor(_target), _reference, abs_tolerance_f32);
148}
149FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
150 combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x3Dataset(),
151 framework::dataset::make("DataType", { DataType::F32 })),
152 ActivationFunctionsDataset),
153 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
154{
155 // Validate output
Pablo Tello952aeb12018-09-12 09:47:25 +0100156 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100157}
158
159TEST_SUITE_END() // Conv1x3
160
161TEST_SUITE(Conv3x1)
162FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
163 combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x1Dataset(),
164 framework::dataset::make("DataType", { DataType::F32 })),
165 ActivationFunctionsDataset),
166 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
167{
168 // Validate output
169 validate(Accessor(_target), _reference, abs_tolerance_f32);
170}
171FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
172 combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x1Dataset(),
173 framework::dataset::make("DataType", { DataType::F32 })),
174 ActivationFunctionsDataset),
175 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
176{
177 // Validate output
Pablo Tello952aeb12018-09-12 09:47:25 +0100178 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100179}
180
181TEST_SUITE_END() // Conv3x1
182
Pablo Tello000d33a2018-09-03 16:59:20 +0100183TEST_SUITE(Conv1x5)
184FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
185 combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x5Dataset(),
186 framework::dataset::make("DataType", { DataType::F32 })),
187 ActivationFunctionsDataset),
188 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
189{
190 // Validate output
191 validate(Accessor(_target), _reference, abs_tolerance_f32);
192}
193FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
194 combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x5Dataset(),
195 framework::dataset::make("DataType", { DataType::F32 })),
196 ActivationFunctionsDataset),
197 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
198{
199 // Validate output
Pablo Tello952aeb12018-09-12 09:47:25 +0100200 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
Pablo Tello000d33a2018-09-03 16:59:20 +0100201}
202
203TEST_SUITE_END() // Conv1x5
204
205TEST_SUITE(Conv5x1)
206FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
207 combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x1Dataset(),
208 framework::dataset::make("DataType", { DataType::F32 })),
209 ActivationFunctionsDataset),
210 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
211{
212 // Validate output
213 validate(Accessor(_target), _reference, abs_tolerance_f32);
214}
215FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
216 combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x1Dataset(),
217 framework::dataset::make("DataType", { DataType::F32 })),
218 ActivationFunctionsDataset),
219 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
220{
221 // Validate output
Pablo Tello952aeb12018-09-12 09:47:25 +0100222 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
Pablo Tello000d33a2018-09-03 16:59:20 +0100223}
224
225TEST_SUITE_END() // Conv5x1
226
Pablo Tello96e922e2018-09-26 11:25:15 +0100227TEST_SUITE(Conv7x1)
228FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
229 combine(combine(combine(datasets::SmallWinogradConvolutionLayer7x1Dataset(),
230 framework::dataset::make("DataType", { DataType::F32 })),
231 ActivationFunctionsDataset),
232 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
233{
234 // Validate output
235 validate(Accessor(_target), _reference, abs_tolerance_f32);
236}
237
238FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
239 combine(combine(combine(datasets::LargeWinogradConvolutionLayer7x1Dataset(),
240 framework::dataset::make("DataType", { DataType::F32 })),
241 ActivationFunctionsDataset),
242 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
243{
244 // Validate output
245 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
246}
247TEST_SUITE_END() // Conv7x1
248
249TEST_SUITE(Conv1x7)
250FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
251 combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x7Dataset(),
252 framework::dataset::make("DataType", { DataType::F32 })),
253 ActivationFunctionsDataset),
254 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
255{
256 // Validate output
257 validate(Accessor(_target), _reference, abs_tolerance_f32);
258}
259
260FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
261 combine(combine(combine(datasets::LargeWinogradConvolutionLayer7x1Dataset(),
262 framework::dataset::make("DataType", { DataType::F32 })),
263 ActivationFunctionsDataset),
264 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
265{
266 // Validate output
267 validate(Accessor(_target), _reference, abs_tolerance_1xN_f32);
268}
269TEST_SUITE_END() // Conv1x7
270
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100271TEST_SUITE(Conv3x3)
272FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
273 combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
Pablo Tello7282d562018-06-14 15:35:49 +0100274 framework::dataset::make("DataType", { DataType::F32 })),
275 ActivationFunctionsDataset),
276 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
277
Pablo Tello89519332017-11-17 11:52:36 +0000278{
279 // Validate output
Georgios Pinitas8dea6022018-06-08 18:33:31 +0100280 validate(Accessor(_target), _reference, abs_tolerance_f32);
Pablo Tello89519332017-11-17 11:52:36 +0000281}
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100282FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
283 combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(),
284 framework::dataset::make("DataType", { DataType::F32 })),
285 ActivationFunctionsDataset),
286 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
287
288{
289 // Validate output
Pablo Telloaf7e6002018-10-08 15:53:14 +0100290 // floating point arithmetic the Winograd results will not be exactly the same as direct convolution, especially for big shapes
291 validate(Accessor(_target), _reference, rel_tolerance_winograd_3x3_f32, 0.f, float(abs_tolerance_f32));
Pablo Tellobda6e4b2018-08-22 11:40:33 +0100292}
293TEST_SUITE_END() // Conv3x3
294
295TEST_SUITE(Conv5x5)
296FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
297 combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x5Dataset(),
298 framework::dataset::make("DataType", { DataType::F32 })),
299 ActivationFunctionsDataset),
300 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
301
302{
303 // Validate output
304 validate(Accessor(_target), _reference, abs_tolerance_f32);
305}
306FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
307 combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x5Dataset(),
308 framework::dataset::make("DataType", { DataType::F32 })),
309 ActivationFunctionsDataset),
310 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
311
312{
313 // Validate output
314 validate(Accessor(_target), _reference, abs_tolerance_f32);
315}
316
317TEST_SUITE_END() // Conv5x5
Pablo Tello89519332017-11-17 11:52:36 +0000318
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000319FIXTURE_DATA_TEST_CASE(RunSmallNoBias, NEWinogradConvolutionLayerNoBiasFixture<float>, framework::DatasetMode::PRECOMMIT,
Pablo Tello7282d562018-06-14 15:35:49 +0100320 combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
321 datasets::SmallWinogradConvolutionLayer5x5Dataset()),
322 framework::dataset::make("DataType", { DataType::F32 })),
323 ActivationFunctionsDataset),
324
325 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000326{
327 // Validate output
Georgios Pinitas8dea6022018-06-08 18:33:31 +0100328 validate(Accessor(_target), _reference, abs_tolerance_f32);
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000329}
330
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000331TEST_SUITE_END() // FP32
332TEST_SUITE_END() // WinogradLayer
Pablo Tello89519332017-11-17 11:52:36 +0000333
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000334TEST_SUITE(GEMMConvolutionLayer)
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100335
Michalis Spyrou5c9f0c42019-01-16 14:48:48 +0000336DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallConvolutionLayerDataset(),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000337 CNNDataTypes),
338 framework::dataset::make("ActivationInfo",
339{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
340input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100341{
Isabella Gottardie6630e42018-01-18 15:50:39 +0000342 auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
343
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100344 // Create tensors
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100345 Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
346 Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
347 Tensor bias = create_tensor<Tensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
348 Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100349
350 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
351 ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
352 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
353 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
354
Isabella Gottardie6630e42018-01-18 15:50:39 +0000355 const QuantizationInfo src_quantization_info = src.info()->quantization_info();
356 const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
357
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100358 // Create and configure function
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000359 NEGEMMConvolutionLayer conv;
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000360 conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100361
362 // Validate valid region
363 const ValidRegion src_valid_region = shape_to_valid_region(input_shape);
364 const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
365 const ValidRegion bias_valid_region = shape_to_valid_region(bias_shape);
366 const ValidRegion dst_valid_region = shape_to_valid_region(output_shape);
367
368 validate(src.info()->valid_region(), src_valid_region);
369 validate(weights.info()->valid_region(), weights_valid_region);
370 validate(bias.info()->valid_region(), bias_valid_region);
371 validate(dst.info()->valid_region(), dst_valid_region);
372
Isabella Gottardie6630e42018-01-18 15:50:39 +0000373 // Validate QuantizationInfo
374 ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
375 ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);
376
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100377 // Validate padding
378 //TODO(COMPMID-415) Need to validate padding?
379}
380
381template <typename T>
Anthony Barbierc8e84b52018-07-17 16:48:42 +0100382using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100383
384TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
}
TEST_SUITE_END() // FP16
#endif           /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100407
408TEST_SUITE(FP32)
Michalis Spyroue2503892018-04-23 15:17:31 +0100409FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
410 framework::dataset::make("ReshapeWeights", { true })),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000411 framework::dataset::make("DataType", DataType::F32)),
Michalis Spyroue2503892018-04-23 15:17:31 +0100412 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000413 ActivationFunctionsDataset))
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100414{
415 // Validate output
Georgios Pinitas8dea6022018-06-08 18:33:31 +0100416 validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100417}
Michalis Spyroue2503892018-04-23 15:17:31 +0100418FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
419 framework::dataset::make("ReshapeWeights", { true })),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000420 framework::dataset::make("DataType", DataType::F32)),
Michalis Spyroue2503892018-04-23 15:17:31 +0100421 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000422 ActivationFunctionsDataset))
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100423{
424 // Validate output
Georgios Pinitas8dea6022018-06-08 18:33:31 +0100425 validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100426}
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000427TEST_SUITE_END() // FP32
428TEST_SUITE_END() // Float
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100429
430template <typename T>
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000431using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
Isabella Gottardie6630e42018-01-18 15:50:39 +0000432
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100433template <typename T>
434using NEGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T, int8_t>;
435
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000436const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
437{
438 ActivationLayerInfo(),
439 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
440 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
441});
Isabella Gottardie6630e42018-01-18 15:50:39 +0000442TEST_SUITE(Quantized)
443TEST_SUITE(QASYMM8)
Georgios Pinitas19ea4192018-06-19 13:09:53 +0100444FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
Isabella Gottardie6630e42018-01-18 15:50:39 +0000445 framework::dataset::make("ReshapeWeights", { true })),
446 framework::dataset::make("DataType", DataType::QASYMM8)),
Gian Marco Iodicedb9d46d2018-08-08 12:29:38 +0100447 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000448 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
449 QuantizedActivationFunctionsDataset))
Isabella Gottardie6630e42018-01-18 15:50:39 +0000450{
451 // Validate output
452 validate(Accessor(_target), _reference, tolerance_qasymm8);
453}
Georgios Pinitas19ea4192018-06-19 13:09:53 +0100454FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000455 framework::dataset::make("ReshapeWeights", { true })),
456 framework::dataset::make("DataType", DataType::QASYMM8)),
Gian Marco Iodicedb9d46d2018-08-08 12:29:38 +0100457 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000458 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
459 QuantizedActivationFunctionsDataset))
Isabella Gottardie6630e42018-01-18 15:50:39 +0000460{
461 // Validate output
462 validate(Accessor(_target), _reference, tolerance_qasymm8);
463}
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000464TEST_SUITE_END() // QASYMM8
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100465
Georgios Pinitas6e1791b2019-12-02 19:01:25 +0000466TEST_SUITE(QASYMM8_SIGNED)
467FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
468 framework::dataset::make("ReshapeWeights", { true })),
469 framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
470 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
471 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
472 QuantizedActivationFunctionsDataset))
473{
474 // Validate output
475 validate(Accessor(_target), _reference, tolerance_qasymm8);
476}
477TEST_SUITE_END() // QASYMM8_SIGNED
478
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100479TEST_SUITE(QSYMM8_PER_CHANNEL)
480FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
481 combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
482 framework::dataset::make("ReshapeWeights", { true })),
483 framework::dataset::make("DataType", { DataType::QASYMM8 })),
484 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
485 QuantizationData),
Michele Di Giorgiof29d1b72019-10-29 10:58:13 +0000486 QuantizedActivationFunctionsDataset),
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100487 framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
488{
489 // Validate output
490 validate(Accessor(_target), _reference, tolerance_qasymm8);
491}
492FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
Georgios Pinitas63d4dbd2019-11-08 11:51:56 +0000493 combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100494 framework::dataset::make("ReshapeWeights", { true })),
495 framework::dataset::make("DataType", { DataType::QASYMM8 })),
496 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
497 QuantizationData),
498 QuantizedActivationFunctionsDataset),
499 framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
500{
501 // Validate output
502 validate(Accessor(_target), _reference, tolerance_qasymm8);
503}
504TEST_SUITE_END() // QSYMM8_PER_CHANNEL
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000505TEST_SUITE_END() // Quantized
Isabella Gottardie6630e42018-01-18 15:50:39 +0000506
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000507TEST_SUITE_END() // GEMMConvolutionLayer
508TEST_SUITE_END() // NEON
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100509} // namespace validation
510} // namespace test
511} // namespace arm_compute