blob: b6c2f0df1b9e3112d5d59719399b55f717a9f782 [file] [log] [blame]
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01001/*
Sheri Zhangac6499a2021-02-10 15:32:38 +00002 * Copyright (c) 2017-2021 Arm Limited.
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Manuel Bottini87350f42020-09-15 13:03:34 +010024#include "arm_compute/core/Helpers.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010025#include "arm_compute/core/Types.h"
26#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
27#include "arm_compute/runtime/Tensor.h"
28#include "arm_compute/runtime/TensorAllocator.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010029#include "tests/NEON/Accessor.h"
30#include "tests/PaddingCalculator.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010031#include "tests/datasets/ShapeDatasets.h"
32#include "tests/framework/Asserts.h"
33#include "tests/framework/Macros.h"
34#include "tests/framework/datasets/Datasets.h"
35#include "tests/validation/Validation.h"
36#include "tests/validation/fixtures/DirectConvolutionLayerFixture.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010037
38namespace arm_compute
39{
40namespace test
41{
42namespace validation
43{
namespace
{
// Tolerances used when comparing the computed output against the reference implementation.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for FP16 types */
const AbsoluteTolerance<float>            abs_tolerance_f16(0.2f);                   /**< Absolute tolerance for FP16 types */
constexpr float                           tolerance_num = 0.07f;                     /**< Tolerance number for the FP16 implementation */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */

/** Direct convolution padding/kernel-size data set for FP32:
 *  3x3 kernel with pads {0,1} and {0,2}, plus a 5x5 kernel with pads {0,3}. */
const auto data_pad_f32 = concat(concat(combine(framework::dataset::make("PadX", { 0, 1 }),
                                                combine(framework::dataset::make("PadY", { 0, 1 }),
                                                        framework::dataset::make("KernelSize", 3))),
                                        combine(framework::dataset::make("PadX", { 0, 2 }),
                                                combine(framework::dataset::make("PadY", { 0, 2 }),
                                                        framework::dataset::make("KernelSize", 3)))),
                                 combine(framework::dataset::make("PadX", { 0, 3 }),
                                         combine(framework::dataset::make("PadY", { 0, 3 }),
                                                 framework::dataset::make("KernelSize", 5))));

/** Direct convolution padding/kernel-size data set for FP16:
 *  3x3 kernel with pads {0,1}, plus an unpadded 1x1 kernel. */
const auto data_pad_f16 = concat(combine(framework::dataset::make("PadX", { 0, 1 }),
                                         combine(framework::dataset::make("PadY", { 0, 1 }),
                                                 framework::dataset::make("KernelSize", 3))),
                                 combine(framework::dataset::make("PadX", { 0 }),
                                         combine(framework::dataset::make("PadY", { 0 }),
                                                 framework::dataset::make("KernelSize", 1))));

/** Small shapes with strides 1-3 combined with the FP32 padding set */
const auto data_f32 = combine(datasets::SmallDirectConvolutionShapes(),
                              combine(framework::dataset::make("StrideX", { 1, 2, 3 }),
                                      combine(framework::dataset::make("StrideY", { 1, 2, 3 }),
                                              data_pad_f32)));

/** Small shapes with strides 1-3 combined with the FP16 padding set */
const auto data_f16 = combine(datasets::SmallDirectConvolutionShapes(),
                              combine(framework::dataset::make("StrideX", { 1, 2, 3 }),
                                      combine(framework::dataset::make("StrideY", { 1, 2, 3 }),
                                              data_pad_f16)));

/** Minimal 3x3, stride 1, pad 1 configuration used for the precommit runs */
const auto data_prec = combine(datasets::SmallDirectConvolutionShapes(),
                               combine(framework::dataset::make("StrideX", { 1 }),
                                       combine(framework::dataset::make("StrideY", { 1 }),
                                               combine(framework::dataset::make("PadX", { 1 }),
                                                       combine(framework::dataset::make("PadY", { 1 }),
                                                               framework::dataset::make("KernelSize", 3))))));

/** 9x9 kernel configuration, stride 1, with and without padding */
const auto data9x9 = combine(datasets::SmallDirectConvolutionShapes(),
                             combine(framework::dataset::make("StrideX", { 1 }),
                                     combine(framework::dataset::make("StrideY", { 1 }),
                                             combine(framework::dataset::make("PadX", { 0, 2 }),
                                                     combine(framework::dataset::make("PadY", { 0, 3 }),
                                                             framework::dataset::make("KernelSize", 9))))));

/** 8x8 kernel configuration, stride 1, no padding */
const auto data8x8 = combine(datasets::SmallDirectConvolutionShapes(),
                             combine(framework::dataset::make("StrideX", { 1 }),
                                     combine(framework::dataset::make("StrideY", { 1 }),
                                             combine(framework::dataset::make("PadX", { 0 }),
                                                     combine(framework::dataset::make("PadY", { 0 }),
                                                             framework::dataset::make("KernelSize", 8))))));

// Nightly runs sweep the number of kernels; precommit runs pin it to keep the run short.
const auto data_f32_nightly = combine(data_f32, framework::dataset::make("NumKernels", { 1, 4 }));
const auto data_f16_nightly = combine(data_f16, framework::dataset::make("NumKernels", { 1, 4 }));

const auto data_precommit    = combine(data_prec, framework::dataset::make("NumKernels", { 1 }));
const auto data_precommit9x9 = combine(data9x9, framework::dataset::make("NumKernels", { 4 }));
const auto data_precommit8x8 = combine(data8x8, framework::dataset::make("NumKernels", { 4 }));

/* The following test is from a real use-case that made DirectConvolution
 * overflow in terms of its tensor indexing. This test case is using
 * a separate tolerance due to the following reason.
 * - It has shown that it requires generally larger absolute tolerance
 *   for large numbers or larger relative tolerance for small numbers.
 * - With the first reason, since it is mainly testing index overflow,
 *   a value with a margin is used to avoid unintended test failures
 *   during nightly.
 */
constexpr AbsoluteTolerance<float> usecase_tolerance_fp32(0.05f);

const auto data_nightly_usecase = combine(framework::dataset::make("InputShape", { TensorShape{ 3U, 800U, 800U } }),
                                          combine(framework::dataset::make("StrideX", { 1 }),
                                                  combine(framework::dataset::make("StrideY", { 1 }),
                                                          combine(framework::dataset::make("PadX", { 4 }),
                                                                  combine(framework::dataset::make("PadY", { 4 }),
                                                                          combine(framework::dataset::make("KernelSize", 9),
                                                                                  framework::dataset::make("NumKernels", { 16 })))))));

/** Activation function Dataset */
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
} // namespace
140
141TEST_SUITE(NEON)
142TEST_SUITE(DirectConvolutionLayer)
143
Michele Di Giorgio97e25802021-03-25 12:37:45 +0000144/** Check whether the configuration of a Direct Convolution layer with no
145 * bias leads to a successful execution.
146 */
147TEST_CASE(NoBias, framework::DatasetMode::PRECOMMIT)
148{
149 const auto src_shape = TensorShape(27U, 13U, 2U);
150 const auto weights_shape = TensorShape(3U, 3U, 2U, 4U);
151 const auto bias_shape = TensorShape(4U);
152 const auto dst_shape = TensorShape(25U, 11U, 4U);
153 constexpr auto dt = DataType::F32;
154
155 auto src = create_tensor<Tensor>(src_shape, dt);
156 auto weights = create_tensor<Tensor>(weights_shape, dt);
157 auto dst = create_tensor<Tensor>(dst_shape, dt);
158
159 const auto conv_info = PadStrideInfo(1, 1, 0, 0);
160
161 // Create Direct Convolution function
162 NEDirectConvolutionLayer conv{};
163 conv.configure(&src, &weights, nullptr, &dst, conv_info);
164
165 src.allocator()->allocate();
166 weights.allocator()->allocate();
167 dst.allocator()->allocate();
168
169 library->fill_tensor_value(Accessor(src), 1.f);
170 library->fill_tensor_value(Accessor(weights), 1.f);
171
172 conv.run();
173
174 // Compute reference to compare
175 SimpleTensor<float> ref_src{ src_shape, dt };
176 SimpleTensor<float> ref_weights{ weights_shape, dt };
177 SimpleTensor<float> ref_bias{ bias_shape, dt };
178 library->fill_tensor_value(ref_src, 1.f);
179 library->fill_tensor_value(ref_weights, 1.f);
180 // No bias
181 library->fill_tensor_value(ref_bias, 0.f);
182 auto ref_dst = reference::convolution_layer<float>(ref_src, ref_weights, ref_bias, dst_shape, conv_info);
183
184 validate(Accessor(dst), ref_dst);
185}
186
// *INDENT-OFF*
// clang-format off
/** Negative validation tests: each row pairs an invalid input/weights/biases/output/conv/activation
 *  combination (reason in the InputInfo comment) with the expectation that validate() rejects it. */
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/weights
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching input feature maps
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported kernel width
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non-rectangular weights dimensions
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights dimensions
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid stride
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases size
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases dimensions
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid output size
               }),
               framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16),
                                                        TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
               })),
               framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(3U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
               })),
               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32),
               })),
               framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(3, 3, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
               })),
               framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(),
    ActivationLayerInfo(),
    ActivationLayerInfo(),
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
})),
               framework::dataset::make("Expected", { false, false, false, false, false, false, false, false, false })),
               input_info, weights_info, biases_info, output_info, conv_info, act_info, expected)
{
    // Clones are made non-resizable so validate() sees the shapes exactly as given
    bool is_valid = bool(NEDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, act_info));
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
260
/** Check that configuring the direct convolution for NHWC does not add any
 *  padding to the input, weights or output tensors. */
DATA_TEST_CASE(NoPaddingNHWCKernel, framework::DatasetMode::ALL, combine(combine(combine(data_precommit,
                                                                                         framework::dataset::make("DataType", DataType::F32)),
                                                                                 ActivationFunctionsDataset),
                                                                         framework::dataset::make("DataLayout", { DataLayout::NHWC })),

               shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, act_info, data_layout)
{
    TensorShape input_shape = TensorShape(shape);
    TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
    const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);

    TensorInfo input_info   = TensorInfo(input_shape, 1, data_type);
    TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type);

    // Output shape is derived while the shapes are still in NCHW order
    TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info);

    if(data_layout == DataLayout::NHWC)
    {
        // Rotate CWH -> WHC so the tensors are created with NHWC-ordered shapes
        permute(input_shape, PermutationVector(2U, 0U, 1U));
        permute(weights_shape, PermutationVector(2U, 0U, 1U));
        permute(output_shape, PermutationVector(2U, 0U, 1U));
    }

    // Create tensors
    Tensor src     = create_tensor<Tensor>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
    Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
    Tensor dst     = create_tensor<Tensor>(output_shape, data_type, 1, QuantizationInfo(), data_layout);

    // Create and configure function
    NEDirectConvolutionLayer conv;
    conv.configure(&src, &weights, nullptr, &dst, info, act_info);

    // Configuration alone must leave all tensors with zero padding
    validate(src.info()->padding(), PaddingSize(0, 0, 0, 0));
    validate(weights.info()->padding(), PaddingSize(0, 0, 0, 0));
    validate(dst.info()->padding(), PaddingSize(0, 0, 0, 0));
}
297
/** Default validation fixture for NEDirectConvolutionLayer */
template <typename T>
using NEDirectConvolutionLayerFixture = DirectConvolutionValidationFixture<Tensor, Accessor, NEDirectConvolutionLayer, T>;
/** Same fixture with the mixed-data-layout flag (last template argument) enabled */
template <typename T>
using NEDirectConvolutionLayerMixedDataLayoutFixture = DirectConvolutionValidationFixture<Tensor, Accessor, NEDirectConvolutionLayer, T, true>;
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100302
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit, framework::dataset::make("DataType",
                                                                                                                   DataType::F16)),
                                                                                                                   ActivationFunctionsDataset),
                                                                                                                   framework::dataset::make("DataLayout", DataLayout::NCHW)))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEDirectConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_f16_nightly, framework::dataset::make("DataType", DataType::F16)),
                                                                                                                 ActivationFunctionsDataset),
                                                                                                                 framework::dataset::make("DataLayout", DataLayout::NCHW)))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
}
TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit, framework::dataset::make("DataType",
                                                                                                                    DataType::F32)),
                                                                                                                    ActivationFunctionsDataset),
                                                                                                                    framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEDirectConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit,
                                                                                                                                             framework::dataset::make("DataType", DataType::F32)),
                                                                                                                                             ActivationFunctionsDataset),
                                                                                                                                             framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}

FIXTURE_DATA_TEST_CASE(RunSmall8x8, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit8x8, framework::dataset::make("DataType",
                                                                                                                       DataType::F32)),
                                                                                                                       ActivationFunctionsDataset),
                                                                                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}

FIXTURE_DATA_TEST_CASE(RunSmall9x9, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit9x9, framework::dataset::make("DataType",
                                                                                                                       DataType::F32)),
                                                                                                                       ActivationFunctionsDataset),
                                                                                                                       framework::dataset::make("DataLayout", { DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_f32_nightly, framework::dataset::make("DataType",
                                                                                                                  DataType::F32)),
                                                                                                                  ActivationFunctionsDataset),
                                                                                                                  framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}
// Large-shape regression test for tensor index overflow; uses its own (wider) tolerance
FIXTURE_DATA_TEST_CASE(RunLargeUsecase, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_nightly_usecase, framework::dataset::make("DataType",
                                                                                                                        DataType::F32)),
                                                                                                                        framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
                                                                                                                        framework::dataset::make("DataLayout", { DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, usecase_tolerance_fp32);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
TEST_SUITE_END() // DirectConvolutionLayer
TEST_SUITE_END() // NEON
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100379} // namespace validation
380} // namespace test
381} // namespace arm_compute