blob: 0779c9d388abd630b076efd245ce565cfdafc895 [file] [log] [blame]
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01001/*
SiCong Li1b2f8682023-01-04 10:04:26 +00002 * Copyright (c) 2017-2023 Arm Limited.
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Manuel Bottini87350f42020-09-15 13:03:34 +010024#include "arm_compute/core/Helpers.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010025#include "arm_compute/core/Types.h"
Matthew Bentham314d3e22023-06-23 10:53:52 +000026#include "arm_compute/core/utils/StringUtils.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010027#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
28#include "arm_compute/runtime/Tensor.h"
29#include "arm_compute/runtime/TensorAllocator.h"
alerah01c9e519d2022-01-31 19:04:10 +020030#include "src/common/cpuinfo/CpuIsaInfo.h"
31#include "src/cpu/kernels/CpuDirectConv2dKernel.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010032#include "tests/NEON/Accessor.h"
33#include "tests/PaddingCalculator.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010034#include "tests/datasets/ShapeDatasets.h"
35#include "tests/framework/Asserts.h"
36#include "tests/framework/Macros.h"
37#include "tests/framework/datasets/Datasets.h"
38#include "tests/validation/Validation.h"
39#include "tests/validation/fixtures/DirectConvolutionLayerFixture.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010040
41namespace arm_compute
42{
43namespace test
44{
45namespace validation
46{
47namespace
48{
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +000049#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Gian Marco Iodice41acb762018-08-23 10:25:06 +010050const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for FP16 types */
51const AbsoluteTolerance<float> abs_tolerance_f16(0.2f); /**< Absolute tolerance for FP16 types */
52constexpr float tolerance_num = 0.07f; /**< Tolerance number for the FP16 implementation */
53#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
54constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010055
Gian Marco Iodice41acb762018-08-23 10:25:06 +010056/** Direct convolution data set.for FP32 */
Giorgio Arenac0f54432018-03-16 14:02:34 +000057const auto data_pad_f32 = concat(concat(combine(framework::dataset::make("PadX", { 0, 1 }),
58 combine(framework::dataset::make("PadY", { 0, 1 }),
59 framework::dataset::make("KernelSize", 3))),
60 combine(framework::dataset::make("PadX", { 0, 2 }),
61 combine(framework::dataset::make("PadY", { 0, 2 }),
Pablo Tello06da39d2017-08-10 15:10:40 +010062 framework::dataset::make("KernelSize", 3)))),
Giorgio Arenac0f54432018-03-16 14:02:34 +000063 combine(framework::dataset::make("PadX", { 0, 3 }),
64 combine(framework::dataset::make("PadY", { 0, 3 }),
Pablo Tello06da39d2017-08-10 15:10:40 +010065 framework::dataset::make("KernelSize", 5))));
66
Gian Marco Iodice41acb762018-08-23 10:25:06 +010067/** Direct convolution data set.for FP16 */
68const auto data_pad_f16 = concat(combine(framework::dataset::make("PadX", { 0, 1 }),
69 combine(framework::dataset::make("PadY", { 0, 1 }),
70 framework::dataset::make("KernelSize", 3))),
71 combine(framework::dataset::make("PadX", { 0 }),
72 combine(framework::dataset::make("PadY", { 0 }),
73 framework::dataset::make("KernelSize", 1))));
74
Pablo Tello06da39d2017-08-10 15:10:40 +010075const auto data_f32 = combine(datasets::SmallDirectConvolutionShapes(),
Adnan AlSinan0ef2c212022-01-24 10:20:40 +000076 combine(framework::dataset::make("StrideX", { 1, 2, 3, 4 }),
77 combine(framework::dataset::make("StrideY", { 1, 2, 3, 4 }),
Michalis Spyrou064add62018-11-01 18:14:27 +000078 data_pad_f32)));
Pablo Tello06da39d2017-08-10 15:10:40 +010079
Gian Marco Iodice41acb762018-08-23 10:25:06 +010080const auto data_f16 = combine(datasets::SmallDirectConvolutionShapes(),
81 combine(framework::dataset::make("StrideX", { 1, 2, 3 }),
82 combine(framework::dataset::make("StrideY", { 1, 2, 3 }),
Michalis Spyrou064add62018-11-01 18:14:27 +000083 data_pad_f16)));
84
Manuel Bottini87350f42020-09-15 13:03:34 +010085const auto data_prec = combine(datasets::SmallDirectConvolutionShapes(),
86 combine(framework::dataset::make("StrideX", { 1 }),
87 combine(framework::dataset::make("StrideY", { 1 }),
88 combine(framework::dataset::make("PadX", { 1 }),
89 combine(framework::dataset::make("PadY", { 1 }),
90 framework::dataset::make("KernelSize", 3))))));
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +000091
Gian Marco Iodice95f93612019-06-13 15:58:32 +010092const auto data9x9 = combine(datasets::SmallDirectConvolutionShapes(),
Adnan AlSinan0ef2c212022-01-24 10:20:40 +000093 combine(framework::dataset::make("StrideX", { 1, 2, 3 }),
94 combine(framework::dataset::make("StrideY", { 1, 2, 3 }),
Gian Marco Iodice95f93612019-06-13 15:58:32 +010095 combine(framework::dataset::make("PadX", { 0, 2 }),
96 combine(framework::dataset::make("PadY", { 0, 3 }),
97 framework::dataset::make("KernelSize", 9))))));
98
Pablo Marquez Tello4d44ac82021-12-08 15:56:01 +000099const auto data8x8 = combine(datasets::SmallDirectConvolutionShapes(),
Adnan AlSinan0ef2c212022-01-24 10:20:40 +0000100 combine(framework::dataset::make("StrideX", { 1, 2, 3 }),
101 combine(framework::dataset::make("StrideY", { 1, 2, 3 }),
Pablo Marquez Tello4d44ac82021-12-08 15:56:01 +0000102 combine(framework::dataset::make("PadX", { 0 }),
103 combine(framework::dataset::make("PadY", { 0 }),
104 framework::dataset::make("KernelSize", 8))))));
105
Adnan AlSinan0ef2c212022-01-24 10:20:40 +0000106const auto data_f32_nightly = combine(data_f32, framework::dataset::make("NumKernels", { 1, 4, 5 }));
107const auto data_f16_nightly = combine(data_f16, framework::dataset::make("NumKernels", { 1, 4, 5 }));
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000108
Manuel Bottini87350f42020-09-15 13:03:34 +0100109const auto data_precommit = combine(data_prec, framework::dataset::make("NumKernels", { 1 }));
Gian Marco Iodice95f93612019-06-13 15:58:32 +0100110const auto data_precommit9x9 = combine(data9x9, framework::dataset::make("NumKernels", { 4 }));
Pablo Marquez Tello4d44ac82021-12-08 15:56:01 +0000111const auto data_precommit8x8 = combine(data8x8, framework::dataset::make("NumKernels", { 4 }));
112
Sang-Hoon Park38515422020-07-08 11:06:30 +0100113/* The following tests is from real use-case that made DirectConvolution
114 * overflows in terms of its tensor indexing. This test case is using
115 * a separate tolerance due to the following reason.
116 * - It has shown that it requires generally larger absolute tolerance
117 * for large numbers or larger relative tolerance for small numbers.
118 * - With the first reason, since it is mainly testing index overflow,
119 * a value with a margin is used to avoid uninteded test failures
120 * during nightly.
121 */
122constexpr AbsoluteTolerance<float> usecase_tolerance_fp32(0.05f);
123
124const auto data_nightly_usecase = combine(framework::dataset::make("InputShape", { TensorShape{ 3U, 800U, 800U } }),
125 combine(framework::dataset::make("StrideX", { 1 }),
126 combine(framework::dataset::make("StrideY", { 1 }),
127 combine(framework::dataset::make("PadX", { 4 }),
128 combine(framework::dataset::make("PadY", { 4 }),
129 combine(framework::dataset::make("KernelSize", 9),
130 framework::dataset::make("NumKernels", { 16 })))))));
131
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000132/** Activation function Dataset*/
133const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
Gian Marco Iodice95f93612019-06-13 15:58:32 +0100134{
135 ActivationLayerInfo(),
136 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
137});
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100138} // namespace
139
140TEST_SUITE(NEON)
141TEST_SUITE(DirectConvolutionLayer)
142
Michele Di Giorgio97e25802021-03-25 12:37:45 +0000143/** Check whether the configuration of a Direct Convolution layer with no
144 * bias leads to a successful execution.
145 */
146TEST_CASE(NoBias, framework::DatasetMode::PRECOMMIT)
147{
148 const auto src_shape = TensorShape(27U, 13U, 2U);
149 const auto weights_shape = TensorShape(3U, 3U, 2U, 4U);
150 const auto bias_shape = TensorShape(4U);
151 const auto dst_shape = TensorShape(25U, 11U, 4U);
152 constexpr auto dt = DataType::F32;
153
154 auto src = create_tensor<Tensor>(src_shape, dt);
155 auto weights = create_tensor<Tensor>(weights_shape, dt);
156 auto dst = create_tensor<Tensor>(dst_shape, dt);
157
158 const auto conv_info = PadStrideInfo(1, 1, 0, 0);
159
160 // Create Direct Convolution function
161 NEDirectConvolutionLayer conv{};
162 conv.configure(&src, &weights, nullptr, &dst, conv_info);
163
164 src.allocator()->allocate();
165 weights.allocator()->allocate();
166 dst.allocator()->allocate();
167
168 library->fill_tensor_value(Accessor(src), 1.f);
169 library->fill_tensor_value(Accessor(weights), 1.f);
170
171 conv.run();
172
173 // Compute reference to compare
174 SimpleTensor<float> ref_src{ src_shape, dt };
175 SimpleTensor<float> ref_weights{ weights_shape, dt };
176 SimpleTensor<float> ref_bias{ bias_shape, dt };
177 library->fill_tensor_value(ref_src, 1.f);
178 library->fill_tensor_value(ref_weights, 1.f);
179 // No bias
180 library->fill_tensor_value(ref_bias, 0.f);
181 auto ref_dst = reference::convolution_layer<float>(ref_src, ref_weights, ref_bias, dst_shape, conv_info);
182
183 validate(Accessor(dst), ref_dst);
184}
185
alerah01c9e519d2022-01-31 19:04:10 +0200186DATA_TEST_CASE(KernelSelection, framework::DatasetMode::ALL,
187 concat(combine(combine(framework::dataset::make("CpuExt", std::string("NEON")),
188 framework::dataset::make("DataType", { DataType::F32 })),
189 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
190 combine(combine(framework::dataset::make("CpuExt", std::string("NEON")),
191 framework::dataset::make("DataType", { DataType::F16 })),
192 framework::dataset::make("DataLayout", { DataLayout::NCHW }))),
193 cpu_ext, data_type, data_layout)
194{
195 using namespace cpu::kernels;
196
197 cpuinfo::CpuIsaInfo cpu_isa{};
198 cpu_isa.neon = (cpu_ext == "NEON");
199 cpu_isa.fp16 = (data_type == DataType::F16);
200
201 const auto *selected_impl = CpuDirectConv2dKernel::get_implementation(DataTypeDataLayoutISASelectorData{ data_type, data_layout, cpu_isa }, cpu::KernelSelectionType::Preferred);
202
203 ARM_COMPUTE_ERROR_ON_NULLPTR(selected_impl);
204
205 std::string data_layout_str;
206 if(data_layout == DataLayout::NCHW)
207 {
208 data_layout_str = "nchw";
209 }
210 else
211 {
212 data_layout_str = "nhwc";
213 }
214
215 std::string expected = lower_string(cpu_ext) + "_" + cpu_impl_dt(data_type) + "_" + data_layout_str + "_directconv2d";
216 std::string actual = selected_impl->name;
217
218 ARM_COMPUTE_EXPECT_EQUAL(expected, actual, framework::LogLevel::ERRORS);
219}
220
Michalis Spyrouafa5d812017-11-30 14:25:57 +0000221// *INDENT-OFF*
222// clang-format off
// NOTE(review): the zipped datasets below are index-aligned — row i of every
// make(...) list describes one invalid configuration, and the trailing
// "Expected" list marks each of the nine rows as rejected (false).
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000223DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
SiCong Li1b2f8682023-01-04 10:04:26 +0000224 framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid: Mismatching data type input/weights
225 TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid: Mismatching input feature maps
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +0100226 TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported kernel width
SiCong Li1b2f8682023-01-04 10:04:26 +0000227 TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported non-rectangular weights dimensions
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +0100228 TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights dimensions
SiCong Li1b2f8682023-01-04 10:04:26 +0000229 TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported stride
230 TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported biases size
231 TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported biases dimensions
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +0100232 TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid output size
Michalis Spyrouafa5d812017-11-30 14:25:57 +0000233 }),
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +0100234 framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16),
235 TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32),
236 TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32),
237 TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32),
238 TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32),
239 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
240 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
241 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
242 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
Michalis Spyrouafa5d812017-11-30 14:25:57 +0000243 })),
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +0100244 framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32),
245 TensorInfo(TensorShape(4U), 1, DataType::F32),
246 TensorInfo(TensorShape(4U), 1, DataType::F32),
247 TensorInfo(TensorShape(4U), 1, DataType::F32),
248 TensorInfo(TensorShape(4U), 1, DataType::F32),
249 TensorInfo(TensorShape(4U), 1, DataType::F32),
250 TensorInfo(TensorShape(3U), 1, DataType::F32),
251 TensorInfo(TensorShape(4U, 2U), 1, DataType::F32),
252 TensorInfo(TensorShape(4U), 1, DataType::F32),
Michalis Spyrouafa5d812017-11-30 14:25:57 +0000253 })),
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +0100254 framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
255 TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
256 TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
257 TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
258 TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
259 TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
260 TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
261 TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
262 TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32),
Michalis Spyrouafa5d812017-11-30 14:25:57 +0000263 })),
264 framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
265 PadStrideInfo(1, 1, 0, 0),
266 PadStrideInfo(1, 1, 0, 0),
267 PadStrideInfo(1, 1, 0, 0),
268 PadStrideInfo(1, 1, 0, 0),
269 PadStrideInfo(3, 3, 0, 0),
270 PadStrideInfo(1, 1, 0, 0),
271 PadStrideInfo(1, 1, 0, 0),
272 PadStrideInfo(1, 1, 0, 0),
273 })),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000274 framework::dataset::make("ActivationInfo",
Michalis Spyrouafa5d812017-11-30 14:25:57 +0000275{
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000276 ActivationLayerInfo(),
Pablo Marquez Tello90805b82021-06-15 11:51:46 +0100277 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
278 ActivationLayerInfo(),
279 ActivationLayerInfo(),
280 ActivationLayerInfo(),
281 ActivationLayerInfo(),
282 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
283 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
284 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000285})),
286 framework::dataset::make("Expected", { false, false, false, false, false, false, false, false, false })),
287 input_info, weights_info, biases_info, output_info, conv_info, act_info, expected)
288{
// Static validation only: no tensors are allocated and no kernel is run.
289 bool is_valid = bool(NEDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, act_info));
Michalis Spyrouafa5d812017-11-30 14:25:57 +0000290 ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
291}
292// clang-format on
293// *INDENT-ON*
294
Manuel Bottini87350f42020-09-15 13:03:34 +0100295DATA_TEST_CASE(NoPaddingNHWCKernel, framework::DatasetMode::ALL, combine(combine(combine(data_precommit,
296 framework::dataset::make("DataType", DataType::F32)),
297 ActivationFunctionsDataset),
298 framework::dataset::make("DataLayout", { DataLayout::NHWC })),
299
300 shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, act_info, data_layout)
301{
302 TensorShape input_shape = TensorShape(shape);
303 TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
304 const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
305
306 TensorInfo input_info = TensorInfo(input_shape, 1, data_type);
307 TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type);
308
309 TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info);
310
311 if(data_layout == DataLayout::NHWC)
312 {
313 permute(input_shape, PermutationVector(2U, 0U, 1U));
314 permute(weights_shape, PermutationVector(2U, 0U, 1U));
315 permute(output_shape, PermutationVector(2U, 0U, 1U));
316 }
317
318 // Create tensors
319 Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
320 Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
321 Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
322
323 // Create and configure function
324 NEDirectConvolutionLayer conv;
325 conv.configure(&src, &weights, nullptr, &dst, info, act_info);
326
327 validate(src.info()->padding(), PaddingSize(0, 0, 0, 0));
328 validate(weights.info()->padding(), PaddingSize(0, 0, 0, 0));
329 validate(dst.info()->padding(), PaddingSize(0, 0, 0, 0));
330}
331
// Fixture alias for the value-based tests below; exercises NEDirectConvolutionLayer
// through the shared DirectConvolutionValidationFixture.
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100332template <typename T>
333using NEDirectConvolutionLayerFixture = DirectConvolutionValidationFixture<Tensor, Accessor, NEDirectConvolutionLayer, T>;
// Same fixture with the trailing template flag set to true — presumably enables the
// mixed-data-layout path of the fixture; see DirectConvolutionLayerFixture.h to confirm.
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000334template <typename T>
335using NEDirectConvolutionLayerMixedDataLayoutFixture = DirectConvolutionValidationFixture<Tensor, Accessor, NEDirectConvolutionLayer, T, true>;
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100336
337TEST_SUITE(Float)
// FP16 tests are compiled only when the toolchain provides FP16 vector arithmetic.
Ioan-Cristian Szabo5edbd1c2017-11-13 13:34:08 +0000338#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100339TEST_SUITE(FP16)
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000340FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit, framework::dataset::make("DataType",
Michalis Spyrou064add62018-11-01 18:14:27 +0000341 DataType::F16)),
342 ActivationFunctionsDataset),
343 framework::dataset::make("DataLayout", DataLayout::NCHW)))
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100344{
345 // Validate output
Gian Marco Iodice41acb762018-08-23 10:25:06 +0100346 validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100347}
Michalis Spyrou064add62018-11-01 18:14:27 +0000348FIXTURE_DATA_TEST_CASE(RunLarge, NEDirectConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_f16_nightly, framework::dataset::make("DataType", DataType::F16)),
349 ActivationFunctionsDataset),
350 framework::dataset::make("DataLayout", DataLayout::NCHW)))
351{
352 // Validate output
353 validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
354}
355TEST_SUITE_END() // FP16
356#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100357
358TEST_SUITE(FP32)
Michalis Spyrouaeebe4a2019-01-09 14:21:03 +0000359FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit, framework::dataset::make("DataType",
Michalis Spyrou064add62018-11-01 18:14:27 +0000360 DataType::F32)),
361 ActivationFunctionsDataset),
362 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100363{
364 // Validate output
365 validate(Accessor(_target), _reference, tolerance_fp32);
366}
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000367FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEDirectConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit,
Adnan AlSinan0ef2c212022-01-24 10:20:40 +0000368 framework::dataset::make("DataType", DataType::F32)),
369 ActivationFunctionsDataset),
370 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000371{
372 // Validate output
373 validate(Accessor(_target), _reference, tolerance_fp32);
374}
Pablo Marquez Tello4d44ac82021-12-08 15:56:01 +0000375
376FIXTURE_DATA_TEST_CASE(RunSmall8x8, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit8x8, framework::dataset::make("DataType",
377 DataType::F32)),
378 ActivationFunctionsDataset),
379 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
380{
381 // Validate output
382 validate(Accessor(_target), _reference, tolerance_fp32);
383}
384
Gian Marco Iodice95f93612019-06-13 15:58:32 +0100385FIXTURE_DATA_TEST_CASE(RunSmall9x9, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit9x9, framework::dataset::make("DataType",
386 DataType::F32)),
387 ActivationFunctionsDataset),
388 framework::dataset::make("DataLayout", { DataLayout::NHWC })))
389{
390 // Validate output
391 validate(Accessor(_target), _reference, tolerance_fp32);
392}
Michalis Spyrou064add62018-11-01 18:14:27 +0000393FIXTURE_DATA_TEST_CASE(RunLarge, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_f32_nightly, framework::dataset::make("DataType",
394 DataType::F32)),
395 ActivationFunctionsDataset),
396 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
397{
398 // Validate output
399 validate(Accessor(_target), _reference, tolerance_fp32);
400}
// Large-image index-overflow regression; uses the wider usecase_tolerance_fp32.
Sang-Hoon Park38515422020-07-08 11:06:30 +0100401FIXTURE_DATA_TEST_CASE(RunLargeUsecase, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_nightly_usecase, framework::dataset::make("DataType",
402 DataType::F32)),
403 framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
404 framework::dataset::make("DataLayout", { DataLayout::NHWC })))
405{
406 // Validate output
407 validate(Accessor(_target), _reference, usecase_tolerance_fp32);
408}
Michalis Spyrou064add62018-11-01 18:14:27 +0000409TEST_SUITE_END() // FP32
410TEST_SUITE_END() // Float
Michalis Spyrou064add62018-11-01 18:14:27 +0000411TEST_SUITE_END() // DirectConvolutionLayer
Sheri Zhangac6499a2021-02-10 15:32:38 +0000412TEST_SUITE_END() // Neon
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100413} // namespace validation
414} // namespace test
415} // namespace arm_compute