/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/DirectConvolutionLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for FP16 types */
const AbsoluteTolerance<float>            abs_tolerance_f16(0.2f);                   /**< Absolute tolerance for FP16 types */
constexpr float                           tolerance_num = 0.07f;                     /**< Tolerance number for the FP16 implementation */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */

/** Direct convolution data set for FP32 */
const auto data_pad_f32 = concat(concat(combine(framework::dataset::make("PadX", { 0, 1 }),
                                                combine(framework::dataset::make("PadY", { 0, 1 }),
                                                        framework::dataset::make("KernelSize", 3))),
                                        combine(framework::dataset::make("PadX", { 0, 2 }),
                                                combine(framework::dataset::make("PadY", { 0, 2 }),
                                                        framework::dataset::make("KernelSize", 3)))),
                                 combine(framework::dataset::make("PadX", { 0, 3 }),
                                         combine(framework::dataset::make("PadY", { 0, 3 }),
                                                 framework::dataset::make("KernelSize", 5))));

/** Direct convolution data set for FP16 */
const auto data_pad_f16 = concat(combine(framework::dataset::make("PadX", { 0, 1 }),
                                         combine(framework::dataset::make("PadY", { 0, 1 }),
                                                 framework::dataset::make("KernelSize", 3))),
                                 combine(framework::dataset::make("PadX", { 0 }),
                                         combine(framework::dataset::make("PadY", { 0 }),
                                                 framework::dataset::make("KernelSize", 1))));

const auto data_f32 = combine(datasets::SmallDirectConvolutionShapes(),
                              combine(framework::dataset::make("StrideX", { 1, 2, 3 }),
                                      combine(framework::dataset::make("StrideY", { 1, 2, 3 }),
                                              data_pad_f32)));

const auto data_f16 = combine(datasets::SmallDirectConvolutionShapes(),
                              combine(framework::dataset::make("StrideX", { 1, 2, 3 }),
                                      combine(framework::dataset::make("StrideY", { 1, 2, 3 }),
                                              data_pad_f16)));

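// The precommit variants restrict NumKernels to a single value to keep run time short,
// while the nightly variants sweep several kernel counts over the same shapes/strides/pads.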
const auto data_f32_nightly   = combine(data_f32, framework::dataset::make("NumKernels", { 1, 4, 8, 16 }));
const auto data_f16_nightly   = combine(data_f16, framework::dataset::make("NumKernels", { 1, 4, 8, 16 }));
const auto data_f32_precommit = combine(data_f32, framework::dataset::make("NumKernels", { 4 }));
const auto data_f16_precommit = combine(data_f16, framework::dataset::make("NumKernels", { 4 }));

/** Activation function dataset */
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(DirectConvolutionLayer)

// *INDENT-OFF*
// clang-format off
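// Each column of the zipped dataset below describes one invalid configuration (see the inline
// comments on InputInfo); NEDirectConvolutionLayer::validate() is expected to reject every one
// of them, hence all entries in the "Expected" dataset are false.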
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/weights
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching input feature maps
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported kernel width
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non-rectangular weights dimensions
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights dimensions
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid stride
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases size
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases dimensions
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid output size
                                                     }),
               framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16),
                                                        TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
                                                      })),
               framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(3U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                     })),
               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32),
                                                     })),
               framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(3, 3, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                    })),
               framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
})),
               framework::dataset::make("Expected", { false, false, false, false, false, false, false, false, false })),
               input_info, weights_info, biases_info, output_info, conv_info, act_info, expected)
{
    bool is_valid = bool(NEDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, act_info));
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

// TODO(COMPMID-415): Configuration tests?

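/** Fixture that configures and runs NEDirectConvolutionLayer for type T and also computes a
 *  reference result; the test cases below compare _target against _reference. */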
template <typename T>
using NEDirectConvolutionLayerFixture = DirectConvolutionValidationFixture<Tensor, Accessor, NEDirectConvolutionLayer, T>;

TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_f16_precommit, framework::dataset::make("DataType",
                                                                                                                            DataType::F16)),
                                                                                                                    ActivationFunctionsDataset),
                                                                                                                    framework::dataset::make("DataLayout", DataLayout::NCHW)))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEDirectConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_f16_nightly, framework::dataset::make("DataType", DataType::F16)),
                                                                                                                  ActivationFunctionsDataset),
                                                                                                                  framework::dataset::make("DataLayout", DataLayout::NCHW)))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
}
TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_f32_precommit, framework::dataset::make("DataType",
                                                                                                                             DataType::F32)),
                                                                                                                     ActivationFunctionsDataset),
                                                                                                                     framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_f32_nightly, framework::dataset::make("DataType",
                                                                                                                           DataType::F32)),
                                                                                                                   ActivationFunctionsDataset),
                                                                                                                   framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

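/** Activation function dataset for quantized data types */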
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});

TEST_SUITE_END() // DirectConvolutionLayer
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute