/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "arm_compute/core/Types.h"
25#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
26#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
27#include "arm_compute/runtime/Tensor.h"
28#include "arm_compute/runtime/TensorAllocator.h"
29#include "tests/NEON/Accessor.h"
30#include "tests/PaddingCalculator.h"
31#include "tests/datasets/DilatedConvolutionLayerDataset.h"
32#include "tests/framework/Asserts.h"
33#include "tests/framework/Macros.h"
34#include "tests/framework/datasets/Datasets.h"
35#include "tests/validation/Validation.h"
36#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
37
38namespace arm_compute
39{
40namespace test
41{
42namespace validation
43{
44namespace
45{
46const AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
47#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
48const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
49#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
50const AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
51constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
52
53/** CNN data types */
54const auto CNNDataTypes = framework::dataset::make("DataType",
55{
56#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
57 DataType::F16,
58#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
59 DataType::F32,
60 DataType::QS8,
61 DataType::QS16,
62 DataType::QASYMM8,
63});
64} // namespace
65
66TEST_SUITE(NEON)
67
68TEST_SUITE(DilatedConvolutionLayer)
Andrew Mundy4d9379a2018-03-15 16:47:03 +000069DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
70 framework::dataset::make("InputInfo", { TensorInfo(TensorShape(8U, 8U, 2U), 1, DataType::F32, 0),
71 TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32, 0),
72 TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0),
73 TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0)
74 }),
75 framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
76 TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
77 TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
78 TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0)
79 })),
Alex Gilday7da29b62018-03-23 14:16:00 +000080 framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(6U, 6U, 1U), 1, DataType::F32, 0),
81 TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0),
82 TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
83 TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0)
84 })),
85 framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
86 PadStrideInfo(1, 1, 0, 0),
87 PadStrideInfo(2, 1, 0, 0),
88 PadStrideInfo(3, 2, 1, 0)
89 })),
90 framework::dataset::make("Dilation", { Size2D(1U, 2U),
91 Size2D(2U, 1U),
92 Size2D(2U, 2U),
93 Size2D(3U, 3U)
94 })),
95 framework::dataset::make("Expected", { ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM })),
Andrew Mundy4d9379a2018-03-15 16:47:03 +000096 input_info, weights_info, output_info, conv_info, dilation, expected)
Alex Gilday7da29b62018-03-23 14:16:00 +000097{
98 ConvolutionMethod is_valid = NEConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(false),
99 &weights_info.clone()->set_is_resizable(false),
Alex Gilday7da29b62018-03-23 14:16:00 +0000100 &output_info.clone()->set_is_resizable(false),
101 conv_info, WeightsInfo(), dilation);
102 ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
103}
104TEST_SUITE_END()
105
106TEST_SUITE(GEMMDilatedConvolutionLayer)
107
108DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallDilatedConvolutionLayerDataset(), datasets::LargeDilatedConvolutionLayerDataset()),
109 CNNDataTypes),
110 input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type)
111{
112 // Set fixed point position data type allowed
113 int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
114
115 auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
116
117 // Create tensors
118 Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
119 Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
120 Tensor bias = create_tensor<Tensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
121 Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
122
123 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
124 ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
125 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
126 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
127
128 const QuantizationInfo src_quantization_info = src.info()->quantization_info();
129 const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
130
131 // Create and configure function
132 NEGEMMConvolutionLayer conv;
133 conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation);
134
135 // Validate valid region
136 const ValidRegion src_valid_region = shape_to_valid_region(input_shape);
137 const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
138 const ValidRegion bias_valid_region = shape_to_valid_region(bias_shape);
139 const ValidRegion dst_valid_region = shape_to_valid_region(output_shape);
140
141 validate(src.info()->valid_region(), src_valid_region);
142 validate(weights.info()->valid_region(), weights_valid_region);
143 validate(bias.info()->valid_region(), bias_valid_region);
144 validate(dst.info()->valid_region(), dst_valid_region);
145
146 // Validate QuantizationInfo
147 ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
148 ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);
149
150 // Validate padding
151 //TODO(COMPMID-415) Need to validate padding?
152}
153
154template <typename T>
155using NEGEMMDilatedConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T>;
156
157TEST_SUITE(Float)
158#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
159TEST_SUITE(FP16)
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000160FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
Alex Gilday7da29b62018-03-23 14:16:00 +0000161 framework::dataset::make("ReshapeWeights", { true, false })),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000162 framework::dataset::make("DataType", DataType::F16)),
163 framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
Alex Gilday7da29b62018-03-23 14:16:00 +0000164{
165 // Validate output
166 validate(Accessor(_target), _reference, tolerance_f16);
167}
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000168FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
Alex Gilday7da29b62018-03-23 14:16:00 +0000169 framework::dataset::make("ReshapeWeights", { true, false })),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000170 framework::dataset::make("DataType", DataType::F16)),
171 framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
Alex Gilday7da29b62018-03-23 14:16:00 +0000172{
173 // Validate output
174 validate(Accessor(_target), _reference, tolerance_f16);
175}
176TEST_SUITE_END()
177#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
178
179TEST_SUITE(FP32)
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000180FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
Alex Gilday7da29b62018-03-23 14:16:00 +0000181 framework::dataset::make("ReshapeWeights", { true, false })),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000182 framework::dataset::make("DataType", DataType::F32)),
183 framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
Alex Gilday7da29b62018-03-23 14:16:00 +0000184{
185 // Validate output
186 validate(Accessor(_target), _reference, tolerance_f32);
187}
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000188FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
Alex Gilday7da29b62018-03-23 14:16:00 +0000189 framework::dataset::make("ReshapeWeights", { true, false })),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000190 framework::dataset::make("DataType", DataType::F32)),
191 framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
Alex Gilday7da29b62018-03-23 14:16:00 +0000192{
193 // Validate output
194 validate(Accessor(_target), _reference, tolerance_f32);
195}
196TEST_SUITE_END()
197TEST_SUITE_END()
198
199template <typename T>
200using NEGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
201
202TEST_SUITE(FixedPoint)
203TEST_SUITE(QS8)
204// We test for fixed point precision [4,6]
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000205FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
206 combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
207 framework::dataset::make("ReshapeWeights", { true, false })),
208 framework::dataset::make("DataType", DataType::QS8)),
209 framework::dataset::make("FractionalBits", 4, 7)),
210 framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
Alex Gilday7da29b62018-03-23 14:16:00 +0000211{
212 // Validate output
213 validate(Accessor(_target), _reference, tolerance_q);
214}
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000215FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY,
216 combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
217 framework::dataset::make("ReshapeWeights", { true, false })),
218 framework::dataset::make("DataType", DataType::QS8)),
219 framework::dataset::make("FractionalBits", 4, 7)),
220 framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
Alex Gilday7da29b62018-03-23 14:16:00 +0000221{
222 // Validate output
223 validate(Accessor(_target), _reference, tolerance_q);
224}
225TEST_SUITE_END()
226
227TEST_SUITE(QS16)
228// Testing for fixed point position [1,14)
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000229FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
230 combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
231 framework::dataset::make("ReshapeWeights", { true, false })),
232 framework::dataset::make("DataType", DataType::QS16)),
233 framework::dataset::make("FractionalBits", 1, 14)),
234 framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
Alex Gilday7da29b62018-03-23 14:16:00 +0000235{
236 // Validate output
237 validate(Accessor(_target), _reference, tolerance_q);
238}
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000239FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY,
240 combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
241 framework::dataset::make("ReshapeWeights", { true, false })),
242 framework::dataset::make("DataType", DataType::QS16)),
243 framework::dataset::make("FractionalBits", 1, 14)),
244 framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
Alex Gilday7da29b62018-03-23 14:16:00 +0000245{
246 // Validate output
247 validate(Accessor(_target), _reference, tolerance_q);
248}
249TEST_SUITE_END()
250TEST_SUITE_END()
251
252template <typename T>
253using NEGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
254
255TEST_SUITE(Quantized)
256TEST_SUITE(QASYMM8)
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000257FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
258 combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
259 framework::dataset::make("ReshapeWeights", { true })),
260 framework::dataset::make("DataType", DataType::QASYMM8)),
261 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
262 framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
Alex Gilday7da29b62018-03-23 14:16:00 +0000263{
264 // Validate output
265 validate(Accessor(_target), _reference, tolerance_qasymm8);
266}
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000267FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
268 combine(combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
269 framework::dataset::make("ReshapeWeights", { true })),
270 framework::dataset::make("DataType", DataType::QASYMM8)),
271 framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
272 framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
Alex Gilday7da29b62018-03-23 14:16:00 +0000273{
274 // Validate output
275 validate(Accessor(_target), _reference, tolerance_qasymm8);
276}
277TEST_SUITE_END()
278TEST_SUITE_END()
279
280TEST_SUITE_END()
281TEST_SUITE_END()
282} // namespace validation
283} // namespace test
284} // namespace arm_compute