/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWinogradLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeConvolutionLayerDataset.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
#include "tests/validation/fixtures/WinogradLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
const AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
const AbsoluteTolerance<float>     tolerance_q(1.0f);      /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
    DataType::F32,
    DataType::QS8,
    DataType::QS16,
    DataType::QASYMM8,
});
} // namespace

TEST_SUITE(NEON)

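// The Winograd-based convolution tests below are only compiled for AArch64 builds.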
#if defined(__aarch64__)
TEST_SUITE(WinogradLayer)
template <typename T>
using NEWinogradLayerFixture = WinogradLayerValidationFixture<Tensor, Accessor, NEWinogradLayer, T>;

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradLayerFixture<float>, framework::DatasetMode::PRECOMMIT, datasets::SmallWinogradLayerDataset())
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}

TEST_SUITE_END()
TEST_SUITE_END()
#endif /* __aarch64__ */

TEST_SUITE(ConvolutionLayer)

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()), CNNDataTypes),
               input_shape, weights_shape, bias_shape, output_shape, info, data_type)
{
    // Set the fixed point position for fixed point data types (0 otherwise)
    int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;

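    // For asymmetric quantized types (QASYMM8) the bias tensor uses signed 32-bit values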
    auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

    // Create tensors
    Tensor src     = create_tensor<Tensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
    Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
    Tensor bias    = create_tensor<Tensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
    Tensor dst     = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();

    // Create and configure function
    NEConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate QuantizationInfo
    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);

    // Validate padding
    //TODO(COMPMID-415) Need to validate padding?
}

template <typename T>
using NEConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T>;

TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                      framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                              framework::dataset::make("DataType", DataType::F16)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                                                                                    framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                            framework::dataset::make("DataType", DataType::F16)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}
TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                       framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                               framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                                                                                     framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                             framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}
TEST_SUITE_END()
TEST_SUITE_END()

template <typename T>
using NEConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEConvolutionLayer, T>;

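// Note: the (start, end) overload of framework::dataset::make used for FractionalBits below is end-exclusive,
// matching the ranges stated in the comments of the QS8 and QS16 suites.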
TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
// We test for fixed point precision [4,6]
FIXTURE_DATA_TEST_CASE(RunSmall, NEConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                                          framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                                                  framework::dataset::make("DataType", DataType::QS8)),
                                                                                                                          framework::dataset::make("FractionalBits", 4, 7)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_q);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                                                                                                        framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                                                framework::dataset::make("DataType", DataType::QS8)),
                                                                                                                        framework::dataset::make("FractionalBits", 4, 7)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_q);
}
TEST_SUITE_END()

TEST_SUITE(QS16)
// Testing for fixed point position [1,14)
FIXTURE_DATA_TEST_CASE(RunSmall, NEConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                                           framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                                                   framework::dataset::make("DataType", DataType::QS16)),
                                                                                                                           framework::dataset::make("FractionalBits", 1, 14)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_q);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                                                                                                         framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                                                 framework::dataset::make("DataType", DataType::QS16)),
                                                                                                                         framework::dataset::make("FractionalBits", 1, 14)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_q);
}
TEST_SUITE_END()
TEST_SUITE_END()

template <typename T>
using NEConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T>;

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                                                                                          framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                                  framework::dataset::make("DataType", DataType::QASYMM8)),
                                                                                                                          framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                                                                                                        framework::dataset::make("ReshapeWeights", { true })),
                                                                                                                                framework::dataset::make("DataType", DataType::QASYMM8)),
                                                                                                                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END()
TEST_SUITE_END()

TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
} // namespace test
} // namespace arm_compute