/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/FullyConnectedLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/FullyConnectedLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
/** Tolerance for float operations */
constexpr RelativeTolerance<float> rel_tolerance_f32(0.01f);  /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F32 */
constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.001f); /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F32 */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const AbsoluteTolerance<float>            abs_tolerance_f16(0.3f);                   /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F16 */
const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */
constexpr float                           tolerance_num_f16 = 0.07f;                 /**< Tolerance number for FP16 */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

/** Tolerance for quantized asymmetric operations */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
    DataType::F32,
});

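// Every test below is combined with this dataset, i.e. run over the cartesian product of the
// TransposeWeights and ReshapeWeights flags, so the layer is exercised both with and without
// weight transposition/reshaping.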
const auto FullyConnectedParameters = combine(framework::dataset::make("TransposeWeights", { false, true }), framework::dataset::make("ReshapeWeights", { false, true }));
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(FullyConnectedLayer)

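// Configuration test: for every small shape / parameter / data-type combination it creates the
// tensors, configures NEFullyConnectedLayer and checks that the destination valid region and the
// quantization info of the inputs are left untouched. The function is only configured, never run.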
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallFullyConnectedLayerDataset(),
                                                                           FullyConnectedParameters),
                                                                   CNNDataTypes),
               src_shape, weights_shape, bias_shape, dst_shape, transpose_weights, reshape_weights, data_type)
{
    const DataType         bias_data_type    = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
    const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(2.f / 255.f, 127) : QuantizationInfo();

    TensorShape ws(weights_shape);

    // Transpose weights if not done in the function
    if(!reshape_weights || !transpose_weights)
    {
        const size_t shape_x = ws.x();
        ws.set(0, ws.y());
        ws.set(1, shape_x);
    }

    // Create tensors
    Tensor src     = create_tensor<Tensor>(src_shape, data_type, 1, quantization_info);
    Tensor weights = create_tensor<Tensor>(ws, data_type, 1, quantization_info);
    Tensor bias    = create_tensor<Tensor>(bias_shape, bias_data_type, 1, quantization_info);
    Tensor dst     = create_tensor<Tensor>(dst_shape, data_type, 1, quantization_info);

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

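    // The ReshapeWeights dataset flag states whether the function itself should reshape the
    // weights, so are_weights_reshaped is its negation.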
    // Create Fully Connected layer info
    FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights    = transpose_weights;
    fc_info.are_weights_reshaped = !reshape_weights;

    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();

    // Create and configure function.
    NEFullyConnectedLayer fc;
    fc.configure(&src, &weights, &bias, &dst, fc_info);

    // Validate valid region
    const ValidRegion dst_valid_region = shape_to_valid_region(dst_shape);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate QuantizationInfo
    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);
}

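// Validate test: feeds deliberately inconsistent TensorInfo combinations (see the inline comments
// on the InputInfo rows) to NEFullyConnectedLayer::validate() and checks that the returned Status
// matches the "Expected" column, without allocating or running anything.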
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Mismatching data types
                                            TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
                                            TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
                                            TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Invalid weights dimensions
                                            TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Wrongly reshaped weights
                                            TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
                                          }),
    framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(315U, 271U), 1, DataType::F16),
                                             TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
                                             TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
                                             TensorInfo(TensorShape(217U, 315U), 1, DataType::F32),
                                             TensorInfo(TensorShape(217U, 315U), 1, DataType::F32),
                                             TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
                                           })),
    framework::dataset::make("BiasInfo",{ TensorInfo(TensorShape(271U), 1, DataType::F32),
                                          TensorInfo(TensorShape(192U), 1, DataType::F32),
                                          TensorInfo(TensorShape(192U), 1, DataType::F32),
                                          TensorInfo(TensorShape(271U), 1, DataType::F32),
                                          TensorInfo(TensorShape(271U), 1, DataType::F32),
                                          TensorInfo(TensorShape(192U), 1, DataType::F32),
                                        })),
    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
                                            TensorInfo(TensorShape(192U, 4U), 1, DataType::F32),
                                            TensorInfo(TensorShape(192U, 4U), 1, DataType::F32),
                                            TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
                                            TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
                                            TensorInfo(TensorShape(192U, 4U), 1, DataType::F32),
                                          })),
    framework::dataset::make("TransposeWeights",{ true, true, false, true, true, true })),
    framework::dataset::make("ReshapedWeights",{ false, false, false, false, false, false })),
    framework::dataset::make("Expected", { false, true, true, false, false, true })),
    input_info, weights_info, bias_info, output_info, transpose_weights, reshaped_weights, expected)
{
    // Create Fully Connected layer info
    FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights    = transpose_weights;
    fc_info.are_weights_reshaped = reshaped_weights;

    Status status = NEFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), fc_info);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

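// The validation fixture runs the NEON function on randomly filled tensors and a plain reference
// implementation on the same data, exposing the results as _target and _reference for the
// validate() calls below.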
template <typename T>
using NEFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;

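// Floating-point tests: an output element is accepted if it lies within the relative or absolute
// tolerances defined at the top of the file; for FP16, tolerance_num_f16 additionally allows a
// small fraction of elements to fall outside those bounds.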
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallFullyConnectedLayerDataset(),
                                                                                                                        FullyConnectedParameters),
                                                                                                                framework::dataset::make("DataType", DataType::F16)))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEFullyConnectedLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeFullyConnectedLayerDataset(),
                                                                                                                      FullyConnectedParameters),
                                                                                                              framework::dataset::make("DataType", DataType::F16)))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
}
TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallFullyConnectedLayerDataset(), FullyConnectedParameters),
                                                                                                                 framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEFullyConnectedLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeFullyConnectedLayerDataset(), FullyConnectedParameters),
                                                                                                               framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
}
TEST_SUITE_END()
TEST_SUITE_END()

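// Quantized (QASYMM8) tests: the quantized fixture takes an extra QuantizationInfo(scale, offset)
// parameter, where real values map to uint8 as q = round(real / scale) + offset. The output only
// has to match the quantized reference within one quantization step (tolerance_qasymm8).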
template <typename T>
using NEFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(
                           combine(datasets::SmallFullyConnectedLayerDataset(),
                                   FullyConnectedParameters),
                           framework::dataset::make("DataType", DataType::QASYMM8)),
                           framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255.f, 10) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEFullyConnectedLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(
                           combine(datasets::LargeFullyConnectedLayerDataset(),
                                   FullyConnectedParameters),
                           framework::dataset::make("DataType", DataType::QASYMM8)),
                           framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 256.f, 10) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END()
TEST_SUITE_END()

TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
} // namespace test
} // namespace arm_compute