/*
 * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/operators/CpuFullyConnected.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/FullyConnectedLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/FullyConnectedLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
using framework::dataset::make;
namespace
{
/** Tolerance for float operations */
constexpr RelativeTolerance<float> rel_tolerance_f32(0.01f);  /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F32 */
constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.001f); /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F32 */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const AbsoluteTolerance<float>            abs_tolerance_f16(0.3f);                   /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F16 */
const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */
constexpr float                           tolerance_num_f16 = 0.07f;                 /**< Maximum allowed fraction of mismatching values for FP16 */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

/** Tolerance for quantized asymmetric operations */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
constexpr AbsoluteTolerance<int8_t>  tolerance_qasymm8_signed(1);

/** CNN data types */
const auto CNNDataTypes = make("DataType",
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
    DataType::F32,
});

const auto FullyConnectedParameters = combine(make("TransposeWeights", { false, true }), make("ReshapeWeights", { false, true }));

const auto QuantizationData = make("QuantizationInfo",
{
    QuantizationInfo(1.f / 256.f, 10),
    QuantizationInfo(1.1f, 10),
});

const auto IgnoredQuantizationData = make("IgnoredQuantizationInfo",
{
    QuantizationInfo(),
});

const auto NoActivationFunctionDataset = make("ActivationInfo",
{
    ActivationLayerInfo(),
});

const auto ActivationFunctionsDataset = make("ActivationInfo",
{
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.75f, 0.25f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
});

const auto ActivationFunctionsQuantizedDataset = make("ActivationInfo",
{
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.75f, 0.25f),
});
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(FullyConnectedLayer)

/** Test case for memory injection in @ref cpu::CpuFullyConnected.
 *
 * Configure the operator once and inject memory at run-time in multiple executions.
 *
 * Checks performed in order:
 * - Both runs compute the same output
 */
TEST_CASE(MemoryInjection, framework::DatasetMode::ALL)
{
    auto       fc          = std::make_unique<cpu::CpuFullyConnected>();
    const auto src_info    = TensorInfo(TensorShape(8U), 1, DataType::F32, DataLayout::NHWC);
    const auto weight_info = TensorInfo(TensorShape(8U, 4U), 1, DataType::F32, DataLayout::NHWC);
    const auto bias_info   = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
    auto       dst_info    = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
    const auto fc_info     = FullyConnectedLayerInfo{};
    fc->configure(&src_info, &weight_info, &bias_info, &dst_info, fc_info);

    // The input tensors are created and allocated once, then shared by both executions of the lambda below
    auto src    = create_tensor<Tensor>(src_info);
    auto weight = create_tensor<Tensor>(weight_info);
    auto bias   = create_tensor<Tensor>(bias_info);
    src.allocator()->allocate();
    weight.allocator()->allocate();
    bias.allocator()->allocate();

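    // run_pack carries every tensor needed at run-time; prep_pack carries only the
    // constant tensors (weights and bias) consumed by prepare().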
    ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };
    ITensorPack prep_pack{ { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };

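    // Allocate the operator's auxiliary workspace tensors through the memory group and
    // register them with both packs, so that memory is injected at run-time rather than
    // owned by the operator.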
    auto mg = MemoryGroup{};
    auto ws = manage_workspace<Tensor>(fc->workspace(), mg, run_pack, prep_pack);

    auto run_conv = [&]() -> Tensor
    {
        auto dst = create_tensor<Tensor>(dst_info);
        dst.allocator()->allocate();
        run_pack.add_tensor(TensorType::ACL_DST, &dst);

        library->fill_tensor_value(Accessor(src), 1.f);
        library->fill_tensor_value(Accessor(weight), 2.f);
        library->fill_tensor_value(Accessor(bias), 3.f);
        // This operator is configured once and captured by this lambda.
        fc->prepare(prep_pack);
        fc->run(run_pack);
        return dst;
    };
    auto result_0 = run_conv();
    auto result_1 = run_conv();
    for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
    {
        ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
    }
}

/** Test case for memory injection in @ref NEFullyConnectedLayer.
 *
 * Make sure @ref NEFullyConnectedLayer still works when memory is injected at configure time using the old API.
 *
 * Checks performed in order:
 * - Both runs compute the same output
 */
TEST_CASE(MultipleExecutionWithConfigure, framework::DatasetMode::ALL)
{
    auto       fc          = std::make_unique<NEFullyConnectedLayer>();
    const auto src_info    = TensorInfo(TensorShape(8U), 1, DataType::F32, DataLayout::NHWC);
    const auto weight_info = TensorInfo(TensorShape(8U, 4U), 1, DataType::F32, DataLayout::NHWC);
    const auto bias_info   = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
    auto       dst_info    = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
    const auto fc_info     = FullyConnectedLayerInfo{};
    auto run_conv = [&]()
    {
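        // With the legacy API the tensors are bound at configure time, so the function is
        // re-configured with freshly created tensors on every invocation of this lambda.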
        auto src    = create_tensor<Tensor>(src_info);
        auto weight = create_tensor<Tensor>(weight_info);
        auto bias   = create_tensor<Tensor>(bias_info);
        auto dst    = create_tensor<Tensor>(dst_info);
        fc->configure(&src, &weight, &bias, &dst, fc_info);
        src.allocator()->allocate();
        weight.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();
        library->fill_tensor_value(Accessor(src), 1.f);
        library->fill_tensor_value(Accessor(weight), 2.f);
        library->fill_tensor_value(Accessor(bias), 3.f);
        fc->run();
        return dst;
    };
    auto result_0 = run_conv();
    auto result_1 = run_conv();
    for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
    {
        ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
    }
}

/** Unit test for @ref cpu::CpuFullyConnected with a quantized multiplier > 1
 *
 * Tests output correctness.
 */
TEST_CASE(Quant8_Signed_Mult_gt_1, framework::DatasetMode::ALL)
{
    auto       fc          = std::make_unique<cpu::CpuFullyConnected>();
    const auto src_info    = TensorInfo(TensorShape(1U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, -1));
    const auto weight_info = TensorInfo(TensorShape(1U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5, -8));
    const auto bias_info   = TensorInfo(TensorShape(1U), 1, DataType::S32);
    auto       dst_info    = TensorInfo(TensorShape(1U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.1f, 0));
    const auto fc_info     = FullyConnectedLayerInfo{};
    fc->configure(&src_info, &weight_info, &bias_info, &dst_info, fc_info);

    // Create and allocate all tensors up front; this test runs the operator a single time
    auto src    = create_tensor<Tensor>(src_info);
    auto weight = create_tensor<Tensor>(weight_info);
    auto bias   = create_tensor<Tensor>(bias_info);
    auto dst    = create_tensor<Tensor>(dst_info);
    src.allocator()->allocate();
    weight.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();

    ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias }, { TensorType::ACL_DST, &dst } };
    ITensorPack prep_pack{ { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };

    auto mg = MemoryGroup{};
    auto ws = manage_workspace<Tensor>(fc->workspace(), mg, run_pack, prep_pack);

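    // The effective requantization multiplier is (src_scale * weight_scale) / dst_scale
    // = (0.5 * 0.5) / 0.1 = 2.5, i.e. greater than 1. Dequantized: src = {2, 32, 16},
    // weight = {2}, bias = 16 * 0.25 = 4, so the real outputs are src * weight + bias
    // = {8, 68, 36}. Requantized at scale 0.1 and clamped to the int8 range, this
    // yields the expected values {80, 127, 127}.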
    // Initialize input values
    const std::vector<int8_t>  src_values    = { 3, 63, 31 };
    const std::vector<int8_t>  weight_values = { -4 };
    const std::vector<int32_t> bias_values   = { 16 };
    const std::vector<int32_t> expected      = { 80, 127, 127 };
    library->fill_static_values(Accessor(src), src_values);
    library->fill_static_values(Accessor(weight), weight_values);
    library->fill_static_values(Accessor(bias), bias_values);

    // Run FC layer
    fc->prepare(prep_pack);
    fc->run(run_pack);

    auto dst_ptr = reinterpret_cast<int8_t *>(dst.buffer());
    for(size_t i = 0; i < dst.info()->tensor_shape().total_size(); ++i)
    {
        ARM_COMPUTE_EXPECT(dst_ptr[i] == expected[i], framework::LogLevel::ERRORS);
    }
}

// *INDENT-OFF*
// clang-format off
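// Each row across the zipped datasets below describes one configuration together with the
// expected result of NEFullyConnectedLayer::validate() for it.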
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
    make("InputInfo", { TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Mismatching data types
                        TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
                        TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
                        TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Invalid weights dimensions
                        TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Wrongly reshaped weights
                        TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
                      }),
    make("WeightsInfo", { TensorInfo(TensorShape(315U, 271U), 1, DataType::F16),
                          TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
                          TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
                          TensorInfo(TensorShape(217U, 315U), 1, DataType::F32),
                          TensorInfo(TensorShape(217U, 315U), 1, DataType::F32),
                          TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
                        })),
    make("BiasInfo", { TensorInfo(TensorShape(271U), 1, DataType::F32),
                       TensorInfo(TensorShape(192U), 1, DataType::F32),
                       TensorInfo(TensorShape(192U), 1, DataType::F32),
                       TensorInfo(TensorShape(271U), 1, DataType::F32),
                       TensorInfo(TensorShape(271U), 1, DataType::F32),
                       TensorInfo(TensorShape(192U), 1, DataType::F32),
                     })),
    make("OutputInfo", { TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
                         TensorInfo(TensorShape(192U, 4U), 1, DataType::F32),
                         TensorInfo(TensorShape(192U, 4U), 1, DataType::F32),
                         TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
                         TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
                         TensorInfo(TensorShape(192U, 4U), 1, DataType::F32),
                       })),
    make("TransposeWeights", { true, true, false, true, true, true })),
    make("ReshapedWeights", { false, false, false, false, false, false })),
    make("Expected", { false, true, true, false, false, true })),
    input_info, weights_info, bias_info, output_info, transpose_weights, reshaped_weights, expected)
{
    // Create Fully Connected layer info
    FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights    = transpose_weights;
    fc_info.are_weights_reshaped = reshaped_weights;

    Status status = NEFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), fc_info);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

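// Fixture aliases instantiating the generic FullyConnectedLayer validation fixtures for
// the Neon backend.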
template <typename T>
using NEFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
template <typename T>
using NEFullyConnectedLayerMixedDataLayoutFixture = FullyConnectedLayerValidationFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
template <typename T>
using NEFullyConnectedLayerDynamicWeightsFixture = FullyConnectedWithDynamicWeightsFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
template <typename T>
using NEFullyConnectedLayerDynamicBiasFixture = FullyConnectedWithDynamicBiasFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;

TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               FullyConnectedParameters,
                               make("DataType", DataType::F16),
                               NoActivationFunctionDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunWithActivation, NEFullyConnectedLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::FullyConnectedLayerWithActivationDataset(),
                               FullyConnectedParameters,
                               make("DataType", DataType::F16),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEFullyConnectedLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeFullyConnectedLayerDataset(),
                               FullyConnectedParameters,
                               make("DataType", DataType::F16),
                               NoActivationFunctionDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunDynamicWeights, NEFullyConnectedLayerDynamicWeightsFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               make("DataType", DataType::F16),
                               make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)),
                               make("WeightsReshaped", { false, true })))
{
}
TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               FullyConnectedParameters,
                               make("DataType", DataType::F32),
                               NoActivationFunctionDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(
                           make("Input", TensorShape(9U, 5U, 7U)),
                           make("Weights", TensorShape(315U, 271U)),
                           make("Biases", TensorShape(271U)),
                           make("Output", TensorShape(271U)),
                           FullyConnectedParameters,
                           make("DataType", DataType::F32),
                           make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunWithActivation, NEFullyConnectedLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::FullyConnectedLayerWithActivationDataset(),
                               FullyConnectedParameters,
                               make("DataType", DataType::F32),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEFullyConnectedLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeFullyConnectedLayerDataset(),
                               FullyConnectedParameters,
                               make("DataType", DataType::F32),
                               NoActivationFunctionDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunDynamicWeights, NEFullyConnectedLayerDynamicWeightsFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               make("DataType", DataType::F32),
                               make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)),
                               make("WeightsReshaped", { false, true })))
{
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

template <typename T>
using NEFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
template <typename T>
using NEFullyConnectedLayerQuantizedMixedDataLayoutFixture = FullyConnectedLayerValidationQuantizedFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunMixedDataLayoutWithActivation, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(
                           make("Input", TensorShape(9U, 5U, 7U)),
                           make("Weights", TensorShape(315U, 271U)),
                           make("Biases", TensorShape(271U)),
                           make("Output", TensorShape(271U)),
                           FullyConnectedParameters,
                           make("DataType", DataType::QASYMM8),
                           QuantizationData,
                           make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunSmallWithActivation, NEFullyConnectedLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::FullyConnectedLayerWithActivationDataset(),
                               FullyConnectedParameters,
                               make("DataType", DataType::QASYMM8),
                               QuantizationData,
                               ActivationFunctionsQuantizedDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunDynamicWeightsWithActivation, NEFullyConnectedLayerDynamicWeightsFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               make("DataType", DataType::QASYMM8),
                               make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)),
                               make("WeightsReshaped", { false })))
{
}
FIXTURE_DATA_TEST_CASE(RunDynamicBiasWithActivation, NEFullyConnectedLayerDynamicBiasFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               make("DataType", DataType::QASYMM8),
                               make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
{
}

// Dynamic quantization tests
FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               FullyConnectedParameters,
                               make("DataType", DataType::QASYMM8),
                               IgnoredQuantizationData,
                               NoActivationFunctionDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEFullyConnectedLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeFullyConnectedLayerDataset(),
                               FullyConnectedParameters,
                               make("DataType", DataType::QASYMM8),
                               QuantizationData,
                               NoActivationFunctionDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunDynamicBias, NEFullyConnectedLayerDynamicBiasFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               make("DataType", DataType::QASYMM8),
                               NoActivationFunctionDataset))
{
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(
                           make("Input", TensorShape(9U, 5U, 7U)),
                           make("Weights", TensorShape(315U, 271U)),
                           make("Biases", TensorShape(271U)),
                           make("Output", TensorShape(271U)),
                           FullyConnectedParameters,
                           make("DataType", DataType::QASYMM8),
                           IgnoredQuantizationData,
                           NoActivationFunctionDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunDynamicWeights, NEFullyConnectedLayerDynamicWeightsFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               make("DataType", DataType::QASYMM8),
                               NoActivationFunctionDataset,
                               make("WeightsReshaped", { false })))
{
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunMixedDataLayoutWithActivation, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(
                           make("Input", TensorShape(9U, 5U, 7U)),
                           make("Weights", TensorShape(315U, 271U)),
                           make("Biases", TensorShape(271U)),
                           make("Output", TensorShape(271U)),
                           FullyConnectedParameters,
                           make("DataType", DataType::QASYMM8_SIGNED),
                           QuantizationData,
                           make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}
FIXTURE_DATA_TEST_CASE(RunWithActivation, NEFullyConnectedLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::FullyConnectedLayerWithActivationDataset(),
                               FullyConnectedParameters,
                               make("DataType", DataType::QASYMM8_SIGNED),
                               QuantizationData,
                               ActivationFunctionsQuantizedDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}
FIXTURE_DATA_TEST_CASE(RunDynamicWeightsWithActivation, NEFullyConnectedLayerDynamicWeightsFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               make("DataType", DataType::QASYMM8_SIGNED),
                               make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)),
                               make("WeightsReshaped", { false })))
{
}

// Dynamic quantization tests
FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               FullyConnectedParameters,
                               make("DataType", DataType::QASYMM8_SIGNED),
                               IgnoredQuantizationData,
                               NoActivationFunctionDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(
                           make("Input", TensorShape(9U, 5U, 7U)),
                           make("Weights", TensorShape(315U, 271U)),
                           make("Biases", TensorShape(271U)),
                           make("Output", TensorShape(271U)),
                           FullyConnectedParameters,
                           make("DataType", DataType::QASYMM8_SIGNED),
                           QuantizationData,
                           NoActivationFunctionDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}
FIXTURE_DATA_TEST_CASE(RunDynamicWeights, NEFullyConnectedLayerDynamicWeightsFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallFullyConnectedLayerDataset(),
                               make("DataType", DataType::QASYMM8_SIGNED),
                               NoActivationFunctionDataset,
                               make("WeightsReshaped", { false })))
{
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // FullyConnectedLayer
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute