/*
 * Copyright (c) 2017-2024 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h"
#include "tests/NEON/Accessor.h"
#include "tests/NEON/Helper.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
using framework::dataset::make;

namespace
{
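// One-LSB absolute tolerances: the fixed-point requantization used by the quantized and
// batched paths may round differently from the floating-point reference.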
constexpr AbsoluteTolerance<float> tolerance_batched(1);
constexpr AbsoluteTolerance<float> tolerance_quant(1);
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(GEMMLowp)
TEST_SUITE(MatrixMultiplyCore)

using NEGEMMLowpMatrixMultiplyCoreFixture           = GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
using NEGEMMLowpMatrixMultiplyCoreAccumulateFixture = GEMMLowpMatrixMultiplyAccumulateValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()),
               shape_a, shape_b, shape_c, a_offset, b_offset)
{
    // Create tensors
    Tensor a = create_tensor<Tensor>(shape_a, DataType::QASYMM8);
    Tensor b = create_tensor<Tensor>(shape_b, DataType::QASYMM8);
    Tensor c = create_tensor<Tensor>(shape_c, DataType::S32);

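    // A 1/255 scale with a dataset-provided zero point is a typical QASYMM8 mapping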
    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
    b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
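    // The third argument is the optional bias tensor; it is unused here, so the S32 tensor c
    // receives the raw accumulator output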
    gemmlowp_mm.configure(&a, &b, nullptr, &c);

    // Validate padding is zero
    validate(a.info()->padding(), PaddingSize());
    validate(b.info()->padding(), PaddingSize());
    validate(c.info()->padding(), PaddingSize());
}
// Accumulation is not supported for Int8/UInt8 in aarch32
#ifdef __aarch64__
DATA_TEST_CASE(ValidateAccumulate, framework::DatasetMode::ALL, combine(
                   zip(
                       make("In0", { TensorShape(21U, 1U) }),
                       make("In1", { TensorShape(1U, 21U) }),
                       make("Dst", { TensorShape(1U, 1U) }),
                       make("a_offset", { -2 }),
                       make("b_offset", { 13 })
                   ),
                   zip(
                       make("OutputDataType", { DataType::S32, DataType::QASYMM8, DataType::QASYMM8_SIGNED }),
                       make("Expected", { true, false, false })
                   )),
               shape_a, shape_b, shape_dst, a_offset, b_offset, output_data_type, expected)
{
    DataType input_data_type = (output_data_type == DataType::S32 ? DataType::QASYMM8 : output_data_type);
    // Accumulation test for GEMM kernels
    TensorInfo a(shape_a, 1, input_data_type, QuantizationInfo(1.0f / 255, a_offset));
    TensorInfo b(shape_b, 1, input_data_type, QuantizationInfo(1.0f / 255, b_offset));
    TensorInfo dst(shape_dst, 1, output_data_type, QuantizationInfo());

    // Create and configure function
    GEMMInfo gemm_info = GEMMInfo();
    gemm_info.set_accumulate(true);

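    // A quantized destination needs a requantizing output stage; validation is expected to
    // reject accumulation into an already-quantized output (see the Expected values above)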
    if (is_data_type_quantized(output_data_type))
    {
        GEMMLowpOutputStageInfo gemmLowpOutputStageInfo = GEMMLowpOutputStageInfo();
        gemmLowpOutputStageInfo.type                    = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;

        gemm_info.set_gemmlowp_output_stage(gemmLowpOutputStageInfo);
    }

    cpu::CpuGemmLowpMatrixMultiplyCore gemmlowp_mm;
    Status status = gemmlowp_mm.validate(&a, &b, nullptr, &dst, gemm_info);

    ARM_COMPUTE_EXPECT((expected == bool(status)), framework::LogLevel::ERRORS);
}
#endif // __aarch64__

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(
    make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Input not a multiple of 4
                         TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),                                    // Mismatching data type
                         TensorInfo(TensorShape(20U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                         TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                         TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)),
                       }),
    make("InputBInfo", { TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                         TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                         TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                         TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                         TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                       }),
    make("OutputInfo", { TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                         TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                         TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                         TensorInfo(TensorShape(8U, 11U), 1, DataType::S32),
                         TensorInfo(TensorShape(64U, 32U), 1, DataType::S32),
                       }),
    make("Expected", { true, false, false, false, true })),
    a_info, b_info, output_info, expected)
{
    // Lock tensors
    Status status = NEGEMMLowpMatrixMultiplyCore::validate(&a_info.clone()->set_is_resizable(false),
                                                           &b_info.clone()->set_is_resizable(false),
                                                           nullptr,
                                                           &output_info.clone()->set_is_resizable(false));
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

/** Test case for memory injection in @ref cpu::CpuGemmLowpMatrixMultiplyCore.
 *
 * Configure the operator once and inject memory at run-time in multiple executions.
 *
 * Checks performed in order:
 * - Both runs compute the same output
 */
TEST_CASE(MemoryInjection, framework::DatasetMode::ALL)
{
    auto gemm     = std::make_unique<cpu::CpuGemmLowpMatrixMultiplyCore>();
    auto a_info   = TensorInfo(TensorShape(32U, 72U), 1, DataType::QASYMM8);
    auto b_info   = TensorInfo(TensorShape(17U, 32U), 1, DataType::QASYMM8);
    auto dst_info = TensorInfo(TensorShape(17U, 72U), 1, DataType::S32);
    a_info.set_quantization_info(QuantizationInfo(1.0f / 255, -9));
    b_info.set_quantization_info(QuantizationInfo(1.0f / 255, 1));
    const auto gemm_info = GEMMInfo{};
    gemm->configure(&a_info, &b_info, nullptr, &dst_info, gemm_info);

    // Source tensors are allocated once; the lambda below creates a fresh destination for every run
    auto a   = create_tensor<Tensor>(a_info);
    auto b   = create_tensor<Tensor>(b_info);
    auto dst = create_tensor<Tensor>(dst_info);
    a.allocator()->allocate();
    b.allocator()->allocate();
    dst.allocator()->allocate();

    ITensorPack run_pack =
    {
        { TensorType::ACL_SRC_0, &a },
        { TensorType::ACL_SRC_1, &b },
        { TensorType::ACL_DST, &dst }
    };
    ITensorPack prep_pack =
    {
        { TensorType::ACL_SRC_1, &b },
    };

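    // Allocate the operator's auxiliary workspace once and hand it to prepare()/run() through
    // the tensor packs, instead of letting the operator own its memory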
    auto mg = MemoryGroup{};
    auto ws = manage_workspace<Tensor>(gemm->workspace(), mg, run_pack, prep_pack);

    auto run_conv = [&]() -> Tensor
    {
        auto dst = create_tensor<Tensor>(dst_info);
        dst.allocator()->allocate();
        run_pack.add_tensor(TensorType::ACL_DST, &dst);

        library->fill_tensor_value(Accessor(a), static_cast<uint8_t>(1));
        library->fill_tensor_value(Accessor(b), static_cast<uint8_t>(2));
        // This operator is configured once and captured by this lambda.
        gemm->prepare(prep_pack);
        gemm->run(run_pack);
        return dst;
    };
    auto result_0 = run_conv();
    auto result_1 = run_conv();
    for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
    {
        ARM_COMPUTE_EXPECT(((uint8_t *)result_0.buffer())[i] == ((uint8_t *)result_1.buffer())[i], framework::LogLevel::ERRORS);
    }
}

/** Test case for memory injection in @ref NEGEMMLowpMatrixMultiplyCore.
 *
 * Make sure @ref NEGEMMLowpMatrixMultiplyCore still works through injecting the memory at configure time using the old API.
 *
 * Checks performed in order:
 * - Both runs compute the same output
 */
TEST_CASE(MultipleExecutionWithConfigure, framework::DatasetMode::ALL)
{
    auto gemm     = std::make_unique<NEGEMMLowpMatrixMultiplyCore>();
    auto a_info   = TensorInfo(TensorShape(32U, 72U), 1, DataType::QASYMM8);
    auto b_info   = TensorInfo(TensorShape(17U, 32U), 1, DataType::QASYMM8);
    auto dst_info = TensorInfo(TensorShape(17U, 72U), 1, DataType::S32);
    a_info.set_quantization_info(QuantizationInfo(1.0f / 255, -9));
    b_info.set_quantization_info(QuantizationInfo(1.0f / 255, 1));
    const auto gemm_info = GEMMInfo{};
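    // With the legacy run() API memory is bound at configure() time, so the captured operator
    // is re-configured against the fresh tensors created on each call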
    auto run_conv = [&]()
    {
        auto a   = create_tensor<Tensor>(a_info);
        auto b   = create_tensor<Tensor>(b_info);
        auto dst = create_tensor<Tensor>(dst_info);
        gemm->configure(&a, &b, nullptr, &dst, gemm_info);
        a.allocator()->allocate();
        b.allocator()->allocate();
        dst.allocator()->allocate();
        library->fill_tensor_value(Accessor(a), static_cast<uint8_t>(1));
        library->fill_tensor_value(Accessor(b), static_cast<uint8_t>(2));
        gemm->run();
        return dst;
    };
    auto result_0 = run_conv();
    auto result_1 = run_conv();
    for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
    {
        ARM_COMPUTE_EXPECT(((uint8_t *)result_0.buffer())[i] == ((uint8_t *)result_1.buffer())[i], framework::LogLevel::ERRORS);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BatchedMatMul)
TEST_SUITE(QASYMM8)
using NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixtureBatchedUnsigned =
    GEMMLowpBatchedMatrixMultiplyCoreFusedOffsetOutputFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore, false, false, uint8_t, uint8_t, true>;
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixtureBatchedUnsigned, framework::DatasetMode::ALL,
                       combine(datasets::SmallGEMMLowpFusedBatchedMatMulDataset(),
                               make("DataType", { DataType::QASYMM8 }),
                               make("reshape_b_only_on_first_run", { false })))
{
    validate(Accessor(_target), _reference, tolerance_batched);
}
TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)
using NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixtureBatchedSigned =
    GEMMLowpBatchedMatrixMultiplyCoreFusedOffsetOutputFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore, false, false, int8_t, int8_t, true>;
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixtureBatchedSigned, framework::DatasetMode::ALL,
                       combine(datasets::SmallGEMMLowpFusedBatchedMatMulDataset(),
                               make("DataType", { DataType::QASYMM8_SIGNED }),
                               make("reshape_b_only_on_first_run", { false })))
{
    validate(Accessor(_target), _reference, tolerance_batched);
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // BatchedMatMul

TEST_SUITE(FusedOffsetOutput)
using NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture = GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::ALL,
                       combine(datasets::SmallGEMMLowpFusedOffsetOutputUint8Dataset(),
                               make("DataType", { DataType::QASYMM8 }),
                               make("reshape_b_only_on_first_run", { false })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_quant);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeGEMMLowpFusedOffsetOutputUint8Dataset(),
                               make("DataType", { DataType::QASYMM8 }),
                               make("reshape_b_only_on_first_run", { false })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_quant);
}
TEST_SUITE_END() // FusedOffsetOutput

// Accumulation is not supported for Int8/UInt8 in aarch32
#ifdef __aarch64__
TEST_SUITE(ACCUMULATION)
TEST_SUITE(S32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreAccumulateFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
{
    // Validate output
    validate(Accessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreAccumulateFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // S32
TEST_SUITE_END() // ACCUMULATION
#endif // __aarch64__

TEST_SUITE_END() // MatrixMultiplyCore
TEST_SUITE_END() // GEMMLowp
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute