/*
 * Copyright (c) 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEPooling3dLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/Pooling3dLayerDataset.h"
#include "tests/datasets/PoolingTypesDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/Pooling3dLayerFixture.h"

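// Validation tests for the Neon 3D pooling function (NEPooling3dLayer), covering F32, F16,
// QASYMM8 and QASYMM8_SIGNED data types in the NDHWC layout.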
namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
/** Input data sets for floating-point data types */
const auto Pooling3dLayerDatasetFP = combine(combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { Size3D(2, 3, 2) })),
                                                             framework::dataset::make("Stride", { Size3D(1, 1, 1), Size3D(2, 1, 1), Size3D(1, 2, 1), Size3D(2, 2, 1) })),
                                                     framework::dataset::make("Padding", { Padding3D(0, 1, 0), Padding3D(1, 1, 1) })),
                                             framework::dataset::make("ExcludePadding", { true, false }));

const auto Pooling3dLayerDatasetFPSmall = combine(combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { Size3D(2, 2, 2), Size3D(3, 3, 3) })),
                                                                  framework::dataset::make("Stride", { Size3D(2, 2, 2), Size3D(2, 1, 1) })),
                                                          framework::dataset::make("Padding", { Padding3D(0, 0, 0), Padding3D(1, 1, 1), Padding3D(1, 0, 0) })),
                                                  framework::dataset::make("ExcludePadding", { true, false }));

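/** Input data sets for quantized data types: pooling type, window size, stride and padding
 *  combinations used by the QASYMM8/QASYMM8_SIGNED precommit (Small) and nightly (Large) runs. */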
const auto Pooling3dLayerDatasetQASYMM8Small = combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
                                                                               framework::dataset::make("PoolingSize", { Size3D(3, 3, 3) })),
                                                                       framework::dataset::make("Stride", { Size3D(1, 1, 1), Size3D(2, 1, 1), Size3D(1, 2, 1), Size3D(2, 2, 1) })),
                                                               framework::dataset::make("Padding", { Padding3D(0, 0, 0), Padding3D(1, 1, 1), Padding3D(1, 0, 0) })),
                                                       framework::dataset::make("ExcludePadding", { true }));

const auto Pooling3dLayerDatasetQASYMM8Large = combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
                                                                               framework::dataset::make("PoolingSize", { Size3D(3, 3, 3) })),
                                                                       framework::dataset::make("Stride", { Size3D(1, 1, 1), Size3D(2, 2, 1) })),
                                                               framework::dataset::make("Padding", { Padding3D(0, 0, 0), Padding3D(1, 1, 0) })),
                                                       framework::dataset::make("ExcludePadding", { true }));

using ShapeDataset = framework::dataset::ContainerDataset<std::vector<TensorShape>>;

constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);   /**< Tolerance value for comparing reference's output against implementation's output for unsigned 8-bit asymmetric type */
constexpr AbsoluteTolerance<int8_t>  tolerance_qasymm8_s(1); /**< Tolerance value for comparing reference's output against implementation's output for signed 8-bit asymmetric type */

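// Quantization info for the quantized tests: the input scale/offset is fixed, while the output
// quantization varies to cover the same-qinfo, requantization multiplier <= 1 and multiplier > 1 cases.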
const auto qasymm8_in_qinfo_dataset  = framework::dataset::make("InputQuantInfo", { QuantizationInfo(.2f, 10) });
const auto qasymm8_out_qinfo_dataset = framework::dataset::make("OutputQuantInfo",
{
    QuantizationInfo(.2f, 10), // Same qinfo
    QuantizationInfo(.1f, 5),  // Multiplier <= 1
    QuantizationInfo(2.f, 3)   // Multiplier > 1
});

const auto qasymm8_signed_in_qinfo_dataset  = framework::dataset::make("InputQuantInfo", { QuantizationInfo(.2f, -10) });
const auto qasymm8_signed_out_qinfo_dataset = framework::dataset::make("OutputQuantInfo",
{
    QuantizationInfo(.2f, -10), // Same qinfo
    QuantizationInfo(.1f, -5),  // Multiplier <= 1
    QuantizationInfo(2.f, -3)   // Multiplier > 1
});

} // namespace

TEST_SUITE(NEON)
TEST_SUITE(Pooling3dLayer)

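// Configuration validation: each tuple pairs an input/output TensorInfo with a Pooling3dLayerInfo
// and the expected result of NEPooling3dLayer::validate(), exercising error cases such as
// mismatching data types, invalid pad/size combinations, wrong output shapes and unsupported
// data layouts.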
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
    framework::dataset::make("InputInfo",
    {
        TensorInfo(TensorShape(2U, 27U, 13U, 4U, 3U), 1, DataType::F32, DataLayout::NDHWC),  // Mismatching data type
        TensorInfo(TensorShape(2U, 27U, 13U, 4U, 2U), 1, DataType::F32, DataLayout::NDHWC),  // Invalid pad/size combination
        TensorInfo(TensorShape(2U, 27U, 13U, 4U, 2U), 1, DataType::F32, DataLayout::NDHWC),  // Invalid pad/size combination
        TensorInfo(TensorShape(2U, 27U, 13U, 4U, 3U), 1, DataType::F32, DataLayout::NDHWC),  // Invalid output shape
        TensorInfo(TensorShape(5U, 13U, 15U, 2U, 3U), 1, DataType::F32, DataLayout::NDHWC),  // Global Pooling
        TensorInfo(TensorShape(13U, 13U, 5U, 1U, 2U), 1, DataType::F32, DataLayout::NDHWC),  // Invalid output Global Pooling
        TensorInfo(TensorShape(5U, 13U, 13U, 4U, 4U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(5U, 13U, 13U, 4U, 4U), 1, DataType::F32, DataLayout::NDHWC),  // Invalid data type
        TensorInfo(TensorShape(5U, 13U, 13U, 4U, 4U), 1, DataType::F32, DataLayout::NHWC),   // Invalid data layout
        TensorInfo(TensorShape(5U, 13U, 13U, 5U, 4U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(1U, 16U, 1U, 3U, 4U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(5U, 13U, 13U, 4U, 3U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(5U, 13U, 13U, 4U, 2U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(5U, 13U, 13U, 4U, 3U), 1, DataType::F32, DataLayout::NDHWC),
    }),
    framework::dataset::make("OutputInfo",
    {
        TensorInfo(TensorShape(2U, 25U, 11U, 3U, 3U), 1, DataType::F16, DataLayout::NDHWC),
        TensorInfo(TensorShape(2U, 30U, 11U, 3U, 2U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(2U, 25U, 16U, 3U, 2U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(2U, 27U, 13U, 3U, 3U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(5U, 1U, 1U, 1U, 3U), 1, DataType::F32, DataLayout::NDHWC),        // Global pooling applied
        TensorInfo(TensorShape(5U, 2U, 2U, 2U, 2U), 1, DataType::F32, DataLayout::NDHWC),        // Invalid output Global Pooling
        TensorInfo(TensorShape(5U, 12U, 12U, 3U, 4U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(5U, 12U, 12U, 3U, 4U), 1, DataType::QASYMM8, DataLayout::NDHWC),  // Invalid data type
        TensorInfo(TensorShape(5U, 12U, 12U, 3U, 4U), 1, DataType::F32, DataLayout::NDHWC),      // Invalid data layout
        TensorInfo(TensorShape(5U, 1U, 1U, 1U, 4U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(1U, 15U, 1U, 2U, 4U), 1, DataType::F32, DataLayout::NDHWC),       // size larger than height
        TensorInfo(TensorShape(5U, 6U, 6U, 2U, 3U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(5U, 6U, 6U, 2U, 2U), 1, DataType::F32, DataLayout::NDHWC),
        TensorInfo(TensorShape(5U, 6U, 6U, 2U, 3U), 1, DataType::F32, DataLayout::NDHWC),
    })),
    framework::dataset::make("PoolInfo",
    {
        Pooling3dLayerInfo(PoolingType::AVG, 3, Size3D(1, 1, 1), Padding3D(0, 0, 0)),
        Pooling3dLayerInfo(PoolingType::AVG, 2, Size3D(1, 1, 1), Padding3D(2, 0, 0)),
        Pooling3dLayerInfo(PoolingType::AVG, 2, Size3D(1, 1, 1), Padding3D(0, 0, 0)),
        Pooling3dLayerInfo(PoolingType::L2, 3, Size3D(1, 1, 1), Padding3D(0, 0, 0)),
        Pooling3dLayerInfo(PoolingType::AVG),
        Pooling3dLayerInfo(PoolingType::MAX),
        Pooling3dLayerInfo(PoolingType::AVG, 2, Size3D(), Padding3D(), false),
        Pooling3dLayerInfo(PoolingType::AVG, 2, Size3D(1U, 1U, 1U), Padding3D(), false),
        Pooling3dLayerInfo(PoolingType::AVG, 2, Size3D(1U, 1U, 1U), Padding3D(), false),
        Pooling3dLayerInfo(PoolingType::AVG),
        Pooling3dLayerInfo(PoolingType::MAX, 2, Size3D(1, 1, 2), Padding3D(0, 0, 0), false),
        Pooling3dLayerInfo(PoolingType::AVG, 2, Size3D(2U, 2U, 2U), Padding3D(), false),
        Pooling3dLayerInfo(PoolingType::AVG, 1, Size3D(2U, 2U, 2U), Padding3D(2, 2, 2), true),   // pool size is equal to the padding size
        Pooling3dLayerInfo(PoolingType::AVG, 1, Size3D(2U, 2U, 2U), Padding3D(2, 2, 2), false),  // pool size is equal to the padding size
        Pooling3dLayerInfo(PoolingType::AVG, 3, Size3D(2U, 2U, 2U), Padding3D(2, 1, 2, 2, 1, 2), false, false, DimensionRoundingType::CEIL), // CEIL with asymmetric padding
    })),
    framework::dataset::make("Expected", { false, false, false, false, true, false, false, false, false, true, false, true, false, false, false })),
    input_info, output_info, pool_info, expected)
{
    bool is_valid = bool(NEPooling3dLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info));
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

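// Fixture aliases instantiating the generic Pooling3dLayer validation fixtures with the Neon
// Tensor, Accessor and NEPooling3dLayer types.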
template <typename T>
using NEPoolingLayer3dFixture = Pooling3dLayerValidationFixture<Tensor, Accessor, NEPooling3dLayer, T>;

template <typename T>
using NESpecial3dPoolingLayerFixture = SpecialPooling3dLayerValidationFixture<Tensor, Accessor, NEPooling3dLayer, T>;

template <typename T>
using NEPooling3dLayerGlobalFixture = Pooling3dLayerGlobalValidationFixture<Tensor, Accessor, NEPooling3dLayer, T>;

// clang-format on
// *INDENT-ON*
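// The fixtures above (declared in tests/validation/fixtures/Pooling3dLayerFixture.h) follow the
// usual Compute Library test flow: create the tensors, configure the function, allocate and fill
// them, run, and compare the result against the C++ reference within the tolerance for the data
// type. Roughly, and assuming a 5D NDHWC source tensor `src` and a destination tensor `dst` have
// already been set up (the configure()/run() calls are the assumed runtime counterparts of the
// validate() call exercised above):
//
//     NEPooling3dLayer pool3d;
//     pool3d.configure(&src, &dst, Pooling3dLayerInfo(PoolingType::AVG, 3, Size3D(1, 1, 1), Padding3D(0, 0, 0)));
//     pool3d.run();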
TEST_SUITE(Float)
TEST_SUITE(FP32)

FIXTURE_DATA_TEST_CASE(RunSpecial, NESpecial3dPoolingLayerFixture<float>, framework::DatasetMode::ALL, datasets::Pooling3dLayerDatasetSpecial() * framework::dataset::make("DataType", DataType::F32))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayer3dFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::Small5dShapes(), combine(Pooling3dLayerDatasetFPSmall, framework::dataset::make("DataType", DataType::F32))))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEPoolingLayer3dFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(datasets::Large5dShapes(), combine(Pooling3dLayerDatasetFPSmall, framework::dataset::make("DataType", DataType::F32))))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}

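// Global pooling: the pooling window spans the full width/height/depth of the input, so every
// spatial dimension of the output collapses to 1.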
TEST_SUITE(GlobalPooling)
// *INDENT-OFF*
// clang-format off
FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayer3dFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", { TensorShape(3U, 27U, 13U, 4U),
                                                                    TensorShape(4U, 27U, 13U, 4U, 2U) }),
                           framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
                           framework::dataset::make("PoolingSize", { Size3D(27, 13, 4) })),
                           framework::dataset::make("Strides", Size3D(1, 1, 1))),
                           framework::dataset::make("Paddings", Padding3D(0, 0, 0))),
                           framework::dataset::make("ExcludePadding", { false, true })),
                           framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunGlobalSmall, NEPooling3dLayerGlobalFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(
                           framework::dataset::make("InputShape", { TensorShape(27U, 13U, 4U, 3U),
                                                                    TensorShape(27U, 13U, 4U, 4U, 2U) }),
                           framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
                           framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEPoolingLayer3dFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", { TensorShape(4U, 79U, 37U, 11U),
                                                                    TensorShape(4U, 79U, 37U, 11U, 2U) }),
                           framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
                           framework::dataset::make("PoolingSize", { Size3D(79, 37, 11) })),
                           framework::dataset::make("Strides", Size3D(1, 1, 1))),
                           framework::dataset::make("Paddings", Padding3D(0, 0, 0))),
                           framework::dataset::make("ExcludePadding", { false, true })),
                           framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}

TEST_SUITE_END() // GlobalPooling
TEST_SUITE_END() // FP32

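// FP16 tests are only compiled when the toolchain provides FP16 vector arithmetic.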
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)

FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayer3dFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::Small5dShapes(), combine(Pooling3dLayerDatasetFPSmall, framework::dataset::make("DataType", DataType::F16))))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEPoolingLayer3dFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(datasets::Large5dShapes(), combine(Pooling3dLayerDatasetFP, framework::dataset::make("DataType", DataType::F16))))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}

TEST_SUITE(GlobalPooling)
// *INDENT-OFF*
// clang-format off
FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayer3dFixture<half>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", { TensorShape(3U, 27U, 13U, 4U),
                                                                    TensorShape(4U, 27U, 13U, 4U, 2U) }),
                           framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
                           framework::dataset::make("PoolingSize", { Size3D(27, 13, 4) })),
                           framework::dataset::make("Strides", Size3D(1, 1, 1))),
                           framework::dataset::make("Paddings", Padding3D(0, 0, 0))),
                           framework::dataset::make("ExcludePadding", { false, true })),
                           framework::dataset::make("DataType", DataType::F16)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}

FIXTURE_DATA_TEST_CASE(RunSmallGlobal, NEPooling3dLayerGlobalFixture<half>, framework::DatasetMode::ALL,
                       combine(combine(
                           framework::dataset::make("InputShape", { TensorShape(27U, 13U, 4U, 3U),
                                                                    TensorShape(27U, 13U, 4U, 4U, 2U) }),
                           framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
                           framework::dataset::make("DataType", DataType::F16)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEPoolingLayer3dFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", { TensorShape(4U, 79U, 37U, 11U),
                                                                    TensorShape(4U, 79U, 37U, 11U, 2U) }),
                           framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
                           framework::dataset::make("PoolingSize", { Size3D(79, 37, 11) })),
                           framework::dataset::make("Strides", Size3D(1, 1, 1))),
                           framework::dataset::make("Paddings", Padding3D(0, 0, 0))),
                           framework::dataset::make("ExcludePadding", false)),
                           framework::dataset::make("DataType", DataType::F16)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}

// clang-format on
// *INDENT-ON*
TEST_SUITE_END() // GlobalPooling
TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE_END() // Float
TEST_SUITE(Quantized)

template <typename T>
using NEPooling3dLayerQuantizedFixture = Pooling3dLayerValidationQuantizedFixture<Tensor, Accessor, NEPooling3dLayer, T>;

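// The quantized runs combine the shape and pooling datasets with the input/output quantization
// info defined above and accept a deviation of at most one quantized step from the reference
// (tolerance_qasymm8 / tolerance_qasymm8_s).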
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEPooling3dLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::Small5dShapes(),
                                               combine(Pooling3dLayerDatasetQASYMM8Small,
                                                       framework::dataset::make("DataType", DataType::QASYMM8))),
                                       qasymm8_in_qinfo_dataset),
                               qasymm8_out_qinfo_dataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEPooling3dLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(datasets::Large5dShapes(),
                                               combine(Pooling3dLayerDatasetQASYMM8Large,
                                                       framework::dataset::make("DataType", DataType::QASYMM8))),
                                       qasymm8_in_qinfo_dataset),
                               qasymm8_out_qinfo_dataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}

TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)

FIXTURE_DATA_TEST_CASE(RunSmall, NEPooling3dLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::Small5dShapes(),
                                               combine(Pooling3dLayerDatasetQASYMM8Small,
                                                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
                                       qasymm8_signed_in_qinfo_dataset),
                               qasymm8_signed_out_qinfo_dataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8_s);
}

TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // Pooling3dLayer
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute