/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLIm2ColKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/Im2ColFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
TEST_SUITE(CL)
TEST_SUITE(Im2Col)

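// Im2Col unrolls the input patches seen by a convolution into the columns of a matrix,
// so the convolution can be computed as a matrix multiplication (GEMM): one column per
// output position, each holding kernel_w * kernel_h * channels values (plus one value
// when the bias is appended). The kernel under test is wrapped with CLSynthetizeFunction
// (tests/CL/Helper.h) so the fixtures below can configure and run it like a regular function.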
using CLIm2Col = CLSynthetizeFunction<CLIm2ColKernel>;

/** Negative tests
 *
 * A series of validation tests on configurations which, according to the API
 * specification, the function should reject.
 *
 * Checks performed in order:
 * - Pass an unsupported data type for the input
 * - Pass a quantized input and ask to compress the bias into the resulting matrix
 * - Pass a dilation factor of 0
 * - Request a grouped operation with the NHWC data layout
 * - Request an NCHW grouped operation where the number of channels is not a multiple of the number of groups
 * - Pass an invalid output shape
 * - Pass kernel dimensions that are too big for the input
 */
TEST_CASE(Negative, framework::DatasetMode::ALL)
{
    // Unsupported data type
    {
        const auto input     = TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::SIZET);
        const auto output    = TensorInfo(TensorShape(9U, 10U, 12U, 2U), 1, DataType::F32);
        const auto conv_size = Size2D(3, 3);
        const bool has_bias  = false;
        const auto status    = CLIm2ColKernel::validate(&input, &output, conv_size, PadStrideInfo(), has_bias);
        ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
    }

    // Quantized input while requesting to merge the bias into the output
    {
        const auto input     = TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::QASYMM8);
        const auto output    = TensorInfo(TensorShape(9U, 80U, 2U), 1, DataType::QASYMM8);
        const auto conv_size = Size2D(3, 3);
        const bool has_bias  = true;
        const auto status    = CLIm2ColKernel::validate(&input, &output, conv_size, PadStrideInfo(), has_bias);
        ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
    }

    // Invalid dilation
    {
        const auto input     = TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32);
        const auto output    = TensorInfo(TensorShape(9U, 80U, 2U), 1, DataType::F32);
        const auto conv_size = Size2D(3, 3);
        const auto dilation  = Size2D(0, 1);
        const bool has_bias  = false;
        const auto status    = CLIm2ColKernel::validate(&input, &output, conv_size, PadStrideInfo(), has_bias, dilation);
        ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
    }

    // NHWC data layout with grouping greater than 1
    {
        const auto input              = TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32, DataLayout::NHWC);
        const auto output             = TensorInfo(TensorShape(9U, 80U, 2U), 1, DataType::F32);
        const auto conv_size          = Size2D(3, 3);
        const auto dilation           = Size2D(1, 1);
        const bool has_bias           = false;
        const unsigned int num_groups = 2;
        const auto status             = CLIm2ColKernel::validate(&input, &output, conv_size, PadStrideInfo(), has_bias, dilation, num_groups);
        ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
    }

    // NCHW data layout with channels not a multiple of num_groups
    {
        const auto input              = TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32, DataLayout::NCHW);
        const auto output             = TensorInfo(TensorShape(9U, 80U, 2U), 1, DataType::F32);
        const auto conv_size          = Size2D(3, 3);
        const auto dilation           = Size2D(1, 1);
        const bool has_bias           = false;
        const unsigned int num_groups = 2;
        const auto status             = CLIm2ColKernel::validate(&input, &output, conv_size, PadStrideInfo(), has_bias, dilation, num_groups);
        ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
    }

    // Invalid output shape
    {
        const auto input     = TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32);
        const auto output    = TensorInfo(TensorShape(9U, 81U, 2U), 1, DataType::F32);
        const auto conv_size = Size2D(3, 3);
        const bool has_bias  = false;
        const auto status    = CLIm2ColKernel::validate(&input, &output, conv_size, PadStrideInfo(), has_bias);
        ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
    }

    // Kernel dimensions are too big
    {
        const auto input     = TensorInfo(TensorShape(1U, 9U, 5U, 2U), 1, DataType::F32, DataLayout::NHWC);
        const auto output    = TensorInfo(TensorShape(1U, 1U, 1U, 2U), 1, DataType::F32, DataLayout::NHWC);
        const auto conv_size = Size2D(9, 9);
        const bool has_bias  = false;
        const auto status    = CLIm2ColKernel::validate(&input, &output, conv_size, PadStrideInfo(), has_bias);
        ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
    }
}

template <typename T>
using CLIm2ColFixture = Im2ColValidationFixture<CLTensor, CLAccessor, CLIm2Col, T, true>;

TEST_SUITE(NHWC)
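// Note: input TensorShapes in the NHWC tests below follow Compute Library's dimension
// order for NHWC tensors, i.e. (channels, width, height, batches).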

/** Test that no padding is added to the input or output as part of configure()
 *
 * @note 2 elements processed per iteration
 *
 * Three tests will be run:
 * - Channels are a multiple of the number of elements processed per iteration
 * - Channels are larger than and not a multiple of the elements processed
 * - Channels are smaller than and not a multiple of the elements processed
 */
DATA_TEST_CASE(ValidateZeroPaddingNumElemsPerIterEqual2, framework::DatasetMode::ALL,
               combine(combine(combine(combine(combine(
                   framework::dataset::make("InputChannel", { 2, 9, 1 }),
                   framework::dataset::make("DataType", { DataType::F32 })),
                   framework::dataset::make("Kernel", { Size2D(3, 4) })),
                   framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 1, 2) })),
                   framework::dataset::make("QInfo", { QuantizationInfo() })),
                   framework::dataset::make("DataLayout", { DataLayout::NHWC })),
               input_channel, data_type, conv_size, pad_stride_info, qinfo, data_layout)
{
    TensorShape input_shape(input_channel, 10U, 30U, 3U);
    const bool  has_bias = false;

    const auto input_info   = TensorInfo(input_shape, 1, data_type, data_layout);
    const auto output_shape = compute_im2col_conv_shape(&input_info, conv_size, pad_stride_info, has_bias, Size2D(1U, 1U), true);

    CLTensor input  = create_tensor<CLTensor>(input_shape, data_type, 1, qinfo, data_layout);
    CLTensor output = create_tensor<CLTensor>(output_shape, data_type, 1, qinfo, data_layout);

    CLIm2ColKernel im2col;
    im2col.configure(&input, &output, conv_size, pad_stride_info, has_bias);

    // Ensure no padding is added to either tensor during configure
    const bool no_padding = input.info()->padding().empty() && output.info()->padding().empty();
    ARM_COMPUTE_EXPECT(no_padding, framework::LogLevel::ERRORS);
}
/** Test the specialised NHWC kernel for 3x3 kernel sizes
 *
 * @note 2 elements processed per iteration
 *
 * Three tests will be run:
 * - Channels are a multiple of the number of elements processed per iteration
 * - Channels are larger than and not a multiple of the elements processed
 * - Channels are smaller than and not a multiple of the elements processed
 *
 * Kernel tested: im2col3x3_nhwc
 */
FIXTURE_DATA_TEST_CASE(W3x3,
                       CLIm2ColFixture<float>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", { TensorShape(2U, 5U, 7U, 2U), TensorShape(3U, 4U, 6U, 2U), TensorShape(1U, 5U, 3U, 2U) }),
                           framework::dataset::make("DataType", DataType::F32)),
                           framework::dataset::make("Kernel", Size2D(3, 3))),
                           framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 2), PadStrideInfo(1, 1, 0, 0) })),
                           framework::dataset::make("QInfo", QuantizationInfo())),
                           framework::dataset::make("DataLayout", DataLayout::NHWC)),
                           framework::dataset::make("Groups", 1)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

/** Test the specialised NHWC kernel for 9x9 kernel sizes
 *
 * @note 2 elements processed per iteration
 *
 * Three tests will be run:
 * - Channels are a multiple of the number of elements processed per iteration
 * - Channels are larger than and not a multiple of the elements processed
 * - Channels are smaller than and not a multiple of the elements processed
 *
 * Kernel tested: im2col9x9_nhwc
 */
FIXTURE_DATA_TEST_CASE(W9x9,
                       CLIm2ColFixture<float>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", { TensorShape(2U, 13U, 15U, 2U), TensorShape(3U, 15U, 12U, 2U), TensorShape(1U, 13U, 22U, 2U) }),
                           framework::dataset::make("DataType", DataType::F32)),
                           framework::dataset::make("Kernel", Size2D(9, 9))),
                           framework::dataset::make("PadStride", { PadStrideInfo(2, 2, 1, 2), PadStrideInfo(1, 1, 0, 0) })),
                           framework::dataset::make("QInfo", QuantizationInfo())),
                           framework::dataset::make("DataLayout", DataLayout::NHWC)),
                           framework::dataset::make("Groups", 1)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

/** Test the generic NHWC kernel
 *
 * @note 2 elements processed per iteration
 *
 * Three tests will be run:
 * - Channels are a multiple of the number of elements processed per iteration
 * - Channels are larger than and not a multiple of the elements processed
 * - Channels are smaller than and not a multiple of the elements processed
 *
 * Kernel tested: im2col_generic_nhwc
 */
FIXTURE_DATA_TEST_CASE(Generic,
                       CLIm2ColFixture<float>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", { TensorShape(4U, 13U, 15U, 2U), TensorShape(7U, 15U, 12U, 1U), TensorShape(1U, 5U, 3U, 1U) }),
                           framework::dataset::make("DataType", DataType::F32)),
                           framework::dataset::make("Kernel", Size2D(5, 3))),
                           framework::dataset::make("PadStride", { PadStrideInfo(2, 2, 1, 2), PadStrideInfo(1, 1, 0, 0) })),
                           framework::dataset::make("QInfo", QuantizationInfo())),
                           framework::dataset::make("DataLayout", DataLayout::NHWC)),
                           framework::dataset::make("Groups", 1)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // NHWC

TEST_SUITE(NCHW)
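// Note: input TensorShapes in the NCHW tests below follow Compute Library's dimension
// order for NCHW tensors, i.e. (width, height, channels, batches).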

/** Test the specialised NCHW kernel for 1x1 kernel sizes with stride 1 and no padding
 *
 * @note 4 elements processed per iteration
 *
 * Three tests will be run:
 * - Channels are a multiple of the number of elements processed per iteration
 * - Channels are larger than and not a multiple of the elements processed
 * - Channels are smaller than and not a multiple of the elements processed
 *
 * Kernel tested: im2col1x1_stridex1_nchw
 */
FIXTURE_DATA_TEST_CASE(W1x1_Stride1_NoPad,
                       CLIm2ColFixture<float>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", { TensorShape(4U, 4U, 3U, 2U), TensorShape(5U, 4U, 3U, 2U), TensorShape(3U, 4U, 3U, 2U) }),
                           framework::dataset::make("DataType", DataType::F32)),
                           framework::dataset::make("Kernel", Size2D(1, 1))),
                           framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
                           framework::dataset::make("QInfo", QuantizationInfo())),
                           framework::dataset::make("DataLayout", DataLayout::NCHW)),
                           framework::dataset::make("Groups", 1)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

/** Test the specialised NCHW kernel for 3x3 kernel sizes
 *
 * @note 1 element processed per iteration
 *
 * A single test is executed, as padding is required.
 *
 * Kernel tested: im2col3x3_nchw
 */
FIXTURE_DATA_TEST_CASE(W3x3,
                       CLIm2ColFixture<float>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", TensorShape(4U, 4U, 3U, 2U)),
                           framework::dataset::make("DataType", DataType::F32)),
                           framework::dataset::make("Kernel", Size2D(3, 3))),
                           framework::dataset::make("PadStride", PadStrideInfo(1, 2, 1, 2))),
                           framework::dataset::make("QInfo", QuantizationInfo())),
                           framework::dataset::make("DataLayout", DataLayout::NCHW)),
                           framework::dataset::make("Groups", { 1, 3 })))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

/** Test the specialised NCHW kernel for 5x5 kernel sizes
 *
 * @note 1 element processed per iteration
 *
 * A single test is executed, as padding is required.
 *
 * Kernel tested: im2col5x5_nchw
 */
FIXTURE_DATA_TEST_CASE(W5x5,
                       CLIm2ColFixture<float>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", TensorShape(7U, 4U, 3U, 2U)),
                           framework::dataset::make("DataType", DataType::F32)),
                           framework::dataset::make("Kernel", Size2D(5, 5))),
                           framework::dataset::make("PadStride", PadStrideInfo(2, 1, 2, 1))),
                           framework::dataset::make("QInfo", QuantizationInfo())),
                           framework::dataset::make("DataLayout", DataLayout::NCHW)),
                           framework::dataset::make("Groups", { 1, 3 })))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

/** Test the specialised NCHW kernel for 11x11 kernel sizes when no padding is present
 *
 * @note 1 element processed per iteration
 *
 * Two tests will be run:
 * - Without padding requirements
 * - With padding requirements
 *
 * Kernel tested: im2col11x11_padx0_pady0_nchw
 */
FIXTURE_DATA_TEST_CASE(W11x11_NoPad,
                       CLIm2ColFixture<float>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", { TensorShape(11U, 11U, 2U, 2U), TensorShape(14U, 13U, 1U, 2U) }),
                           framework::dataset::make("DataType", DataType::F32)),
                           framework::dataset::make("Kernel", Size2D(11, 11))),
                           framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
                           framework::dataset::make("QInfo", QuantizationInfo())),
                           framework::dataset::make("DataLayout", DataLayout::NCHW)),
                           framework::dataset::make("Groups", 1)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

/** Test the specialised NCHW kernel for kernel sizes which do not fall into the categories above and have no padding present
 *
 * @note 1 element processed per iteration
 *
 * A single test is executed, as padding is required.
 *
 * Kernel tested: im2col_generic_padx0_pady0_nchw
 */
FIXTURE_DATA_TEST_CASE(GenericZeroPad,
                       CLIm2ColFixture<float>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", TensorShape(13U, 11U, 2U, 2U)),
                           framework::dataset::make("DataType", DataType::F32)),
                           framework::dataset::make("Kernel", Size2D(3, 2))),
                           framework::dataset::make("PadStride", PadStrideInfo(2, 1, 0, 0))),
                           framework::dataset::make("QInfo", QuantizationInfo())),
                           framework::dataset::make("DataLayout", DataLayout::NCHW)),
                           framework::dataset::make("Groups", { 1, 2 })))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // NCHW

/** Generic NCHW/NHWC kernel
 *
 * @note 1 element processed per iteration
 *
 * Padding is not needed, so sample tests are executed with different kernel sizes
 * and stride/padding information.
 *
 * Kernels tested: im2col_generic_(nchw|nhwc)
 */
FIXTURE_DATA_TEST_CASE(Generic,
                       CLIm2ColFixture<float>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", TensorShape(13U, 11U, 5U, 2U)),
                           framework::dataset::make("DataType", DataType::F32)),
                           framework::dataset::make("Kernel", { Size2D(3, 2), Size2D(3, 5) })),
                           framework::dataset::make("PadStride", PadStrideInfo(2, 1, 2, 1))),
                           framework::dataset::make("QInfo", QuantizationInfo())),
                           framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                           framework::dataset::make("Groups", 1)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

/** Tests to check that the quantized padding value is set correctly
 *
 * Kernels tested:
 * - im2col_generic_nhwc
 * - im2col_generic_nchw
 * - im2col5x5_nchw
 * - im2col3x3_nhwc
 * - im2col3x3_nchw
 * - im2col9x9_nhwc
 */
FIXTURE_DATA_TEST_CASE(Quantized,
                       CLIm2ColFixture<uint8_t>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", TensorShape(13U, 11U, 11U, 2U)),
                           framework::dataset::make("DataType", DataType::QASYMM8)),
                           framework::dataset::make("Kernel", { Size2D(1, 1), Size2D(3, 3), Size2D(5, 5), Size2D(3, 5), Size2D(9, 9) })),
                           framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
                           framework::dataset::make("QInfo", QuantizationInfo(0.5f, 10))),
                           framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                           framework::dataset::make("Groups", 1)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

/** Tests to check half-precision execution
 *
 * Kernels tested:
 * - im2col_generic_nhwc
 * - im2col_generic_nchw
 * - im2col5x5_nchw
 * - im2col3x3_nhwc
 * - im2col3x3_nchw
 * - im2col9x9_nhwc
 */
FIXTURE_DATA_TEST_CASE(FP16,
                       CLIm2ColFixture<half>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           framework::dataset::make("InputShape", TensorShape(13U, 11U, 11U, 2U)),
                           framework::dataset::make("DataType", DataType::F16)),
                           framework::dataset::make("Kernel", { Size2D(1, 1), Size2D(3, 3), Size2D(5, 5), Size2D(3, 5), Size2D(9, 9) })),
                           framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
                           framework::dataset::make("QInfo", QuantizationInfo())),
                           framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                           framework::dataset::make("Groups", 1)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE_END() // Im2Col
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute