/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeConvolutionLayerDataset.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/datasets/TinyConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float>  absolute_tolerance_float(0.0001f);    /**< Absolute tolerance for comparing the reference's output against the implementation's output for DataType::F32 */
RelativeTolerance<float>            tolerance_f32(0.05f);                 /**< Relative tolerance for comparing the reference's output against the implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Relative tolerance for comparing the reference's output against the implementation's output for DataType::F16 */
constexpr AbsoluteTolerance<float>  tolerance_qasymm8(0.0);               /**< Absolute tolerance for comparing the reference's output against the implementation's output for quantized data types */
constexpr float                     tolerance_num = 0.07f;                /**< Maximum allowed fraction of mismatching elements when validating the output */

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32,
    DataType::QASYMM8,
});

/** Grouped CNN data types */
const auto GroupedCNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32
});

const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
} // namespace

TEST_SUITE(CL)
TEST_SUITE(ConvolutionLayer)

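// Checks that CLConvolutionLayer::get_convolution_method selects the expected convolution method (GEMM or Winograd)
// for each combination of tensor shapes, convolution descriptor, GPU target, dilation and fast-math setting below.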
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
               framework::dataset::make("InputInfo",
               {
                   TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),
                   TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),
                   TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),
                   TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32),
                   TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
                   TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32),
                   TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32),
                   TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32)
               }),
               framework::dataset::make("WeightsInfo",
               {
                   TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                   TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                   TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                   TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32),
                   TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                   TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16),
                   TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32),
                   TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32)
               })),
               framework::dataset::make("OutputInfo",
               {
                   TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                   TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                   TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                   TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                   TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
                   TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32),
                   TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
                   TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32)
               })),
               framework::dataset::make("ConvInfo",
               {
                   PadStrideInfo(1, 2, 1, 1),
                   PadStrideInfo(1, 2, 1, 1),
                   PadStrideInfo(1, 1, 0, 0),
                   PadStrideInfo(1, 1, 0, 0),
                   PadStrideInfo(2, 1, 0, 0),
                   PadStrideInfo(3, 2, 1, 0),
                   PadStrideInfo(1, 1, 2, 2),
                   PadStrideInfo(1, 1, 2, 2)
               })),
               framework::dataset::make("GpuTarget",
               {
                   GPUTarget::BIFROST,
                   GPUTarget::MIDGARD,
                   GPUTarget::G71,
                   GPUTarget::G71,
                   GPUTarget::MIDGARD,
                   GPUTarget::BIFROST,
                   GPUTarget::BIFROST,
                   GPUTarget::BIFROST
               })),
               framework::dataset::make("Dilation",
               {
                   Size2D(1U, 1U),
                   Size2D(1U, 1U),
                   Size2D(1U, 1U),
                   Size2D(1U, 1U),
                   Size2D(1U, 1U),
                   Size2D(1U, 1U),
                   Size2D(1U, 1U),
                   Size2D(2U, 1U)
               })),
               framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true })),
               framework::dataset::make("Expected",
               {
                   ConvolutionMethod::GEMM,
                   ConvolutionMethod::GEMM,
                   ConvolutionMethod::GEMM,
                   ConvolutionMethod::WINOGRAD,
                   ConvolutionMethod::GEMM,
                   ConvolutionMethod::GEMM,
                   ConvolutionMethod::WINOGRAD,
                   ConvolutionMethod::GEMM
               })),
               input_info, weights_info, output_info, conv_info, gpu_target, dilation, enable_fast_math, expected)
{
    ConvolutionMethod is_valid = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
                                                                            &weights_info.clone()->set_is_resizable(true),
                                                                            &output_info.clone()->set_is_resizable(true),
                                                                            conv_info,
                                                                            WeightsInfo(),
                                                                            ActivationLayerInfo(),
                                                                            gpu_target,
                                                                            dilation,
                                                                            enable_fast_math);
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
TEST_SUITE_END() // ConvolutionLayer

TEST_SUITE(GEMMConvolutionLayer)

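// Configuration-only test: configures CLGEMMConvolutionLayer on the small and large datasets for every CNN data type
// and activation, then checks that each tensor's valid region matches its shape and that the quantization info is preserved.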
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
               CNNDataTypes),
               ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

    // Create tensors
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate QuantizationInfo
    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);

    // Validate padding
    //TODO(COMPMID-415) Need to validate padding?
}

template <typename T>
using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

TEST_SUITE(Float)
TEST_SUITE(FP16)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
TEST_SUITE_END() // FP16

TEST_SUITE(FP32)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, absolute_tolerance_float);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

template <typename T>
using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)

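// QASYMM8 results are expected to match the reference exactly: tolerance_qasymm8 is an absolute tolerance of 0.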
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::QASYMM8)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
                       QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::QASYMM8)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })),
                       QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // GEMMConvolutionLayer

template <typename T>
using CLGEMMGroupedConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

TEST_SUITE(GroupedGEMMConvolutionLayer)

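// Grouped convolution: the input channels are split into num_groups groups, each convolved with its own subset of the
// weights; num_groups is derived below from the input and weights channel counts.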
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallGroupedConvolutionLayerDataset(), datasets::LargeGroupedConvolutionLayerDataset()),
               GroupedCNNDataTypes),
               ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

    // The number of groups is the number of input channels of the input tensor divided by the number of input channels of the weights
    const int num_groups = input_shape[2] / weights_shape[2];

    // Create tensors
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type);
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1);
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, data_type, 1);
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1);

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info, num_groups);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate padding
    //TODO(COMPMID-415) Need to validate padding?
}

TEST_SUITE(Float)
TEST_SUITE(FP32)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP32

TEST_SUITE(FP16)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float

TEST_SUITE_END() // GroupedGEMMConvolutionLayer
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute