/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeConvolutionLayerDataset.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/datasets/TinyConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float>  absolute_tolerance_float(0.0001f);    /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<float>            tolerance_f32(0.1f);                  /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
constexpr AbsoluteTolerance<float>  tolerance_qasymm8(1);                 /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float                     tolerance_num = 0.07f;                /**< Tolerance number */
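// Note (approximate, as interpreted by the validate() helpers): relative tolerances scale with the
// magnitude of the reference value, absolute tolerances bound the error for values near zero, and
// tolerance_num is roughly the fraction of elements allowed to miss the tolerance in a comparison.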

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32,
    DataType::QASYMM8,
});

/** Grouped CNN data types */
const auto GroupedCNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32
});

const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
const auto ActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});
} // namespace

TEST_SUITE(CL)
TEST_SUITE(ConvolutionLayer)

// *INDENT-OFF*
// clang-format off
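// Checks that CLConvolutionLayer::get_convolution_method() selects the expected convolution method
// (GEMM or Winograd) for a range of input/weights/output shapes, pad/stride settings, GPU targets,
// dilations and fast-math flags.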
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),      // Select GEMM
                                            TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),      // Select GEMM
                                            TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),  // Select GEMM
                                            TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32), // Select WINOGRAD
                                            TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),    // Select GEMM
                                            TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32),  // Select GEMM
                                            TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32),     // Select WINOGRAD
                                            TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32)       // Select GEMM
                                          }),
    framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                              TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                              TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                              TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32),
                                              TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                              TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16),
                                              TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32),
                                              TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32)
                                            })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                             TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                             TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
                                             TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
                                             TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32)
                                           })),
    framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
                                           PadStrideInfo(1, 2, 1, 1),
                                           PadStrideInfo(1, 1, 0, 0),
                                           PadStrideInfo(1, 1, 0, 0),
                                           PadStrideInfo(2, 1, 0, 0),
                                           PadStrideInfo(3, 2, 1, 0),
                                           PadStrideInfo(1, 1, 2, 2),
                                           PadStrideInfo(1, 1, 2, 2)
                                         })),
    framework::dataset::make("GpuTarget", { GPUTarget::BIFROST,
                                            GPUTarget::MIDGARD,
                                            GPUTarget::G71,
                                            GPUTarget::G71,
                                            GPUTarget::MIDGARD,
                                            GPUTarget::BIFROST,
                                            GPUTarget::BIFROST,
                                            GPUTarget::BIFROST
                                          })),
    framework::dataset::make("Dilation", { Size2D(1U, 1U),
                                           Size2D(1U, 1U),
                                           Size2D(1U, 1U),
                                           Size2D(1U, 1U),
                                           Size2D(1U, 1U),
                                           Size2D(1U, 1U),
                                           Size2D(1U, 1U),
                                           Size2D(2U, 1U)
                                         })),
    framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true })),
    framework::dataset::make("Expected", { ConvolutionMethod::GEMM,
                                           ConvolutionMethod::GEMM,
                                           ConvolutionMethod::GEMM,
                                           ConvolutionMethod::WINOGRAD,
                                           ConvolutionMethod::GEMM,
                                           ConvolutionMethod::GEMM,
                                           ConvolutionMethod::WINOGRAD,
                                           ConvolutionMethod::GEMM
                                         })),
    input_info, weights_info, output_info, conv_info, gpu_target, dilation, enable_fast_math, expected)
{
    ConvolutionMethod is_valid = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
                                                                            &weights_info.clone()->set_is_resizable(true),
                                                                            &output_info.clone()->set_is_resizable(true), conv_info,
                                                                            WeightsInfo(),
                                                                            ActivationLayerInfo(),
                                                                            gpu_target,
                                                                            dilation,
                                                                            enable_fast_math);
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
TEST_SUITE_END() // ConvolutionLayer

TEST_SUITE(GEMMConvolutionLayer)

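// Checks that CLGEMMConvolutionLayer can be configured for every entry of the small dataset and that,
// after configure(), the tensors report the expected valid regions and keep their quantization info.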
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                           CNNDataTypes),
                                                                   ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

    // Create tensors
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate QuantizationInfo
    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);

    // Validate padding
    // TODO(COMPMID-415): Need to validate padding?
}
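// A rough usage sketch for the function configured above (assumed typical flow; allocation, filling
// and execution are intentionally not exercised by this configuration test):
//   src.allocator()->allocate();
//   weights.allocator()->allocate();
//   bias.allocator()->allocate();
//   dst.allocator()->allocate();
//   /* ...fill src, weights and bias with input data... */
//   conv.run();                // enqueue the OpenCL kernels
//   CLScheduler::get().sync(); // wait for the command queue to finish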

template <typename T>
using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

TEST_SUITE(Float)
TEST_SUITE(FP16)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
TEST_SUITE_END() // FP16

TEST_SUITE(FP32)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, absolute_tolerance_float);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

template <typename T>
using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
template <typename T>
using CLGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, int8_t>;

const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
const auto QuantizedActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});

TEST_SUITE(Quantized)

const auto QuantizationData = framework::dataset::make("QuantizationInfo",
{
    QuantizationInfo(0.5f, 10),
    QuantizationInfo(0.3f, 3),
    QuantizationInfo(1.1f, 10),
});
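// For QASYMM8 data, QuantizationInfo(scale, offset) maps a quantized value q to the real value
// scale * (q - offset); e.g. QuantizationInfo(0.5f, 10) represents q = 30 as 0.5f * (30 - 10) = 10.0f.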
TEST_SUITE(QASYMM8)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       QuantizationData),
                               QuantizedActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       QuantizationData),
                               QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QSYMM8_PER_CHANNEL)

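// In these tests only the weights use QSYMM8_PER_CHANNEL (symmetric quantization with one scale per
// output channel); the input and output tensors stay in QASYMM8.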
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
                                                                       framework::dataset::make("ReshapeWeights", { true })),
                                                               framework::dataset::make("DataType", { DataType::QASYMM8 })),
                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                               QuantizationData),
                                       QuantizedActivationFunctionsSmallDataset),
                               framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                       framework::dataset::make("ReshapeWeights", { true })),
                                                               framework::dataset::make("DataType", { DataType::QASYMM8 })),
                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                               QuantizationData),
                                       QuantizedActivationFunctionsDataset),
                               framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QSYMM8_PER_CHANNEL
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // GEMMConvolutionLayer

template <typename T>
using CLGEMMGroupedConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

TEST_SUITE(GroupedGEMMConvolutionLayer)

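// Grouped convolution splits the input channels into num_groups independent groups, each convolved
// with its own subset of the weights; the Configuration test below derives num_groups from the input
// and weights shapes and checks that configure() leaves the tensors in the expected state.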
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                                           GroupedCNNDataTypes),
                                                                   ActivationFunctionsDataset),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

    // The number of groups is calculated by dividing the number of input channels of the input tensor by the number of input channels of the weights
    const int num_groups = input_shape[2] / weights_shape[2];
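    // e.g. an input with 16 channels and weights with 4 input channels per group give num_groups = 16 / 4 = 4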

    // Create tensors
    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type);
    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1);
    CLTensor bias    = create_tensor<CLTensor>(bias_shape, data_type, 1);
    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1);

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info, num_groups);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate padding
    // TODO(COMPMID-415): Need to validate padding?
}

TEST_SUITE(Float)
TEST_SUITE(FP32)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallGroupedConvolutionLayerDataset(), datasets::LargeGroupedConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP32

TEST_SUITE(FP16)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(framework::dataset::concat(datasets::SmallGroupedConvolutionLayerDataset(), datasets::LargeGroupedConvolutionLayerDataset()),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float

TEST_SUITE_END() // GroupedGEMMConvolutionLayer
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute