/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

#include <algorithm> // std::find_if
#include <cmath>
#include <memory>
#include <tuple>

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;

CLConvolutionLayer::CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_manager(std::move(memory_manager)), _function()
{
}

CLConvolutionLayer::~CLConvolutionLayer() = default;

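// Typical usage (a minimal sketch, not part of the library source; tensor setup is
// assumed to follow the usual allocator()->init()/allocate() pattern):
//
//   CLScheduler::get().default_init();
//   CLTensor src, weights, biases, dst;
//   // ... initialise the tensor infos and allocate the backing OpenCL buffers ...
//   CLConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));
//   conv.run();
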
void CLConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                   const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
}

void CLConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                                   const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info,
                                                            enable_fast_math, num_groups));

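    // Dispatch based on the method chosen by the heuristic: instantiate the matching
    // backend (Winograd, direct, GEMM-based or FFT-based convolution), configure it,
    // and keep it behind the common interface driven by run()/prepare().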
    switch(CLConvolutionLayer::get_convolution_method(input->info(), weights->info(), output->info(), conv_info,
                                                      weights_info, act_info, CLScheduler::get().target(), dilation, enable_fast_math))
    {
        case ConvolutionMethod::WINOGRAD:
        {
            ARM_COMPUTE_ERROR_ON(num_groups != 1);
            auto f = std::make_unique<CLWinogradConvolutionLayer>(_memory_manager);
            f->configure(compile_context, input, weights, biases, output, conv_info, act_info, enable_fast_math);
            _function = std::move(f);
            break;
        }
        case ConvolutionMethod::DIRECT:
        {
            ARM_COMPUTE_ERROR_ON(num_groups != 1);
            auto f = std::make_unique<CLDirectConvolutionLayer>();
            f->configure(compile_context, input, weights, biases, output, conv_info, act_info);
            _function = std::move(f);
            break;
        }
        case ConvolutionMethod::GEMM:
        {
            auto f = std::make_unique<CLGEMMConvolutionLayer>(_memory_manager);
            f->configure(compile_context, input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups);
            _function = std::move(f);
            break;
        }
        case ConvolutionMethod::FFT:
        {
            auto f = std::make_unique<CLFFTConvolutionLayer>(_memory_manager);
            f->configure(compile_context, input, weights, biases, output, conv_info, act_info, enable_fast_math);
            _function = std::move(f);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }
}

Status CLConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                    const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");

    const GPUTarget gpu_target = CLScheduler::get().target();

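    // Mirror the dispatch performed in configure(): validate against the backend that
    // get_convolution_method() selects for these shapes and parameters.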
    switch(CLConvolutionLayer::get_convolution_method(input, weights, output, conv_info, weights_info, act_info, gpu_target, dilation, enable_fast_math))
    {
        case ConvolutionMethod::WINOGRAD:
        {
            // Validate Winograd convolution layer
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "Grouping (num_groups != 1) with CLWinogradConvolutionLayer is not supported");
            ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info, enable_fast_math));
            break;
        }
        case ConvolutionMethod::DIRECT:
        {
            // Validate direct convolution layer
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "Grouping (num_groups != 1) with CLDirectConvolutionLayer is not supported");
            ARM_COMPUTE_RETURN_ON_ERROR(CLDirectConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info));
            break;
        }
        case ConvolutionMethod::GEMM:
        {
            // Validate GEMM-based convolution layer
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMConvolutionLayer::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups));
            break;
        }
        case ConvolutionMethod::FFT:
        {
            // Validate FFT-based convolution layer
            ARM_COMPUTE_RETURN_ON_ERROR(CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math));
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }

    return Status{};
}

ConvolutionMethod CLConvolutionLayer::get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                                             const WeightsInfo &weights_info, const ActivationLayerInfo &act_info, const GPUTarget gpu_target, const Size2D &dilation, bool enable_fast_math)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input);
    ARM_COMPUTE_ERROR_ON_NULLPTR(output);
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_UNUSED(weights_info);
    ARM_COMPUTE_UNUSED(gpu_target);

    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);

    /* Input spatial dims, kernel size, IFM/OFM, conv info */
    using ConvolutionConfiguration = std::tuple<Size2D, Size2D, Size2D, PadStrideInfo, DataLayout>;
    using ConfigurationMethod      = std::pair<ConvolutionConfiguration, ConvolutionMethod>;

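    // Table of known network configurations (input size, kernel size, IFM/OFM, pad/stride
    // and data layout) paired with a hand-picked convolution method that takes precedence
    // over the generic heuristic below.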
    const std::vector<ConfigurationMethod> known_configs =
    {
        // Alexnet
        ConfigurationMethod(ConvolutionConfiguration(Size2D(27U, 27U), Size2D(5U, 5U), Size2D(48U, 128U), PadStrideInfo(1U, 1U, 2U, 2U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // VGG16 / VGG19
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 64U), PadStrideInfo(1U, 1U, 1U, 1U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // Mobilenet 224
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 160
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 224
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
        // Mobilenet 160
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
    };

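    // Predicate: does a known configuration match the input/weights shapes, padding,
    // strides and data layout of the convolution being queried?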
    const auto find_config = [&](ConfigurationMethod c)
    {
        const ConvolutionConfiguration config      = c.first;
        const PadStrideInfo            info        = std::get<3>(config);
        const DataLayout               data_layout = std::get<4>(config);

        return std::get<0>(config) == Size2D(input->dimension(idx_w), input->dimension(idx_h)) && std::get<1>(config) == Size2D(weights->dimension(idx_w), weights->dimension(idx_h))
               && std::get<2>(config) == Size2D(weights->dimension(idx_c), weights->dimension(3)) && info.pad_top() == conv_info.pad_top() && info.pad_right() == conv_info.pad_right()
               && info.pad_bottom() == conv_info.pad_bottom() && info.pad_left() == conv_info.pad_left() && info.stride() == conv_info.stride() && (data_layout == input->data_layout());
    };

    std::vector<ConfigurationMethod>::const_iterator found;
    if((found = std::find_if(known_configs.begin(), known_configs.end(), find_config)) != known_configs.end())
    {
        return (*found).second;
    }

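    // No known configuration matched: fall back to a generic heuristic. Dilated
    // convolutions are routed to the GEMM-based path; otherwise very large inputs with
    // 9x9 kernels (SRGAN-style layers) prefer direct convolution, other large kernels
    // with more input than output feature maps prefer FFT (NCHW) or direct (NHWC),
    // shallow inputs prefer GEMM, and Winograd is used whenever its validation passes.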
    if(dilation != Size2D(1U, 1U))
    {
        return ConvolutionMethod::GEMM;
    }
    else
    {
        if(input->data_layout() == DataLayout::NCHW)
        {
            // SRGAN
            if((input->dimension(idx_h) > 720U) && (output->dimension(idx_h) > 720U) && (weights->dimension(idx_h) == 9) && (conv_info.pad_top() < 3)
               && (CLDirectConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
            {
                return ConvolutionMethod::DIRECT;
            }
            if((weights->dimension(idx_h) > 5) && (input->dimension(idx_c) > output->dimension(idx_c)) && (CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)))
            {
                return ConvolutionMethod::FFT;
            }
            if(input->dimension(idx_c) < 16)
            {
                return ConvolutionMethod::GEMM;
            }
            return bool(CLWinogradConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
        }
        else
        {
            // SRGAN
            if((input->dimension(idx_h) > 720U) && (output->dimension(idx_h) > 720U) && (weights->dimension(idx_h) == 9) && (conv_info.pad_top() < 3)
               && (CLDirectConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
            {
                return ConvolutionMethod::DIRECT;
            }
            if((weights->dimension(idx_h) > 7) && (input->dimension(idx_c) > output->dimension(idx_c)) && (CLDirectConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
            {
                return ConvolutionMethod::DIRECT;
            }
            if(input->dimension(idx_c) < 16)
            {
                return ConvolutionMethod::GEMM;
            }
            return bool(CLWinogradConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
        }
    }
}

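// run() triggers prepare() before executing so that any one-off work in the selected
// backend (e.g. weight reshaping/transformation) happens ahead of the first execution;
// the underlying functions are expected to make repeated prepare() calls a no-op.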
void CLConvolutionLayer::run()
{
    prepare();
    _function->run();
}

void CLConvolutionLayer::prepare()
{
    _function->prepare();
}
} // namespace arm_compute