/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/CL/kernels/CLWinogradFilterTransformKernel.h"
#include "src/core/CL/kernels/CLWinogradOutputTransformKernel.h"
#include "support/MemorySupport.h"

using namespace arm_compute;

namespace
{
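/** Select the Winograd output tile size for the given input spatial size, kernel size and data layout.
 *
 *  For example, a 3x3 kernel on a sufficiently large input selects a 4x4 output tile,
 *  i.e. the Winograd transform F(4x4, 3x3).
 */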
Size2D winograd_output_tile(const Size2D &input_dims, const Size2D &kernel_dims, DataLayout data_layout)
{
    Size2D output_tile = Size2D{};

    const unsigned int kernel_max_dim = std::max(kernel_dims.width, kernel_dims.height);

    // Check if the input spatial dimensions are at most 4 (NCHW only)
    const bool is_input_lt4_nchw = (input_dims.width <= 4 && input_dims.height <= 4) && (data_layout == DataLayout::NCHW);

    if(kernel_max_dim == 3U)
    {
        if(kernel_dims == Size2D(3U, 3U))
        {
            output_tile = is_input_lt4_nchw ? Size2D(2U, 2U) : Size2D(4U, 4U);
        }
        else if(kernel_dims == Size2D(3U, 1U))
        {
            output_tile = is_input_lt4_nchw ? Size2D(2U, 1U) : Size2D(4U, 1U);
        }
        else
        {
            output_tile = is_input_lt4_nchw ? Size2D(1U, 2U) : Size2D(1U, 4U);
        }
    }
    else if(kernel_max_dim == 5U)
    {
        output_tile = Size2D(kernel_dims.width == 1 ? 1U : 4U,
                             kernel_dims.height == 1 ? 1U : 4U);
    }
    else if(kernel_max_dim == 7U)
    {
        output_tile = Size2D(kernel_dims.width == 1 ? 1U : 2U,
                             kernel_dims.height == 1 ? 1U : 2U);
    }

    return output_tile;
}

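/** Check whether the (output tile, kernel size) pair is one of the Winograd configurations
 *  that requires the explicit fast-math opt-in, i.e. F(4x4, 5x5) and F(2x2, 7x7),
 *  whose transforms trade some numerical accuracy for speed.
 */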
bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_size)
{
    // Check if we want to configure a Winograd configuration which requires fast math
    using WinogradConfiguration = std::pair<std::pair<int, int>, std::pair<int, int>>;

    std::vector<WinogradConfiguration> fast_math_winograd =
    {
        WinogradConfiguration(std::pair<int, int>(4, 4), std::pair<int, int>(5, 5)),
        WinogradConfiguration(std::pair<int, int>(2, 2), std::pair<int, int>(7, 7))
    };

    auto p = std::make_pair(std::pair<int, int>(output_tile.width, output_tile.height),
                            std::pair<int, int>(kernel_size.width, kernel_size.height));

    return std::find(fast_math_winograd.begin(), fast_math_winograd.end(), p) != fast_math_winograd.end();
}
} // namespace

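// Minimal usage sketch (illustrative only; tensor shapes, allocation and CL runtime
// initialisation are assumed to be handled by the caller):
//
//   CLWinogradConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1),
//                  ActivationLayerInfo(), /* enable_fast_math */ false);
//   conv.run(); // the first run() also triggers prepare()
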
CLWinogradConvolutionLayer::CLWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _batched_mm(memory_manager), _input_transform(), _filter_transform(support::cpp14::make_unique<CLWinogradFilterTransformKernel>()),
      _output_transform(support::cpp14::make_unique<CLWinogradOutputTransformKernel>()), _input0(), _input1(), _batched_mm_output(), _original_weights(nullptr), _is_prepared(false)
{
}

CLWinogradConvolutionLayer::~CLWinogradConvolutionLayer() = default;

void CLWinogradConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info,
                                           bool enable_fast_math)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, act_info, enable_fast_math);
}

void CLWinogradConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                           const PadStrideInfo &conv_info,
                                           const ActivationLayerInfo &act_info, bool enable_fast_math)
{
    // Get indices for the width and height
    const size_t idx_width  = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);

    // Input shape, kernel size and output tile
    const Size2D input_dims  = Size2D(input->info()->tensor_shape()[idx_width], input->info()->tensor_shape()[idx_height]);
    const Size2D kernel_size = Size2D(weights->info()->tensor_shape()[idx_width], weights->info()->tensor_shape()[idx_height]);
    const Size2D output_tile = winograd_output_tile(input_dims, kernel_size, input->info()->data_layout());

    // Check if the Winograd configuration requires fast math
    if(!enable_fast_math)
    {
        ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); // Disable Winograd for FP16 if fast math is false
        ARM_COMPUTE_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
    }

    const WinogradInfo winograd_info = WinogradInfo(output_tile,
                                                    kernel_size,
                                                    input_dims,
                                                    conv_info,
                                                    input->info()->data_layout());

    _is_prepared      = false;
    _original_weights = weights;

    // Manage intermediate tensors
    _memory_group.manage(&_input0);
    _memory_group.manage(&_batched_mm_output);

    // Do not manage _input1 as it contains the weights

    // Configure input transform
    _input_transform.configure(compile_context, input, &_input0, winograd_info);

    // Configure filter transform
    _filter_transform->configure(compile_context, weights, &_input1, winograd_info);

    // Configure batched matrix multiply
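    // The GEMMInfo below asks CLGEMM to reshape the transformed weights only on the first run;
    // the trailing boolean is assumed here to map to GEMMInfo's fp_mixed_precision parameter,
    // requesting mixed-precision accumulation for FP16 inputs.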
    _batched_mm.configure(compile_context, &_input0, &_input1, nullptr, &_batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/, 0, false, false,
                                                                                                                  GEMMLowpOutputStageInfo(),
                                                                                                                  (input->info()->data_type() == DataType::F16)));

    // Configure output transform
    _output_transform->configure(compile_context, &_batched_mm_output, biases, output, winograd_info, act_info);

    // Allocate temporary tensors
    _input0.allocator()->allocate();
    _batched_mm_output.allocator()->allocate();
}

Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                            const ActivationLayerInfo &act_info, bool enable_fast_math)
{
    // Get indices for the width and height
    const size_t idx_width  = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);

    // Input shape, kernel size and output tile
    const Size2D input_dims  = Size2D(input->tensor_shape()[idx_width], input->tensor_shape()[idx_height]);
    const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);
    const Size2D output_tile = winograd_output_tile(input_dims, kernel_size, input->data_layout());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(((conv_info.pad_left() > (kernel_size.x() / 2u)) || (conv_info.pad_right() > (kernel_size.x() / 2u))), "Winograd only supports padding up to half kernel size");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(((conv_info.pad_top() > (kernel_size.y() / 2u)) || (conv_info.pad_bottom() > (kernel_size.y() / 2u))), "Winograd only supports padding up to half kernel size");

    // Check if the Winograd configuration requires fast math
    if(!enable_fast_math)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); // Disable Winograd for FP16 if fast math is false
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
    }

    const WinogradInfo winograd_info = WinogradInfo(output_tile,
                                                    kernel_size,
                                                    input_dims,
                                                    conv_info,
                                                    input->data_layout());

    // Validate input transform
    const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);
    const TensorInfo  input0       = input->clone()->set_tensor_shape(input0_shape);
    ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradInputTransform::validate(input, &input0, winograd_info));

    // Validate filter transform
    const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
    const TensorInfo  input1       = weights->clone()->set_tensor_shape(input1_shape);
    ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradFilterTransformKernel::validate(weights, &input1, winograd_info));

    // Validate batched matrix multiply
    TensorShape batched_mm_output_shape = input0.tensor_shape();
    batched_mm_output_shape[0]          = input1.tensor_shape()[0];
    const TensorInfo batched_mm_output  = input0.clone()->set_tensor_shape(batched_mm_output_shape);
    ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input0, &input1, nullptr, &batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/, 0, false, false,
                                                                                                                     GEMMLowpOutputStageInfo(), (input->data_type() == DataType::F16))));

    // Validate output transform
    ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradOutputTransformKernel::validate(&batched_mm_output, biases, output, winograd_info, act_info));

    return Status{};
}

void CLWinogradConvolutionLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Run input transform
    _input_transform.run();

    // Run batched matrix multiplication
    _batched_mm.run();

    // Run output transform
    CLScheduler::get().enqueue(*_output_transform);
}

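// One-time setup, run lazily from run(): transforms the weights on the device, lets CLGEMM
// reshape them, and frees the intermediate weight copies that are no longer needed afterwards.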
Georgios Pinitase0437672018-05-02 14:07:55 +0100235void CLWinogradConvolutionLayer::prepare()
236{
237 if(!_is_prepared)
238 {
239 // Run filter transform and mark original weights as unused
240 _input1.allocator()->allocate();
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +0100241 CLScheduler::get().enqueue(*_filter_transform, false);
Georgios Pinitase0437672018-05-02 14:07:55 +0100242 _original_weights->mark_as_unused();
243
244 // Prepare GEMM and release reshaped weights if marked unused by CLGEMM
245 _batched_mm.prepare();
246 if(!_input1.is_used())
247 {
248 _input1.allocator()->free();
249 }
250
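        // Block until the enqueued preparation work has completed on the device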
        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}