blob: 135a3bb2b9525000aee2faa0b051245d029647be [file] [log] [blame]
Manuel Bottini327225d2021-04-13 13:09:30 +01001/*
2 * Copyright (c) 2021 Arm Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Georgios Pinitas7891a732021-08-20 21:39:25 +010024#include "src/cpu/operators/CpuDirectConv2d.h"
Manuel Bottini327225d2021-04-13 13:09:30 +010025
26#include "arm_compute/core/PixelValue.h"
27#include "arm_compute/core/Utils.h"
28#include "arm_compute/core/Validate.h"
29#include "arm_compute/runtime/NEON/NEScheduler.h"
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010030
ramelg013ae3d882021-09-12 23:07:47 +010031#include "src/common/utils/Log.h"
Manuel Bottini327225d2021-04-13 13:09:30 +010032
33namespace arm_compute
34{
35namespace cpu
36{
// Defaulted out-of-line destructor — presumably defined here (rather than in the
// header) so that unique_ptr members to forward-declared kernel types can be
// destroyed; TODO confirm against the header.
CpuDirectConv2d::~CpuDirectConv2d() = default;
Manuel Bottini327225d2021-04-13 13:09:30 +010038
// Constructor: stores the (optional) memory manager in the memory group and
// leaves all kernels unset — they are created in configure().
CpuDirectConv2d::CpuDirectConv2d(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _output_stage_kernel(),
      _conv_kernel(),
      _input_border_handler(),
      _activationlayer_function(),
      _accumulator(),
      _has_bias(false),
      _is_activationlayer_enabled(false),
      _dim_split(Window::DimZ),
      _is_padding_required() // value-initialized -> false
{
}
52
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010053void CpuDirectConv2d::configure(ITensorInfo *src,
54 ITensorInfo *weights,
55 const ITensorInfo *bias,
56 ITensorInfo *dst,
57 const PadStrideInfo &conv_info,
58 const ActivationLayerInfo &act_info)
Manuel Bottini327225d2021-04-13 13:09:30 +010059{
60 ARM_COMPUTE_ERROR_ON(src->data_layout() == DataLayout::UNKNOWN);
ramelg013ae3d882021-09-12 23:07:47 +010061 ARM_COMPUTE_LOG_PARAMS(src, weights, bias, dst, conv_info, act_info);
62
Manuel Bottinib4bb6a02021-05-24 16:01:32 +010063 _output_stage_kernel = std::make_unique<kernels::CpuDirectConv2dOutputStageKernel>();
64 _conv_kernel = std::make_unique<kernels::CpuDirectConv2dKernel>();
Manuel Bottini327225d2021-04-13 13:09:30 +010065 _input_border_handler = std::make_unique<NEFillBorderKernel>();
66
67 // Free accumulator
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010068 if (_accumulator.buffer() != nullptr)
Manuel Bottini327225d2021-04-13 13:09:30 +010069 {
70 _accumulator.allocator()->free();
71 }
72
73 _dim_split = src->data_layout() == DataLayout::NCHW ? Window::DimZ : Window::DimY;
74
75 // Check if bias should be added in the convolution result
76 _has_bias = (bias != nullptr);
77
78 _conv_kernel->configure(src, weights, dst, conv_info);
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010079 if (_has_bias)
Manuel Bottini327225d2021-04-13 13:09:30 +010080 {
81 _output_stage_kernel->configure(dst, bias);
82 }
83 _is_padding_required = !_conv_kernel->border_size().empty();
84
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010085 if (_is_padding_required)
Manuel Bottini327225d2021-04-13 13:09:30 +010086 {
87 // Add zero padding XY
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010088 _input_border_handler->configure(src, _conv_kernel->border_size(), BorderMode::CONSTANT,
89 PixelValue(static_cast<float>(0.f)));
Manuel Bottini327225d2021-04-13 13:09:30 +010090 }
91
92 //Configure Activation Layer
93 _is_activationlayer_enabled = act_info.enabled();
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010094 if (_is_activationlayer_enabled)
Manuel Bottini327225d2021-04-13 13:09:30 +010095 {
96 _activationlayer_function = std::make_unique<CpuActivation>();
97 _activationlayer_function->configure(dst, dst, act_info);
98 }
99}
100
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100101Status CpuDirectConv2d::validate(const ITensorInfo *src,
102 const ITensorInfo *weights,
103 const ITensorInfo *bias,
104 const ITensorInfo *dst,
105 const PadStrideInfo &conv_info,
Manuel Bottinib4bb6a02021-05-24 16:01:32 +0100106 const ActivationLayerInfo &act_info)
Manuel Bottini327225d2021-04-13 13:09:30 +0100107{
108 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
109
110 // output might not be initialized since it can be an intermediate tensor of another layer
111 DataType data_type = src->data_type();
112 TensorInfo accumulator(dst->clone()->set_is_resizable(true).reset_padding().set_data_type(data_type));
113
114 // Validate Convolution kernel
Manuel Bottinib4bb6a02021-05-24 16:01:32 +0100115 ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuDirectConv2dKernel::validate(src, weights, &accumulator, conv_info));
Manuel Bottini327225d2021-04-13 13:09:30 +0100116
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100117 if (bias != nullptr)
Manuel Bottini327225d2021-04-13 13:09:30 +0100118 {
119 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, bias);
120 ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != weights->dimension(3),
121 "Biases size and number of input feature maps should match");
122 ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->num_dimensions() > 1, "Biases should be one dimensional");
123 }
124
125 // Validate bias kernel
Manuel Bottinib4bb6a02021-05-24 16:01:32 +0100126 ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuDirectConv2dOutputStageKernel::validate(&accumulator, bias, dst));
Manuel Bottini327225d2021-04-13 13:09:30 +0100127
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100128 if (act_info.enabled())
Manuel Bottini327225d2021-04-13 13:09:30 +0100129 {
130 ARM_COMPUTE_RETURN_ON_ERROR(CpuActivation::validate(dst, nullptr, act_info));
131 }
132
133 return Status{};
134}
135
Manuel Bottinib4bb6a02021-05-24 16:01:32 +0100136void CpuDirectConv2d::run(ITensorPack &tensors)
Manuel Bottini327225d2021-04-13 13:09:30 +0100137{
138 MemoryGroupResourceScope scope_mg(_memory_group);
139
140 auto src = tensors.get_tensor(TensorType::ACL_SRC_0);
141 auto bias = tensors.get_const_tensor(TensorType::ACL_SRC_2);
142 auto dst = tensors.get_tensor(TensorType::ACL_DST);
143
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100144 if (_is_padding_required)
Manuel Bottini327225d2021-04-13 13:09:30 +0100145 {
146 ITensorPack pack;
147 pack.add_tensor(TensorType::ACL_SRC_DST, src);
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100148 NEScheduler::get().schedule_op(_input_border_handler.get(), Window::DimZ, _input_border_handler->window(),
149 pack);
Manuel Bottini327225d2021-04-13 13:09:30 +0100150 }
151 NEScheduler::get().schedule_op(_conv_kernel.get(), _dim_split, _conv_kernel->window(), tensors);
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100152 if (_has_bias)
Manuel Bottini327225d2021-04-13 13:09:30 +0100153 {
154 ITensorPack pack;
155 pack.add_tensor(TensorType::ACL_SRC_0, dst);
156 pack.add_tensor(TensorType::ACL_SRC_1, bias);
157 pack.add_tensor(TensorType::ACL_DST, dst);
158 NEScheduler::get().schedule_op(_output_stage_kernel.get(), Window::DimY, _output_stage_kernel->window(), pack);
159 }
160
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100161 if (_is_activationlayer_enabled)
Manuel Bottini327225d2021-04-13 13:09:30 +0100162 {
163 ITensorPack pack;
164 pack.add_tensor(TensorType::ACL_SRC, dst);
165 pack.add_tensor(TensorType::ACL_DST, dst);
166 _activationlayer_function->run(pack);
167 }
168}
169} // namespace cpu
170} // namespace arm_compute