/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLKernel.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
                          const ActivationLayerInfo &act_info)
{
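    // The NHWC 3x3 depthwise kernel currently supports QASYMM8 data only, a unit depth multiplier
    // and, optionally, a fused ReLU / bounded ReLU activation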
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((act_info.enabled()) && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
                                    && (act_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
                                    && (act_info.activation() != ActivationLayerInfo::ActivationFunction::RELU),
                                    "For QASYMM8 only relu, upper bounded relu and lower-upper bounded relu are supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON(depth_multiplier > 1); // COMPMID-1071 Add depth multiplier support for NHWC
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(1) != 3 || weights->dimension(2) != 3);

    if(biases != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(0));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if(output->total_size() != 0)
    {
        const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info)
{
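    // Each iteration accesses 4 elements along the channel dimension (dim 0) and processes 4 rows along dim 1;
    // two extra rows are read to cover the 3x3 filter taps, while the number of rows written per iteration
    // scales down with the convolution stride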
    const unsigned int num_rows_processed_per_iteration = 4;
    const unsigned int num_elems_accessed_per_iteration = 4;
    const unsigned int num_rows_read_per_iteration      = num_rows_processed_per_iteration + 2;
    const unsigned int num_rows_written_per_iteration   = num_rows_processed_per_iteration / conv_info.stride().first;

    const BorderSize border_size(conv_info.pad_left() + num_rows_read_per_iteration * std::max(conv_info.pad_top(), conv_info.pad_bottom()), 0, conv_info.pad_right(), 0);

    // Configure kernel window
    Window win = calculate_max_window(*output, Steps(num_elems_accessed_per_iteration, num_rows_written_per_iteration));

    AccessWindowStatic input_access(input, 0, -border_size.top, ceil_to_multiple(input->dimension(0), num_elems_accessed_per_iteration),
                                    ceil_to_multiple(input->dimension(1) + border_size.bottom, num_rows_read_per_iteration));
    AccessWindowRectangle  output_access(output, 0, 0, num_elems_accessed_per_iteration, num_rows_written_per_iteration);
    AccessWindowHorizontal weights_access(weights, 0, num_elems_accessed_per_iteration);

    bool window_changed = update_window_and_padding(win, input_access, weights_access, output_access);

    output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLDepthwiseConvolutionLayer3x3NHWCKernel::CLDepthwiseConvolutionLayer3x3NHWCKernel()
    : _num_rows_processed_per_iteration(1)
{
}

BorderSize CLDepthwiseConvolutionLayer3x3NHWCKernel::border_size() const
{
    return _border_size;
}

void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                                                         unsigned int depth_multiplier,
                                                         ActivationLayerInfo act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    // Get convolved dimensions
    const TensorShape output_shape = compute_depthwise_convolution_shape(*input->info(), *weights->info(), conv_info, depth_multiplier);

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output->info(),
                       output_shape,
                       1,
                       input->info()->data_type(),
                       input->info()->fixed_point_position(),
                       input->info()->quantization_info());

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, act_info));

    const unsigned int conv_stride_x = conv_info.stride().first;
    ARM_COMPUTE_ERROR_ON(conv_stride_x < 1 || conv_stride_x > 2);
    ARM_COMPUTE_ERROR_ON(std::max(conv_info.pad_top(), conv_info.pad_bottom()) > 1);

    _input                            = input;
    _output                           = output;
    _weights                          = weights;
    _biases                           = biases;
    _conv_stride_y                    = conv_info.stride().second;
    _conv_pad_left                    = conv_info.pad_left();
    _num_rows_processed_per_iteration = 4;

    const unsigned int num_elems_accessed_per_iteration = 4;
    const unsigned int num_rows_read_per_iteration      = _num_rows_processed_per_iteration + 2;

    _border_size = BorderSize(_conv_pad_left + num_rows_read_per_iteration * std::max(conv_info.pad_top(), conv_info.pad_bottom()), 0, conv_info.pad_right(), 0);

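    // The requantization scale M = (input_scale * weights_scale) / output_scale maps the int32 accumulator
    // back to QASYMM8; it is decomposed into a normalized fixed-point multiplier and a right shift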
    float multiplier        = _input->info()->quantization_info().scale * _weights->info()->quantization_info().scale / _output->info()->quantization_info().scale;
    int   output_multiplier = 0;
    int   output_shift      = 0;
    quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);

    CLBuildOptions build_opts;
    build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS");
    build_opts.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-_input->info()->quantization_info().offset));
    build_opts.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-_weights->info()->quantization_info().offset));
    build_opts.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(_output->info()->quantization_info().offset));
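    // K_OFFSET is the constant term of the expanded quantized product: input_offset * weights_offset
    // accumulated over the 9 taps of the 3x3 filter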
    build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(9 * input->info()->quantization_info().offset * weights->info()->quantization_info().offset));
    build_opts.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
    build_opts.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_accessed_per_iteration));
    build_opts.add_option("-DSRC_DEPTH=" + support::cpp11::to_string(_input->info()->dimension(2)));
    build_opts.add_option("-DCONV_PAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
    build_opts.add_option("-DROWS_READ=" + support::cpp11::to_string(num_rows_read_per_iteration));

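    // When an activation is fused, pass its clamping bounds quantized with the input's quantization info;
    // if the output quantization differs from the input's, also pass both scales and offsets so the kernel
    // can requantize the activated values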
    if(act_info.enabled())
    {
        const int a_val = input->info()->quantization_info().quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
        const int b_val = input->info()->quantization_info().quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
        const int o1    = input->info()->quantization_info().offset;

        build_opts.add_option("-DFUSED_ACTIVATION=" + lower_string(string_from_activation_func(act_info.activation())));
        build_opts.add_option("-DA_VAL=" + support::cpp11::to_string(a_val));
        build_opts.add_option("-DB_VAL=" + support::cpp11::to_string(b_val));
        build_opts.add_option("-DCONST_0=" + support::cpp11::to_string(o1));

        if(output != nullptr)
        {
            const float s1 = input->info()->quantization_info().scale;
            const float s2 = output->info()->quantization_info().scale;
            const int   o2 = output->info()->quantization_info().offset;

            if(o1 != o2 || s1 != s2)
            {
                build_opts.add_option("-DS1_VAL=" + float_to_string_with_full_precision(s1));
                build_opts.add_option("-DS2_VAL=" + float_to_string_with_full_precision(s2));
                build_opts.add_option("-DO1_VAL=" + support::cpp11::to_string(o1));
                build_opts.add_option("-DO2_VAL=" + support::cpp11::to_string(o2));
            }
        }
    }

    // Create kernel
    std::string kernel_name = std::string("depthwise_convolution_3x3_quantized_nhwc_stride") + support::cpp11::to_string(conv_stride_x);
    _kernel                 = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // Configure kernel window
    auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    ICLKernel::configure(win_config.second);

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name;
    _config_id += "_";
    _config_id += support::cpp11::to_string(input->info()->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(input->info()->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(input->info()->dimension(2));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->info()->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->info()->dimension(1));
}

Status CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                                          unsigned int depth_multiplier,
                                                          ActivationLayerInfo act_info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info).first);

    return Status{};
}

void CLDepthwiseConvolutionLayer3x3NHWCKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    // Create the input window and adjust it for the left padding, the number of rows processed per
    // iteration and the vertical convolution stride
    Window win_in = window;
    win_in.adjust(Window::DimY, -_conv_pad_left, true);
    win_in.set_dimension_step(Window::DimY, _num_rows_processed_per_iteration);
    win_in.set_dimension_step(Window::DimZ, _conv_stride_y);

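    // The input window must step at least as fast as the output window so that the input and output
    // slices stay in lock-step while sliding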
    ARM_COMPUTE_ERROR_ON((win_in.y().step() < window.y().step()) || (win_in.z().step() < window.z().step()));

    Window slice_in  = win_in.first_slice_window_3D();
    Window slice_out = window.first_slice_window_3D();

    if(_biases != nullptr)
    {
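        // The bias vector is bound after the three 3D tensor arguments (input, output and weights),
        // hence the starting argument index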
        unsigned int idx = 3 * num_arguments_per_3D_tensor();
        Window win_biases;
        win_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
        win_biases.set_dimension_step(Window::DimX, window.x().step());
        add_1D_tensor_argument(idx, _biases, win_biases);
    }

    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _input, slice_in);
        add_3D_tensor_argument(idx, _output, slice_out);
        add_3D_tensor_argument(idx, _weights, slice_out);

        enqueue(queue, *this, slice_out, _lws_hint);
    }
    while(window.slide_window_slice_3D(slice_out) && win_in.slide_window_slice_3D(slice_in));
}