/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/GLES_COMPUTE/GCHelpers.h"
#include "arm_compute/core/GLES_COMPUTE/GCKernelLibrary.h"
#include "arm_compute/core/GLES_COMPUTE/IGCTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "support/ToolchainSupport.h"

using namespace arm_compute;

template <unsigned int kernel_size>
GCDirectConvolutionLayerKernel<kernel_size>::GCDirectConvolutionLayerKernel()
    : _input(nullptr), _bias(nullptr), _weights(nullptr), _output(nullptr), _border_size(0), _conv_stride_x(0), _conv_stride_y(0), _conv_pad_x(0), _conv_pad_y(0), _lws(gles::NDRange(1U, 1U, 1U))
{
}

template <unsigned int kernel_size>
BorderSize GCDirectConvolutionLayerKernel<kernel_size>::border_size() const
{
    return _border_size;
}

template <unsigned int kernel_size>
void GCDirectConvolutionLayerKernel<kernel_size>::configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *bias, IGCTensor *output, const PadStrideInfo &conv_info)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
    ARM_COMPUTE_ERROR_ON(weights->info()->dimension(2) != input->info()->dimension(2));
    ARM_COMPUTE_ERROR_ON(weights->info()->dimension(0) != weights->info()->dimension(1));
    ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 4);
    ARM_COMPUTE_ERROR_ON_MSG((kernel_size == 3 && std::get<0>(conv_info.stride()) > 2), "Strides larger than 2 not supported in 3x3 direct convolution!");
    ARM_COMPUTE_ERROR_ON(kernel_size != weights->info()->dimension(0));

    if(bias != nullptr)
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(weights, bias);
        // FIXME: Bug in framework; worked around in the tests for now.
        //ARM_COMPUTE_ERROR_ON(bias->info()->dimension(0) != weights->info()->dimension(3));
        ARM_COMPUTE_ERROR_ON(bias->info()->num_dimensions() > 1);
    }

    // Get convolved dimensions
    unsigned int owidth  = 0;
    unsigned int oheight = 0;
    std::tie(owidth, oheight) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), kernel_size, kernel_size, conv_info);

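    // Output shape: [output_width, output_height, number_of_kernels]; any remaining (batch)
    // dimensions are inherited from the input shape.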
    TensorShape output_shape = input->info()->tensor_shape();
    output_shape.set(0, owidth);
    output_shape.set(1, oheight);
    output_shape.set(2, weights->info()->dimension(3));

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position());

    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);

    _conv_stride_x = std::get<0>(conv_info.stride());
    _conv_stride_y = std::get<1>(conv_info.stride());
    _conv_pad_x    = std::get<0>(conv_info.pad());
    _conv_pad_y    = std::get<1>(conv_info.pad());

    _input       = input;
    _weights     = weights;
    _output      = output;
    _bias        = bias;
    _border_size = BorderSize(_conv_pad_y, _conv_pad_x);

    std::set<std::string> options;

    options.emplace("#define LOCAL_SIZE_X " + support::cpp11::to_string(_lws[0]));
    options.emplace("#define LOCAL_SIZE_Y " + support::cpp11::to_string(_lws[1]));
    options.emplace("#define LOCAL_SIZE_Z " + support::cpp11::to_string(_lws[2]));
    options.emplace("#define STRIDE_X " + support::cpp11::to_string(_conv_stride_x));

    std::string dt_name = (input->info()->data_type() == DataType::F32) ? "DATA_TYPE_FP32" : "DATA_TYPE_FP16";
    options.emplace(("#define " + dt_name));

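    // Input elements read and output elements written by one shader invocation. The defaults below
    // are overridden to match the processing path chosen for the kernel size and data type, and they
    // drive both the execution window steps and the border/padding calculation further down.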
    unsigned int num_elems_read_per_iteration_x    = kernel_size * _conv_stride_x;
    unsigned int num_elems_read_per_iteration_y    = 1;
    unsigned int num_elems_written_per_iteration_x = 1;
    unsigned int num_elems_written_per_iteration_y = 1;
    unsigned int num_elems_written_per_iteration_z = 1;

    if(kernel_size == 3)
    {
        if((_conv_stride_x == 1) && (_conv_stride_y == 1))
        {
            switch(input->info()->data_type())
            {
                // TODO(APPBROWSER-299): Choose the optimal path and remove the others.
#define PROCESS_X_4ELEMENTS_Y_3ELEMENTS_FP16

                case DataType::F16:
#if defined(PROCESS_X_8ELEMENTS_Y_3ELEMENTS_FP16)
                    options.emplace("#define PROCESS_X_8ELEMENTS_Y_3ELEMENTS_FP16");
                    num_elems_read_per_iteration_x    = 16;
                    num_elems_read_per_iteration_y    = 5;
                    num_elems_written_per_iteration_x = 8;
                    num_elems_written_per_iteration_y = 3;
#elif defined(PROCESS_X_4ELEMENTS_Y_3ELEMENTS_FP16)
                    options.emplace("#define PROCESS_X_4ELEMENTS_Y_3ELEMENTS_FP16");
                    num_elems_read_per_iteration_x    = 8;
                    num_elems_read_per_iteration_y    = 5;
                    num_elems_written_per_iteration_x = 4;
                    num_elems_written_per_iteration_y = 3;
#elif defined(PROCESS_X_4ELEMENTS_Y_4ELEMENTS_FP16)
                    options.emplace("#define PROCESS_X_4ELEMENTS_Y_4ELEMENTS_FP16");
                    num_elems_read_per_iteration_x    = 8;
                    num_elems_read_per_iteration_y    = 6;
                    num_elems_written_per_iteration_x = 4;
                    num_elems_written_per_iteration_y = 4;
#elif defined(PROCESS_X_4ELEMENTS_Y_3ELEMENTS_Z_2ELEMENTS_FP16)
                    options.emplace("#define PROCESS_X_4ELEMENTS_Y_3ELEMENTS_Z_2ELEMENTS_FP16");
                    num_elems_read_per_iteration_x    = 8;
                    num_elems_read_per_iteration_y    = 5;
                    num_elems_written_per_iteration_x = 4;
                    num_elems_written_per_iteration_y = 3;
                    num_elems_written_per_iteration_z = 2;
#endif /* PROCESS_X_8ELEMENTS_Y_3ELEMENTS_FP16 */
                    break;

                case DataType::F32:
                    options.emplace("#define PROCESS_X_4ELEMENTS_Y_3ELEMENTS");
                    num_elems_read_per_iteration_x    = 8;
                    num_elems_read_per_iteration_y    = 5;
                    num_elems_written_per_iteration_x = 4;
                    num_elems_written_per_iteration_y = 3;
                    break;

                default:
                    ARM_COMPUTE_ERROR("Current data type is not supported");
                    break;
            }
        }
        // FIXME: Keep only one of these paths in the release build.
        else
        {
            switch(input->info()->data_type())
            {
                case DataType::F16:
                    options.emplace("#define PROCESS_X_4ELEMENTS_FP16");
                    num_elems_read_per_iteration_x    = 8;
                    num_elems_written_per_iteration_x = 4;
                    break;

                case DataType::F32:
                    // TODO(APPBROWSER-299): Choose the optimal path and remove the others.
#define PROCESS_4_ELEMENT

#if defined(PROCESS_1_ELEMENT)
                    options.emplace("#define PROCESS_1_ELEMENT");
                    num_elems_read_per_iteration_x    = 3;
                    num_elems_written_per_iteration_x = 1;
#elif defined(PROCESS_4_ELEMENT)
                    options.emplace("#define PROCESS_4_ELEMENT");
                    num_elems_read_per_iteration_x    = 8;
                    num_elems_written_per_iteration_x = 4;
#elif defined(PROCESS_8_ELEMENT)
                    options.emplace("#define PROCESS_8_ELEMENT");
                    num_elems_read_per_iteration_x    = 12;
                    num_elems_written_per_iteration_x = 8;
#else /* PROCESS_1_ELEMENT */
#error Have to declare how many elements to process in one thread.
#endif /* PROCESS_1_ELEMENT */
                    break;

                default:
                    ARM_COMPUTE_ERROR("Current data type is not supported");
                    break;
            }
        }
    }
    else if(kernel_size == 1)
    {
        switch(input->info()->data_type())
        {
            case DataType::F16:
                num_elems_read_per_iteration_x    = 8;
                num_elems_written_per_iteration_x = 8;
                if(weights->info()->dimension(2) % 2 == 0)
                {
                    options.emplace("#define WEIGHTS_OPTIMIZATION");
                }
                break;

            case DataType::F32:
                num_elems_read_per_iteration_x    = 1;
                num_elems_written_per_iteration_x = 1;
                break;

            default:
                break;
        }
    }
    else if(kernel_size == 5)
    {
        switch(input->info()->data_type())
        {
            case DataType::F16:
                options.emplace("#define PROCESS_4X_1Y_1Z");
                num_elems_read_per_iteration_x    = 8;
                num_elems_written_per_iteration_x = 4;
                break;

            default:
                break;
        }
    }
    else
    {
    }

    if(_bias != nullptr)
    {
        options.emplace("#define BIAS");
    }

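    // Shader name is derived from the kernel size (e.g. "direct_convolution3x3") and the shader is
    // built with the accumulated compile-time options.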
    std::stringstream kernel_name;
    kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;

    _kernel = static_cast<GCKernel>(GCKernelLibrary::get().create_kernel(kernel_name.str(), options));

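    // First index of the static (non per-slice) kernel arguments: skip the argument slots of the
    // three 3D tensors (input, weights, output) and, when present, the 1D bias tensor.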
    unsigned int idx = (_bias == nullptr) ? 3 * num_arguments_per_3D_tensor() : (num_arguments_per_1D_tensor() + 3 * num_arguments_per_3D_tensor());

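    // The output is padded up to a multiple of the elements written per invocation times the local
    // workgroup size; the input border is then sized so that reads over the enlarged output region
    // stay within the padded input.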
    // Calculate output right and bottom border
    const int output_width          = output->info()->dimension(0);
    const int output_height         = output->info()->dimension(1);
    const int output_padding_right  = ceil_to_multiple(output_width, num_elems_written_per_iteration_x * _lws[0]) - output_width;
    const int output_padding_bottom = ceil_to_multiple(output_height, num_elems_written_per_iteration_y * _lws[1]) - output_height;

    // Calculate input right and bottom border
    const int input_width    = input->info()->dimension(0);
    const int input_height   = input->info()->dimension(1);
    const int upper_bound_w  = ceil_to_multiple(((output_width + output_padding_right) * _conv_stride_x + (kernel_size - 1)), num_elems_read_per_iteration_x * _lws[0]) - _conv_pad_x - input_width;
    const int upper_bound_h  = ceil_to_multiple(((output_height + output_padding_bottom) * _conv_stride_y + (kernel_size - 1)), num_elems_read_per_iteration_y * _lws[1]) - _conv_pad_y - input_height;
    const int padding_right  = std::max(upper_bound_w, _conv_pad_x);
    const int padding_bottom = std::max(upper_bound_h, _conv_pad_y);

    BorderSize border = BorderSize(0, output_padding_right, output_padding_bottom, 0);

    Window win = calculate_max_enlarged_window(*output->info(), Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y, num_elems_written_per_iteration_z), border);

    AccessWindowStatic input_access(input->info(), -_conv_pad_x, -_conv_pad_y, input_width + padding_right, input_height + padding_bottom);
    AccessWindowStatic weights_access = AccessWindowStatic(nullptr, 0, 0, 0, 0);
    AccessWindowStatic bias_access    = AccessWindowStatic(nullptr, 0, 0, 0, 1);

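    // For FP16, the weights (and bias) access windows are widened by one element in X; the 1x1
    // WEIGHTS_OPTIMIZATION path is the exception and requests no weights padding.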
    switch(weights->info()->data_type())
    {
        case DataType::F16:
            if((weights->info()->dimension(2) % 2 != 0) || (kernel_size != 1))
            {
                weights_access = AccessWindowStatic(weights->info(), 0, 0, kernel_size + 1, kernel_size);
            }
            if(_bias != nullptr)
            {
                bias_access = AccessWindowStatic(_bias->info(), 0, 0, _bias->info()->dimension(0) + 1, 1);
            }
            break;

        case DataType::F32:
            weights_access = AccessWindowStatic(weights->info(), 0, 0, kernel_size, kernel_size);
            if(_bias != nullptr)
            {
                bias_access = AccessWindowStatic(_bias->info(), 0, 0, _bias->info()->dimension(0), 1);
            }
            break;

        default:
            ARM_COMPUTE_ERROR("Current data type is not supported");
            break;
    }

    AccessWindowStatic output_access(output->info(), 0, 0, output_width + output_padding_right, output_height + output_padding_bottom);

    if(_bias != nullptr)
    {
        update_window_and_padding(win, input_access, weights_access, bias_access, output_access);
    }
    else
    {
        update_window_and_padding(win, input_access, weights_access, output_access);
    }

    output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));

    _kernel.set_argument(idx++, _weights->info()->strides_in_bytes()[3]); // weights_stride_w
    _kernel.set_argument(idx++, _weights->info()->dimension(2));          // weights_depth

    IGCKernel::configure(win);
}

template <unsigned int kernel_size>
void GCDirectConvolutionLayerKernel<kernel_size>::run(const Window &window)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    _kernel.use();

    // Get initial windows
    Window slice  = window.first_slice_window_3D();
    Window win_in = window;

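    // Shift the input window back by the padding and scale its steps by the convolution strides so
    // that each output slice maps onto the matching input region.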
    win_in.adjust(Window::DimX, -_conv_pad_x, true);
    win_in.adjust(Window::DimY, -_conv_pad_y, true);
    win_in.set_dimension_step(Window::DimX, window.x().step() * _conv_stride_x);
    win_in.set_dimension_step(Window::DimY, window.y().step() * _conv_stride_y);

    Window slice_in = win_in.first_slice_window_3D();

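    // Weights (and bias) do not change per slice, so they are bound once up front; idx1 skips the
    // argument slots reserved for the input and output tensors.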
    unsigned int idx1 = 2 * num_arguments_per_3D_tensor();
    add_3D_tensor_argument(idx1, _weights, BufferParam(3, 2), slice);

    if(_bias != nullptr)
    {
        Window slice_bias;
        slice_bias.use_tensor_dimensions(_bias->info()->tensor_shape());
        add_1D_tensor_argument(idx1, _bias, BufferParam(4, 2), slice_bias);
    }

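    // For every 3D slice: re-bind the input and output tensors, refresh the shader parameters and
    // enqueue the kernel with the configured local workgroup size.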
    do
    {
        unsigned int idx = 0;

        switch(_input->info()->data_type())
        {
            case DataType::F16:
                switch(kernel_size)
                {
                    case 1:
                        add_3D_tensor_argument(idx, _input, BufferParam(1, 4), slice_in);
                        add_3D_tensor_argument(idx, _output, BufferParam(2, 4), slice);
                        break;

                    case 3:
                        add_3D_tensor_argument(idx, _input, BufferParam(1, 3), slice_in);
                        add_3D_tensor_argument(idx, _output, BufferParam(2, 3), slice);
                        break;

                    case 5:
                        add_3D_tensor_argument(idx, _input, BufferParam(1, 3), slice_in);
                        add_3D_tensor_argument(idx, _output, BufferParam(2, 3), slice);
                        break;

                    default:
                        ARM_COMPUTE_ERROR("Current kernel size %d is not supported", kernel_size);
                        break;
                }
                break;

            case DataType::F32:
                switch(kernel_size)
                {
                    case 1:
                    case 5:
                        add_3D_tensor_argument(idx, _input, BufferParam(1, 2), slice_in);
                        add_3D_tensor_argument(idx, _output, BufferParam(2, 2), slice);
                        break;

                    case 3:
                        add_3D_tensor_argument(idx, _input, BufferParam(1, 4), slice_in);
                        add_3D_tensor_argument(idx, _output, BufferParam(2, 4), slice);
                        break;

                    default:
                        ARM_COMPUTE_ERROR("Current kernel size %d is not supported", kernel_size);
                        break;
                }
                break;

            default:
                ARM_COMPUTE_ERROR("Current data type is not supported");
                break;
        }

        _kernel.update_shader_params();
        enqueue(*this, slice, _lws);
    }
    while(window.slide_window_slice_3D(slice) && win_in.slide_window_slice_3D(slice_in));
}

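// Explicit instantiations for the kernel sizes supported by the GLES direct convolution shaders.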
template class arm_compute::GCDirectConvolutionLayerKernel<1>;
template class arm_compute::GCDirectConvolutionLayerKernel<3>;
template class arm_compute::GCDirectConvolutionLayerKernel<5>;