/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include "src/core/NEON/kernels/arm_gemm/utils.hpp"

#ifdef CYCLE_PROFILING
#include "profiler.hpp"
#endif

#include <algorithm>   // std::min
#include <cstddef>     // size_t
#include <cstdint>     // int32_t, uint8_t, uint64_t
#include <functional>  // std::function

namespace arm_conv {
namespace depthwise {

namespace
{

// We have two sets of quantized kernels; those which use the dot-product
// instructions and which require the biases and quantisation parameters to be
// ravelled into the weights/parameter array, and those which use the MLAL
// instructions and which consume separate bias and quantisation parameter
// arrays. The following code adapts these two sets of kernels to use the same
// API - allowing the same driver loop to call them both.

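// Both families are adapted to a single "unravelled" call signature:
//   (n_channels, input_ptrs, weights, bias, requantize_params,
//    requant_muls, requant_shifts, output_ptrs)
// The adapter for ravelled kernels simply drops the separate bias and
// requantisation pointers, since those values are already packed into the
// parameter buffer.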
template <typename TIn, typename TWeight, typename TOut>
using UnravelledKernFn = std::function<void(unsigned int, const TIn *const *, const TWeight *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, TOut *const *)>;

template <typename TIn, typename TOut>
using RavelledKernFn = std::function<void(const TIn *const *, TOut *const *, const void *, uint64_t, const arm_gemm::Requantize32 &)>;

template <typename TIn, typename TWeight, typename TOut>
const UnravelledKernFn<TIn, TWeight, TOut> get_unified_kernel(const UnravelledKernFn<TIn, TWeight, TOut> &f) { return f; }

template <typename TIn, typename TWeight, typename TOut>
const UnravelledKernFn<TIn, TWeight, TOut> get_unified_kernel(const RavelledKernFn<TIn, TOut> &f)
{
  return [f] (const unsigned int n_channels,
              const TIn *const *const inptrs,
              const TWeight *const weights,
              const int32_t *,  // Bias (ravelled)
              const arm_gemm::Requantize32 &qp,
              const int32_t *,  // Requantisation muls (ravelled)
              const int32_t *,  // Requantisation shifts (ravelled)
              TOut *const *const outptrs) {
    return f(inptrs, outptrs, weights, n_channels, qp);
  };
}

template <typename T>
using UnravelledPackingFn = std::function<void(unsigned int, void *, const T *, size_t, size_t)>;

template <typename T>
using RavelledPackingFn = std::function<void(unsigned int, void *, const int32_t *, const T *, const arm_gemm::Requantize32 &, size_t, size_t)>;

template <typename T>
const RavelledPackingFn<T> get_unified_packer(const UnravelledPackingFn<T> &f)
{
  return [f] (const unsigned int n_channels,
              void *buffer,
              const int32_t *,  // Bias
              const T *weights,
              const arm_gemm::Requantize32 &,
              size_t ld_weight_col,
              size_t ld_weight_row)
  {
    return f(n_channels, buffer, weights, ld_weight_col, ld_weight_row);
  };
}

template <typename T>
const RavelledPackingFn<T> get_unified_packer(const RavelledPackingFn<T> &f) { return f; }

template <typename T>
constexpr bool requires_unravelled_bias_and_quant_params(const UnravelledPackingFn<T> &) { return true; }

template <typename T>
constexpr bool requires_unravelled_bias_and_quant_params(const RavelledPackingFn<T> &) { return false; }

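// Whether the bias and requantisation parameters must be handed to the kernel
// separately is decided at compile time by overload resolution on the type of
// strategy::pack_parameters: packers which do not consume those values
// themselves (the unravelled form) leave the kernel needing them unravelled.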
template <class strategy>
constexpr bool strategy_requires_unravelled_bias_and_quant_params(void)
{
  return requires_unravelled_bias_and_quant_params<typename strategy::weight_type>(strategy::pack_parameters);
}

}  // namespace

template <class strategy>
class DepthwiseDepthfirstQuantized :
  public DepthwiseCommon<typename strategy::input_type,
                         typename strategy::weight_type,
                         typename strategy::return_type>
{
  using TInput = typename strategy::input_type;
  using TWeight = typename strategy::weight_type;
  using TOutput = typename strategy::return_type;
  using TAccum = typename strategy::bias_type;

  arm_gemm::Requantize32 m_qp;

  size_t sizeof_input_buffer(unsigned int n_channels) const
  {
    const unsigned int vl = arm_gemm::utils::get_vector_length<TInput>(strategy::vl_type);
    const auto rounded_channels = arm_gemm::roundup(n_channels, vl);
    return sizeof(TInput) * rounded_channels;
  }

  size_t sizeof_output_buffer(unsigned int n_channels) const
  {
    const unsigned int vl = arm_gemm::utils::get_vector_length<TOutput>(strategy::vl_type);
    const auto rounded_channels = arm_gemm::roundup(n_channels, vl);
    return sizeof(TOutput) * rounded_channels;
  }

  size_t sizeof_bias_buffer(unsigned int n_channels) const
  {
    if (strategy_requires_unravelled_bias_and_quant_params<strategy>())
    {
      return (m_qp.bias == nullptr) ? sizeof(TAccum) * n_channels : 0;
    }

    return 0;
  }

  size_t sizeof_requant_mul_buffer(unsigned int n_channels) const
  {
    if (strategy_requires_unravelled_bias_and_quant_params<strategy>())
    {
      return m_qp.per_channel_requant ? 0 : sizeof(int32_t) * n_channels;
    }

    return 0;
  }

  size_t sizeof_requant_shift_buffer(unsigned int n_channels) const
  {
    if (strategy_requires_unravelled_bias_and_quant_params<strategy>())
    {
      return m_qp.per_channel_requant ? 0 : sizeof(int32_t) * n_channels;
    }

    return 0;
  }

  public:
  DepthwiseDepthfirstQuantized(const DepthwiseArgs &args, const arm_gemm::Requantize32 &qp)
    : DepthwiseCommon<TInput, TWeight, TOutput>(args), m_qp(qp)
  {
  }

  DepthwiseDepthfirstQuantized(DepthwiseDepthfirstQuantized &) = delete;
  DepthwiseDepthfirstQuantized &operator=(DepthwiseDepthfirstQuantized &) = delete;

  size_t get_storage_size(void) const override
  {
    return strategy::get_packed_size(this->m_args);
  }

  void pack_parameters(void *buffer, const void *const bias, const void *weights, size_t ld_weight_col, size_t ld_weight_row) override
  {
    if (strategy_requires_unravelled_bias_and_quant_params<strategy>())
    {
      m_qp.bias = static_cast<const int32_t *>(bias);
    }

    get_unified_packer<TWeight>(strategy::pack_parameters)(
      this->m_args.input_channels,
      buffer,
      static_cast<const int32_t *>(bias),
      reinterpret_cast<const TWeight *>(weights),
      m_qp,
      ld_weight_col,
      ld_weight_row
    );
  }

  size_t get_working_size(const unsigned int n_threads, const unsigned int n_channels) const override
  {
    const unsigned int n_output_channels = n_channels * this->m_args.channel_multiplier;
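    // Each thread gets its own contiguous slice of working space, holding the
    // padding input/output buffers plus (when the strategy needs unravelled
    // parameters) the bias and requantisation vectors, carved out in execute()
    // in the same order they are summed here.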
    return n_threads * (
      sizeof_output_buffer(n_output_channels) +
      sizeof_input_buffer(n_channels) +
      sizeof_bias_buffer(n_channels) +
      sizeof_requant_mul_buffer(n_channels) +
      sizeof_requant_shift_buffer(n_channels)
    );
  }

  using DepthwiseCommon<typename strategy::input_type, typename strategy::weight_type, typename strategy::return_type>::execute;
  void execute(
    const unsigned int batches,
    const unsigned int input_height,
    const unsigned int input_width,
    const unsigned int input_channels,
    const PaddingValues &padding,
    const void *const _input,
    const size_t ld_input_col,
    const size_t ld_input_row,
    const size_t ld_input_batch,
    const void *const parameters,
    const unsigned int output_height,
    const unsigned int output_width,
    void *const _output,
    const size_t ld_output_col,
    const size_t ld_output_row,
    const size_t ld_output_batch,
    void *_working_space,
    const unsigned int thread_id,
    const unsigned int n_threads
  ) const override
  {
    strategy strat(this->m_args.cpu_info);
#ifdef CYCLE_PROFILING
    arm_gemm::profiler prof;
#endif
    // Get a unified API for the kernel function
    auto kernel = get_unified_kernel<TInput, TWeight, TOutput>(strat.kernel);

    // Determine what portion of the work to do.
    const unsigned int n_rows_per_thread = arm_gemm::iceildiv(output_height, n_threads);
    const int start_out_height = std::min(thread_id * n_rows_per_thread, output_height);
    const int end_out_height = std::min(start_out_height + n_rows_per_thread, output_height);

    // Cast input and output pointers into the right types
    const TInput *const inptr = static_cast<const TInput *>(_input);
    TOutput *const outptr = static_cast<TOutput *>(_output);

    // Create an array for the input pointers
    const TInput *_inptr_array[strategy::input_rows * strategy::input_cols];
    const TInput **const inptr_array = _inptr_array;

    // Create an array for the output pointers
    TOutput *_outptr_array[strategy::output_rows * strategy::output_cols];
    TOutput **const outptr_array = _outptr_array;

    // Allocate portions of the working space
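    // Passing thread_id as the "n_threads" argument of get_working_size() gives
    // this thread's byte offset into the shared working space, since the size
    // scales linearly with the number of threads.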
    uint8_t *working_space = static_cast<uint8_t *>(_working_space) + get_working_size(thread_id, input_channels);

    TOutput *const output_buffer = reinterpret_cast<TOutput *>(working_space);
    working_space += sizeof_output_buffer(input_channels * this->m_args.channel_multiplier);

    TInput *const input_buffer = reinterpret_cast<TInput *>(working_space);
    working_space += sizeof_input_buffer(input_channels);

    const int32_t *const bias_ptr = (m_qp.bias == nullptr) ? reinterpret_cast<int32_t *>(working_space)
                                                           : m_qp.bias;
    working_space += sizeof_bias_buffer(input_channels * this->m_args.channel_multiplier);

    const int32_t *const requant_mul_vec = !m_qp.per_channel_requant ? reinterpret_cast<int32_t *>(working_space)
                                                                     : m_qp.per_channel_muls;
    working_space += sizeof_requant_mul_buffer(input_channels * this->m_args.channel_multiplier);

    const int32_t *const requant_shift_vec = !m_qp.per_channel_requant ? reinterpret_cast<int32_t *>(working_space)
                                                                       : m_qp.per_channel_right_shifts;

    if (strategy_requires_unravelled_bias_and_quant_params<strategy>())
    {
      // Initialise the bias buffer
      if (m_qp.bias == nullptr)
      {
        for (unsigned int c = 0; c < input_channels * this->m_args.channel_multiplier; c++)
        {
          const_cast<int32_t *>(bias_ptr)[c] = 0;
        }
      }

      // Initialise the requantisation parameters
      if (!m_qp.per_channel_requant)
      {
        for (unsigned int c = 0; c < input_channels * this->m_args.channel_multiplier; c++)
        {
          const_cast<int32_t *>(requant_mul_vec)[c] = m_qp.per_layer_mul;
          const_cast<int32_t *>(requant_shift_vec)[c] = m_qp.per_layer_right_shift;
        }
      }
    }

    // Initialise the input buffer
    for (unsigned int c = 0; c < input_channels; c++)
    {
      input_buffer[c] = static_cast<TInput>(m_qp.a_offset);
    }

    // For each output tile, construct the requisite set of pointers and call
    // into the kernel.
    for (unsigned int batch = 0; batch < batches; batch++)
    {
      // Get batch pointers
      const auto inptr_batch = inptr + batch * ld_input_batch;
      const auto outptr_batch = outptr + batch * ld_output_batch;

      for (int start_out_i = start_out_height;
           start_out_i < end_out_height;
           start_out_i += static_cast<int>(strategy::output_rows))
      {
        const int end_out_i = start_out_i + strategy::output_rows;
        const int start_in_i = start_out_i * strategy::stride_rows - padding.top;
        const int end_in_i = start_in_i + strategy::input_rows;

        // Compute top/bottom padding
        const auto pad_top = static_cast<unsigned int>(-std::min(start_in_i, 0));
        const auto pad_bottom = static_cast<unsigned int>(-std::min(static_cast<int>(input_height) - end_in_i, 0));
        const unsigned int valid_output_rows = std::min(
          end_out_i - start_out_i,
          static_cast<int>(output_height) - start_out_i
        );

        // Fill the input pointer array with padding values
        for (auto index = 0u; index < strategy::input_rows * strategy::input_cols; index++)
        {
          inptr_array[index] = input_buffer;
        }

        for (int start_out_j = 0; start_out_j < static_cast<int>(output_width);)
        {
          const int start_in_j = start_out_j * strategy::stride_cols - this->m_args.padding.left;
          const int pad_left = -std::min(0, start_in_j);

          const int end_out_j = start_out_j + strategy::output_cols;
          const int end_in_j = start_in_j + strategy::input_cols;

          const auto pad_right = static_cast<unsigned int>(-std::min(static_cast<int>(input_width) - end_in_j, 0));
          const unsigned int valid_output_cols = std::min(
            end_out_j - start_out_j,
            static_cast<int>(output_width) - start_out_j
          );

          // Construct the input pointer array - fill the array with pointers to
          // the input buffer and then fill in the required values.
          for (auto i = pad_top; i < strategy::input_rows - pad_bottom; i++)
          {
            // Can skip over the left padding because we will have either the
            // same or less than the previous tile.
            unsigned int j = pad_left;
            const TInput *colptr = inptr_batch + (start_in_i + i) * ld_input_row + (start_in_j + j) * ld_input_col;
            const TInput **ptrs = inptr_array + i * strategy::input_cols + j;
            for (; j < strategy::input_cols - pad_right; j++)
            {
              *(ptrs++) = colptr;
              colptr += ld_input_col;
            }
            for (; j < strategy::input_cols; j++)
            {
              *(ptrs++) = input_buffer;
            }
          }

          // Construct the output pointer array.
          TOutput **outptr_pos = outptr_array;
          for (auto i = 0u; i < valid_output_rows; i++)
          {
            unsigned int j = 0u;
            TOutput *colptr = outptr_batch + (start_out_i + i) * ld_output_row + start_out_j * ld_output_col;
            for (; j < valid_output_cols; j++)
            {
              *(outptr_pos++) = colptr;
              colptr += ld_output_col;
            }
            for (; j < strategy::output_cols; j++)
            {
              *(outptr_pos++) = output_buffer;
            }
          }
          for (auto i = valid_output_rows; i < strategy::output_rows; i++)
          {
            for (auto j = 0u; j < strategy::output_cols; j++)
            {
              *(outptr_pos++) = output_buffer;
            }
          }

          start_out_j += strategy::output_cols;

#ifdef CYCLE_PROFILING
          // TODO Work number
          auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(strategy::output_rows * strategy::output_cols * this->m_args.kernel_rows * this->m_args.kernel_cols));
#endif
          kernel(
            this->m_args.input_channels,
            inptr_array,
            reinterpret_cast<const TWeight *>(parameters),
            bias_ptr, m_qp, requant_mul_vec, requant_shift_vec,
            outptr_array
          );
        }
      }
    }
  }
};
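
// A minimal usage sketch (illustrative only: the concrete strategy type, the
// construction of DepthwiseArgs / arm_gemm::Requantize32 and the buffer
// handling are assumptions made for the example, not prescribed by this header):
//
//   DepthwiseDepthfirstQuantized<SomeQuantizedStrategy> dwc(args, qp);
//   std::vector<uint8_t> params(dwc.get_storage_size());
//   dwc.pack_parameters(params.data(), bias, weights, ld_weight_col, ld_weight_row);
//   std::vector<uint8_t> ws(dwc.get_working_size(n_threads, args.input_channels));
//   dwc.execute(..., ws.data(), thread_id, n_threads);  // one call per thread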

}  // namespace depthwise
}  // namespace arm_conv