/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__
#define __ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__

#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/NEON/kernels/convolution/common/convolution.hpp"
#include "arm_compute/core/NEON/kernels/convolution/common/tensor.hpp"
#include "arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp"
#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"

namespace arm_compute
{
class ITensor;

/** Interface for the NEON kernel to perform Winograd input transform. */
template <typename T>
class INEWinogradLayerTransformInputKernel : public INEKernel
{
public:
    /** Determine how much memory (in units of TIn) to allocate for the
     * transformed input.
     *
     * @param[in] n_batches    Number of batches in the input tensor.
     * @param[in] n_channels   Number of feature maps in the input tensor.
     * @param[in] n_rows       Number of rows in each feature map.
     * @param[in] n_cols       Number of columns in each feature map.
     * @param[in] same_padding Use "SAME" padding, otherwise use "VALID".
     *
     * @return Storage size (in units of TIn) required.
     */
    virtual unsigned int get_input_storage_size(int n_batches, int n_channels, int n_rows, int n_cols, bool same_padding) const = 0;

    /** Gets the stride between matrices in the input workspace.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] input_shape  The shape of the input tensor.
     * @param[in] padding_type The type of padding to be used.
     *
     * @return Stride expressed in bytes.
     */
    virtual int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const = 0;

    /** Configure the input transform kernel.
     *
     * @param[in]  input         Input tensor data
     * @param[in]  n_batches     Number of batches in input tensor.
     * @param[in]  n_rows        Number of rows in input tensor.
     * @param[in]  n_cols        Number of columns in input tensor.
     * @param[in]  n_channels    Number of channels in input tensor.
     * @param[in]  padding       Padding type.
     * @param[out] output        Base of output matrices.
     * @param[in]  matrix_stride Stride between output matrices.
     */
    virtual void configure(const T *const input, const int n_batches, const int n_rows, const int n_cols, const int n_channels, const PaddingType padding, T *const output, const int matrix_stride) = 0;

    /** Destructor */
    virtual ~INEWinogradLayerTransformInputKernel()
    {
    }
};

/** NEON kernel to perform Winograd input transform. */
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerTransformInputKernel : public INEWinogradLayerTransformInputKernel<T>
{
public:
    /** Determine how much memory (in units of TIn) to allocate for the
     * transformed input.
     *
     * @param[in] n_batches    Number of batches in the input tensor.
     * @param[in] n_channels   Number of feature maps in the input tensor.
     * @param[in] n_rows       Number of rows in each feature map.
     * @param[in] n_cols       Number of columns in each feature map.
     * @param[in] same_padding Use "SAME" padding, otherwise use "VALID".
     *
     * @return Storage size (in units of TIn) required.
     */
    unsigned int get_input_storage_size(
        int  n_batches,
        int  n_channels,
        int  n_rows,
        int  n_cols,
        bool same_padding) const override;

    /** Gets the stride between matrices in the input workspace.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] input_shape  The shape of the input tensor.
     * @param[in] padding_type The type of padding to be used.
     *
     * @return Stride expressed in bytes.
     */
    int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const override;

    /** Default constructor */
    NEWinogradLayerTransformInputKernel();

    const char *name() const override
    {
        return "NEWinogradLayerTransformInputKernel";
    }

    /** Configure the input transform kernel.
     *
     * @param[in]  input         Input tensor data. Data types supported: F32.
     * @param[in]  n_batches     Number of batches in input tensor.
     * @param[in]  n_rows        Number of rows in input tensor.
     * @param[in]  n_cols        Number of columns in input tensor.
     * @param[in]  n_channels    Number of channels in input tensor.
     * @param[in]  padding       Padding type.
     * @param[out] output        Base of output matrices.
     * @param[in]  matrix_stride Stride between output matrices.
     */
    void configure(
        const T *const    input,
        const int         n_batches,
        const int         n_rows,
        const int         n_cols,
        const int         n_channels,
        const PaddingType padding,
        T *const          output,
        const int         matrix_stride) override;

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;
    bool is_parallelisable() const override;

    /** Winograd base kernel */
    using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
    /** Winograd convolution kernel */
    using WinogradConv = typename WinogradBase::template Convolution<T, T>;

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformInputKernel
     *
     * @param[in] input       First tensor input info. Data types supported: F32.
     * @param[in] output      Output tensor info. Data types supported: same as @p input.
     * @param[in] conv_info   Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
     * @param[in] kernel_dims Kernel dimensions. Currently only 3x3 and 5x5 kernels are supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PadStrideInfo &conv_info, const Size2D &kernel_dims);

private:
    using InputTransform = typename WinogradBase::template InputTransform<T>;
    std::unique_ptr<InputTransform> _transform;
};
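
// Usage sketch (illustrative only, not part of the library): transform a
// 1-batch, 56x56, 64-channel float NHWC input for an F(2x2, 3x3) Winograd
// convolution. `input_ptr` and `matrix_stride` are hypothetical placeholders
// (the stride would normally come from get_matrix_stride()), PADDING_SAME is
// assumed to be the PaddingType enumerator from convolution.hpp, and the
// scheduler call mirrors how other NEON kernels are dispatched.
//
//     NEWinogradLayerTransformInputKernel<float, 2, 2, 3, 3> input_transform;
//     const unsigned int storage = input_transform.get_input_storage_size(1, 64, 56, 56, true);
//     std::vector<float> workspace(storage);
//     input_transform.configure(input_ptr, 1, 56, 56, 64, PADDING_SAME, workspace.data(), matrix_stride);
//     NEScheduler::get().schedule(&input_transform, Window::DimX);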

/** Interface for the NEON kernel to perform Winograd output transform. */
template <typename T>
class INEWinogradLayerTransformOutputKernel : public INEKernel
{
public:
    /** Determine how much memory (in units of TOut) to allocate for the
     * (Winograd domain) output.
     *
     * @param[in] n_batches         Number of batches in the output tensor.
     * @param[in] n_rows            Number of rows in each feature map of the input tensor.
     * @param[in] n_cols            Number of columns in each feature map of the input tensor.
     * @param[in] n_output_channels Number of feature maps in the output tensor.
     * @param[in] same_padding      Use "SAME" padding, otherwise use "VALID".
     *
     * @return Storage size (in units of TOut) required.
     */
    virtual unsigned int get_output_storage_size(int n_batches, int n_rows, int n_cols, int n_output_channels, bool same_padding) const = 0;

    /** Gets the stride between matrices in the output workspace.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] input_shape  The shape of the input tensor.
     * @param[in] padding_type The type of padding to be used.
     *
     * @return Stride expressed in bytes.
     */
    virtual int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const = 0;

    /** Get the output shape of a convolution.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] in_shape     The shape of the input tensor.
     * @param[in] padding      The type of padding to be used.
     *
     * @return Shape of the output tensor.
     */
    virtual Tensor4DShape get_output_shape(const KernelShape &kernel_shape, const Tensor4DShape &in_shape, const PaddingType padding) const = 0;

    /** Configure the output transform kernel.
     *
     * @param[in]  biases              Pointer to the biases tensor.
     * @param[in]  output_workingspace Pointer to working space for the output tensor in the Winograd domain.
     * @param[in]  matrix_stride       Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>::get_output_matrix_stride()
     * @param[out] output              Pointer to NHWC ordered output tensor, in the spatial domain.
     * @param[in]  n_batches           Number of batches in the input tensor.
     * @param[in]  n_rows              Number of rows in output tensor.
     * @param[in]  n_cols              Number of columns in output tensor.
     * @param[in]  n_channels          Number of feature maps in the output tensor.
     */
    virtual void configure(
        const ITensor *biases,
        const T *const output_workingspace,
        const int      matrix_stride,
        T *const       output,
        const int      n_batches,
        const int      n_rows,
        const int      n_cols,
        const int      n_channels) = 0;

    virtual ~INEWinogradLayerTransformOutputKernel()
    {
    }
};

/** NEON kernel to perform Winograd output transform. */
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerTransformOutputKernel : public INEWinogradLayerTransformOutputKernel<T>
{
public:
    const char *name() const override
    {
        return "NEWinogradLayerTransformOutputKernel";
    }
    /** Constructor */
    NEWinogradLayerTransformOutputKernel();

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerTransformOutputKernel(const NEWinogradLayerTransformOutputKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerTransformOutputKernel &operator=(const NEWinogradLayerTransformOutputKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEWinogradLayerTransformOutputKernel(NEWinogradLayerTransformOutputKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEWinogradLayerTransformOutputKernel &operator=(NEWinogradLayerTransformOutputKernel &&) = default;
    /** Default destructor */
    ~NEWinogradLayerTransformOutputKernel() = default;

    // Inherited methods overridden:
    /** Determine how much memory (in units of TOut) to allocate for the
     * (Winograd domain) output.
     *
     * @param[in] n_batches         Number of batches in the output tensor.
     * @param[in] n_rows            Number of rows in each feature map of the input tensor.
     * @param[in] n_cols            Number of columns in each feature map of the input tensor.
     * @param[in] n_output_channels Number of feature maps in the output tensor.
     * @param[in] same_padding      Use "SAME" padding, otherwise use "VALID".
     *
     * @return Storage size (in units of TOut) required.
     */
    unsigned int get_output_storage_size(int n_batches, int n_rows, int n_cols, int n_output_channels, bool same_padding) const override;

    /** Gets the stride between matrices in the output workspace.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] input_shape  The shape of the input tensor.
     * @param[in] padding_type The type of padding to be used.
     *
     * @return Stride expressed in bytes.
     */
    int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const override;
    /** Get the output shape of a convolution.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] in_shape     The shape of the input tensor.
     * @param[in] padding      The type of padding to be used.
     *
     * @return Shape of the output tensor.
     */
    Tensor4DShape get_output_shape(const KernelShape &kernel_shape, const Tensor4DShape &in_shape, const PaddingType padding) const override;

    /** Configure the output transform kernel.
     *
     * @param[in]  biases              Pointer to the biases tensor.
     * @param[in]  output_workingspace Pointer to working space for the output tensor in the Winograd domain.
     * @param[in]  matrix_stride       Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>::get_output_matrix_stride()
     * @param[out] output              Pointer to NHWC ordered output tensor, in the spatial domain.
     * @param[in]  n_batches           Number of batches in the input tensor.
     * @param[in]  n_rows              Number of rows in output tensor.
     * @param[in]  n_cols              Number of columns in output tensor.
     * @param[in]  n_channels          Number of feature maps in the output tensor.
     */
    void configure(
        const ITensor *biases,
        const T *const output_workingspace,
        const int      matrix_stride,
        T *const       output,
        const int      n_batches,
        const int      n_rows,
        const int      n_cols,
        const int      n_channels) override;

    void run(const Window &window, const ThreadInfo &info) override;
    bool is_parallelisable() const override;

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformOutputKernel
     *
     * @param[in]  input                 Source tensor with shape [C, N, 16, batches] or [C, N, 36, batches]. Data types supported: F32.
     * @param[in]  bias                  Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
     * @param[out] output                Destination tensor with shape [output_convolved_dims.width, output_convolved_dims.height, C, batches]. Data type supported: same as @p input
     * @param[in]  kernel_dims           Kernel dimensions (width and height). Currently only 3x3 and 5x5 kernels are supported.
     * @param[in]  output_convolved_dims Output dimensions after the convolution (width and height)
     * @param[in]  num_tiles             Number of tiles of size 2x2 or 4x4 in the output tensor along the X and Y direction
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const Size2D &kernel_dims, const Size2D &output_convolved_dims, const Size2D &num_tiles);

private:
    using WinogradBase    = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
    using WinogradConv    = typename WinogradBase::template Convolution<T, T>;
    using OutputTransform = typename WinogradBase::template OutputTransform<T>;

    const ITensor *_biases;
    const T       *_output_workspace;
    int            _matrix_stride;
    int            _matrix_row_stride;
    T             *_output;
    int            _n_batches;
    int            _n_rows;
    int            _n_cols;
    int            _n_channels;
};
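
// Usage sketch (illustrative only, not part of the library): write the spatial
// domain result of an F(2x2, 3x3) convolution with a 1-batch, 56x56, 64-channel
// output. `bias_tensor`, `winograd_out`, `out_stride` and `output_ptr` are
// hypothetical placeholders produced by the surrounding convolution function
// (the stride would normally come from get_output_matrix_stride()).
//
//     NEWinogradLayerTransformOutputKernel<float, 2, 2, 3, 3> output_transform;
//     output_transform.configure(bias_tensor, winograd_out, out_stride,
//                                output_ptr, 1, 56, 56, 64);
//     NEScheduler::get().schedule(&output_transform, Window::DimX);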

/** Interface for the NEON kernel to perform Winograd weights transform. */
template <typename T>
class INEWinogradLayerTransformWeightsKernel : public INEKernel
{
public:
    /** Determine how much memory (in units of T) to allocate for the
     * transformed weights.
     *
     * @param[in] n_output_channels Number of output feature maps.
     * @param[in] n_input_channels  Number of input feature maps.
     *
     * @return Storage size (in units of T) required.
     */
    virtual unsigned int get_weight_storage_size(int n_output_channels, int n_input_channels) const = 0;
    /** Gets the stride between matrices in the kernel workspace.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     *
     * @return Stride expressed in bytes.
     */
    virtual int get_matrix_stride(const KernelShape &kernel_shape) const = 0;

    /** Configure the weights transform kernel.
     *
     * @param[in]  weights_hwio      Pointer to the weights tensor
     * @param[out] output            Pointer to working space for the output tensor in the Winograd domain.
     * @param[in]  matrix_stride     Stride across matrices in the output workspace.
     * @param[in]  n_output_channels Number of filters.
     * @param[in]  n_input_channels  Number of channels in each filter.
     */
    virtual void configure(const ITensor *weights_hwio, T *const output, const int matrix_stride, const int n_output_channels, const int n_input_channels) = 0;

    virtual ~INEWinogradLayerTransformWeightsKernel()
    {
    }
};

/** NEON kernel to perform Winograd weights transform. */
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerTransformWeightsKernel final : public INEWinogradLayerTransformWeightsKernel<T>
{
public:
    /** Default constructor. */
    NEWinogradLayerTransformWeightsKernel();
    const char *name() const override
    {
        return "NEWinogradLayerTransformWeightsKernel";
    }

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformWeightsKernel
     *
     * @param[in] input       Source tensor info. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout).
     *                        kernel_x must be 3 and equal to kernel_y. Data types supported: F32.
     * @param[in] output      Destination tensor info. The output is a 3D tensor with dimensions [OFM, IFM, 16] or [OFM, IFM, 36]. Data type supported: same as @p input
     * @param[in] output_tile Output tile. Currently only 2x2 and 4x4 tiles are supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &output_tile);

    // Inherited methods overridden:
    void configure(const ITensor *weights_hwio, T *const output, const int matrix_stride, const int n_output_channels, const int n_input_channels) override;
    unsigned int get_weight_storage_size(int n_output_channels, int n_input_channels) const override;
    int get_matrix_stride(const KernelShape &kernel_shape) const override;
    void run(const Window &window, const ThreadInfo &info) override;
    bool is_parallelisable() const override;

private:
    using WinogradBase     = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
    using WinogradConv     = typename WinogradBase::template Convolution<T, T>;
    using WeightsTransform = typename WinogradBase::template WeightsTransform<T>;
    std::unique_ptr<WeightsTransform> _transform;
};
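
// Usage sketch (illustrative only, not part of the library): transform a 3x3
// kernel with 64 input and 128 output channels for F(2x2, 3x3). `weights_tensor`
// is assumed to be an HWIO-ordered ITensor and `w_stride` to come from
// get_matrix_stride() for the corresponding KernelShape.
//
//     NEWinogradLayerTransformWeightsKernel<float, 2, 2, 3, 3> weights_transform;
//     const unsigned int w_size = weights_transform.get_weight_storage_size(128, 64);
//     std::vector<float> w_workspace(w_size);
//     weights_transform.configure(weights_tensor, w_workspace.data(), w_stride, 128, 64);
//     NEScheduler::get().schedule(&weights_transform, Window::DimX);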

/** Interface for the NEON kernel to perform the batched GEMMs of the Winograd algorithm. */
template <typename TIn, typename TOut>
class INEWinogradLayerBatchedGEMMKernel : public INEKernel
{
public:
    /** Get the number of GEMMs to compute
     */
    virtual unsigned int get_number_gemms() const = 0;
    /** Initialise the kernel
     *
     * @param[in]  n_gemms         Number of GEMMs to compute.
     * @param[in]  M               in_shape.n_batches * tile_rows * tile_cols.
     * @param[in]  K               Number of channels in the input tensor.
     * @param[in]  N               Number of channels in the output tensor.
     * @param[in]  a_matrix_stride Stride between input matrices.
     * @param[in]  a_row_stride    Row stride inside input matrix.
     * @param[in]  b_matrix_stride Stride between weights matrices.
     * @param[in]  b_row_stride    Row stride inside the weights matrix.
     * @param[in]  c_matrix_stride Stride between output matrices.
     * @param[in]  c_row_stride    Row stride inside the output matrix.
     * @param[in]  a_ptr           Input workspace.
     * @param[in]  b_ptr           Kernel workspace.
     * @param[out] c_ptr           Output workspace.
     */
    virtual void configure(
        const unsigned int n_gemms,
        const int M, const int K, const int N,
        const int        a_matrix_stride,
        const int        a_row_stride,
        const int        b_matrix_stride,
        const int        b_row_stride,
        const int        c_matrix_stride,
        const int        c_row_stride,
        const TIn *const a_ptr,
        const TIn *const b_ptr,
        TOut *const      c_ptr) = 0;

    /** Get the number of tiles per row
     */
    virtual int get_output_tile_rows() const = 0;
    /** Get the number of tiles per column
     */
    virtual int get_output_tile_cols() const = 0;
    /** Get the number of blocks
     */
    virtual int get_number_blocks() const = 0;
};

/** NEON kernel to perform the batched GEMMs of the Winograd algorithm. */
template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerBatchedGEMMKernel : public INEWinogradLayerBatchedGEMMKernel<TIn, TOut>
{
public:
    /** Winograd base kernel */
    using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
    /** Winograd convolution kernel */
    using WinogradConv = typename WinogradBase::template Convolution<TIn, TOut>;
    /** Winograd batched blocked GEMM operator */
    using MultiGEMM = winograd::BatchedBlockedGemm<WinogradConv::M_BLOCK, WinogradConv::N_BLOCK, TIn, TOut>;

    const char *name() const override
    {
        return "NEWinogradLayerBatchedGEMMKernel";
    }
    /** Constructor */
    NEWinogradLayerBatchedGEMMKernel();

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerBatchedGEMMKernel(const NEWinogradLayerBatchedGEMMKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerBatchedGEMMKernel &operator=(const NEWinogradLayerBatchedGEMMKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEWinogradLayerBatchedGEMMKernel(NEWinogradLayerBatchedGEMMKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEWinogradLayerBatchedGEMMKernel &operator=(NEWinogradLayerBatchedGEMMKernel &&) = default;
    /** Default destructor. */
    ~NEWinogradLayerBatchedGEMMKernel() = default;

    // Inherited methods overridden:

    unsigned int get_number_gemms() const override;
    int get_output_tile_rows() const override;
    int get_output_tile_cols() const override;
    int get_number_blocks() const override;

    /** Initialise the kernel
     *
     * @param[in]  n_gemms         Number of GEMMs to compute.
     * @param[in]  M               in_shape.n_batches * tile_rows * tile_cols.
     * @param[in]  K               Number of channels in the input tensor.
     * @param[in]  N               Number of channels in the output tensor.
     * @param[in]  a_matrix_stride Stride between input matrices.
     * @param[in]  a_row_stride    Row stride inside input matrix.
     * @param[in]  b_matrix_stride Stride between weights matrices.
     * @param[in]  b_row_stride    Row stride inside the weights matrix.
     * @param[in]  c_matrix_stride Stride between output matrices.
     * @param[in]  c_row_stride    Row stride inside the output matrix.
     * @param[in]  a_ptr           Input workspace.
     * @param[in]  b_ptr           Kernel workspace.
     * @param[out] c_ptr           Output workspace.
     */
    void configure(
        const unsigned int n_gemms,
        const int M, const int K, const int N,
        const int        a_matrix_stride,
        const int        a_row_stride,
        const int        b_matrix_stride,
        const int        b_row_stride,
        const int        c_matrix_stride,
        const int        c_row_stride,
        const TIn *const a_ptr,
        const TIn *const b_ptr,
        TOut *const      c_ptr) override;

    void run(const Window &window, const ThreadInfo &info) override;

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerBatchedGEMMKernel.
     *
     * @param[in]  a         First input tensor (Matrix or Vector A). Data types supported: F32
     * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a.
     * @param[in]  c         Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
     * @param[out] output    Output tensor. Data type supported: same as @p a
     * @param[in]  alpha     Weight of the matrix product
     * @param[in]  beta      Weight of matrix C
     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
     *                       if the reshape of matrix B should happen only for the first run
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensor *c, const ITensorInfo *output, const float alpha, const float beta, const GEMMInfo &gemm_info = GEMMInfo());

private:
    static const int _output_tile_rows = OutputTileRows;
    static const int _output_tile_cols = OutputTileCols;
    std::unique_ptr<MultiGEMM> _gemms;
};
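
// Usage sketch (illustrative only, not part of the library): for F(2x2, 3x3)
// there are 4x4 = 16 Winograd-domain matrices, hence 16 GEMMs. A 1-batch,
// 56x56 input covered by 2x2 output tiles gives 28x28 tiles per feature map,
// so M = 1 * 28 * 28 = 784; with 64 input and 128 output channels, K = 64 and
// N = 128. All stride values and workspace pointers are assumed to come from
// the transform kernels above.
//
//     NEWinogradLayerBatchedGEMMKernel<float, float, 2, 2, 3, 3> gemms;
//     gemms.configure(16, 784, 64, 128,
//                     a_matrix_stride, a_row_stride,
//                     b_matrix_stride, b_row_stride,
//                     c_matrix_stride, c_row_stride,
//                     input_workspace, weights_workspace, output_workspace);
//     NEScheduler::get().schedule(&gemms, Window::DimX);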

} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__*/