Pablo Tello | 8951933 | 2017-11-17 11:52:36 +0000 | [diff] [blame] | 1 | /* |
Pablo Tello | 9ceebbe | 2018-01-10 16:44:13 +0000 | [diff] [blame] | 2 | * Copyright (c) 2017-2018 ARM Limited. |
Pablo Tello | 8951933 | 2017-11-17 11:52:36 +0000 | [diff] [blame] | 3 | * |
| 4 | * SPDX-License-Identifier: MIT |
| 5 | * |
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 7 | * of this software and associated documentation files (the "Software"), to |
| 8 | * deal in the Software without restriction, including without limitation the |
| 9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| 10 | * sell copies of the Software, and to permit persons to whom the Software is |
| 11 | * furnished to do so, subject to the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice shall be included in all |
| 14 | * copies or substantial portions of the Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| 19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 22 | * SOFTWARE. |
| 23 | */ |
| 24 | #ifndef __ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__ |
| 25 | #define __ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__ |
| 26 | |
| 27 | #include "arm_compute/core/NEON/INEKernel.h" |
Georgios Pinitas | 4074c99 | 2018-01-30 18:13:46 +0000 | [diff] [blame] | 28 | #include "arm_compute/core/NEON/kernels/convolution/common/convolution.hpp" |
| 29 | #include "arm_compute/core/NEON/kernels/convolution/common/tensor.hpp" |
| 30 | #include "arm_compute/core/NEON/kernels/convolution/winograd/batched_blocked_gemm.hpp" |
| 31 | #include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp" |
Pablo Tello | 8951933 | 2017-11-17 11:52:36 +0000 | [diff] [blame] | 32 | |
| 33 | namespace arm_compute |
| 34 | { |
| 35 | class ITensor; |
Pablo Tello | 02541fb | 2017-12-15 09:48:59 +0000 | [diff] [blame] | 36 | |
/** Interface for the NEON kernel to perform Winograd input transform. */
template <typename T>
class INEWinogradLayerTransformInputKernel : public INEKernel
{
public:
    /** Determine how much memory (in units of T) to allocate for the
     * transformed input.
     *
     * @param[in] n_batches    Number of batches in the input tensor.
     * @param[in] n_channels   Number of feature maps in the input tensor.
     * @param[in] n_rows       Number of rows in each feature map.
     * @param[in] n_cols       Number of columns in each feature map.
     * @param[in] same_padding Use "SAME" padding, otherwise use "VALID".
     *
     * @return Storage size (in units of T) required.
     */
    virtual unsigned int get_input_storage_size(int n_batches, int n_channels, int n_rows, int n_cols, bool same_padding) const = 0;

    /** Gets the stride between matrices in the input workspace
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] input_shape  The shape of the input tensor.
     * @param[in] padding_type The type of padding to be used.
     *
     * @return Stride expressed in bytes.
     */
    virtual int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const = 0;

    /** Configure the input transform kernel.
     *
     * @param[in]  input         Input tensor data
     * @param[in]  n_batches     Number of batches in input tensor.
     * @param[in]  n_rows        Number of rows in input tensor.
     * @param[in]  n_cols        Number of columns in input tensor.
     * @param[in]  n_channels    Number of channels in input tensor.
     * @param[in]  padding       Padding type.
     * @param[out] output        Base of output matrices.
     * @param[in]  matrix_stride Stride between output matrices.
     */
    virtual void configure(const T *const input, const int n_batches, const int n_rows, const int n_cols, const int n_channels, const PaddingType padding, T *const output, const int matrix_stride) = 0;

    /** Destructor: virtual so derived kernels are destroyed correctly through this interface. */
    virtual ~INEWinogradLayerTransformInputKernel()
    {
    }
};
| 83 | |
Alex Gilday | c357c47 | 2018-03-21 13:54:09 +0000 | [diff] [blame] | 84 | /** NEON kernel to perform Winograd input transform. */ |
Pablo Tello | f6c572c | 2018-02-14 12:47:30 +0000 | [diff] [blame] | 85 | template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols> |
| 86 | class NEWinogradLayerTransformInputKernel : public INEWinogradLayerTransformInputKernel<T> |
| 87 | { |
| 88 | public: |
| 89 | /** Determine how much memory (in units of TIn) to allocate for the |
| 90 | * transformed input. |
| 91 | * |
| 92 | * @param[in] n_batches Number of batches in the input tensor. |
| 93 | * @param[in] n_channels Number of feature maps in the input tensor. |
| 94 | * @param[in] n_rows Number of rows in each feature map. |
| 95 | * @param[in] n_cols Number of columns in each feature map. |
| 96 | * @param[in] same_padding Use "SAME" padding, otherwise use "VALID". |
Alex Gilday | c357c47 | 2018-03-21 13:54:09 +0000 | [diff] [blame] | 97 | * |
| 98 | * @return Storage size (in units of TIn) required. |
Pablo Tello | f6c572c | 2018-02-14 12:47:30 +0000 | [diff] [blame] | 99 | */ |
| 100 | unsigned int get_input_storage_size( |
Pablo Tello | 52140b4 | 2018-01-30 14:48:11 +0000 | [diff] [blame] | 101 | int n_batches, |
| 102 | int n_channels, |
| 103 | int n_rows, |
| 104 | int n_cols, |
Pablo Tello | f6c572c | 2018-02-14 12:47:30 +0000 | [diff] [blame] | 105 | bool same_padding) const override; |
| 106 | |
| 107 | /** Gets the stride between matrices in the input worspace |
| 108 | * |
| 109 | * @param[in] kernel_shape The shape of the weights tensor. |
| 110 | * @param[in] input_shape The shape of the input tensor. |
| 111 | * @param[in] padding_type The type of padding to be used. |
| 112 | * |
| 113 | * @return Stride expressed in bytes. |
| 114 | */ |
| 115 | int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const override; |
Pablo Tello | d6ca478 | 2018-01-23 09:36:04 +0000 | [diff] [blame] | 116 | |
Alex Gilday | c357c47 | 2018-03-21 13:54:09 +0000 | [diff] [blame] | 117 | /** Default constructor */ |
Pablo Tello | 52140b4 | 2018-01-30 14:48:11 +0000 | [diff] [blame] | 118 | NEWinogradLayerTransformInputKernel(); |
Pablo Tello | f6c572c | 2018-02-14 12:47:30 +0000 | [diff] [blame] | 119 | |
Pablo Tello | d6ca478 | 2018-01-23 09:36:04 +0000 | [diff] [blame] | 120 | const char *name() const override |
| 121 | { |
| 122 | return "NEWinogradLayerTransformInputKernel"; |
| 123 | } |
Pablo Tello | 52140b4 | 2018-01-30 14:48:11 +0000 | [diff] [blame] | 124 | |
| 125 | /** Configure the output transform kernel. |
| 126 | * |
Vidhya Sudhan Loganathan | 3ca9786 | 2018-04-23 08:20:04 +0100 | [diff] [blame] | 127 | * @param[in] input Input tensor data. Data types supported: F32. |
Pablo Tello | 52140b4 | 2018-01-30 14:48:11 +0000 | [diff] [blame] | 128 | * @param[in] n_batches Number of batches in input tensor. |
| 129 | * @param[in] n_rows Number of rows in input tensor. |
| 130 | * @param[in] n_cols Number of columns in input tensor. |
| 131 | * @param[in] n_channels Number of channels in input tensor. |
| 132 | * @param[in] padding Padding type. |
| 133 | * @param[out] output Base of output matrices. |
| 134 | * @param[in] matrix_stride Stride between output matrices. |
| 135 | */ |
| 136 | void configure( |
Pablo Tello | f6c572c | 2018-02-14 12:47:30 +0000 | [diff] [blame] | 137 | const T *const input, |
| 138 | const int n_batches, |
| 139 | const int n_rows, |
| 140 | const int n_cols, |
| 141 | const int n_channels, |
| 142 | const PaddingType padding, |
| 143 | T *const output, |
| 144 | const int matrix_stride) override; |
Pablo Tello | 52140b4 | 2018-01-30 14:48:11 +0000 | [diff] [blame] | 145 | |
Pablo Tello | d6ca478 | 2018-01-23 09:36:04 +0000 | [diff] [blame] | 146 | // Inherited methods overridden: |
Pablo Tello | d6ca478 | 2018-01-23 09:36:04 +0000 | [diff] [blame] | 147 | void run(const Window &window, const ThreadInfo &info) override; |
| 148 | bool is_parallelisable() const override; |
Pablo Tello | 52140b4 | 2018-01-30 14:48:11 +0000 | [diff] [blame] | 149 | |
Alex Gilday | c357c47 | 2018-03-21 13:54:09 +0000 | [diff] [blame] | 150 | /** Winograd base kernel */ |
Pablo Tello | f6c572c | 2018-02-14 12:47:30 +0000 | [diff] [blame] | 151 | using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelCols, KernelCols>; |
Alex Gilday | c357c47 | 2018-03-21 13:54:09 +0000 | [diff] [blame] | 152 | /** Winograd convolution kernel */ |
Pablo Tello | f6c572c | 2018-02-14 12:47:30 +0000 | [diff] [blame] | 153 | using WinogradConv = typename WinogradBase::template Convolution<T, T>; |
| 154 | |
Vidhya Sudhan Loganathan | 3ca9786 | 2018-04-23 08:20:04 +0100 | [diff] [blame] | 155 | /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformInputKernel |
| 156 | * |
| 157 | * @param[in] input First tensor input info. Data types supported: F32. |
| 158 | * @param[in] output Output tensor info. Data types supported: same as @p input. |
| 159 | * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported. |
| 160 | * @param[in] kernel_dims Kernel dimensions. Currently only 3x3 and 5x5 kernels are supported |
| 161 | * |
| 162 | * @return a status |
| 163 | */ |
| 164 | static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PadStrideInfo &conv_info, const Size2D &kernel_dims); |
| 165 | |
Pablo Tello | 52140b4 | 2018-01-30 14:48:11 +0000 | [diff] [blame] | 166 | private: |
Pablo Tello | f6c572c | 2018-02-14 12:47:30 +0000 | [diff] [blame] | 167 | using InputTransform = typename WinogradBase::template InputTransform<T>; |
Pablo Tello | 52140b4 | 2018-01-30 14:48:11 +0000 | [diff] [blame] | 168 | std::unique_ptr<InputTransform> _transform; |
Pablo Tello | d6ca478 | 2018-01-23 09:36:04 +0000 | [diff] [blame] | 169 | }; |
| 170 | |
/** Interface for the NEON kernel to perform Winograd output transform. */
template <typename T>
class INEWinogradLayerTransformOutputKernel : public INEKernel
{
public:
    /** Determine how much memory (in units of T) to allocate for the
     * (Winograd domain) output.
     *
     * @param[in] n_batches         Number of batches in the output tensor.
     * @param[in] n_rows            Number of rows in each feature map of the input tensor.
     * @param[in] n_cols            Number of columns in each feature map of the input tensor.
     * @param[in] n_output_channels Number of feature maps in the output tensor.
     * @param[in] same_padding      Use "SAME" padding, otherwise use "VALID".
     *
     * @return Storage size (in units of T) required.
     */
    virtual unsigned int get_output_storage_size(int n_batches, int n_rows, int n_cols, int n_output_channels, bool same_padding) const = 0;

    /** Gets the stride between matrices in the output workspace
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] input_shape  The shape of the input tensor.
     * @param[in] padding_type The type of padding to be used.
     *
     * @return Stride expressed in bytes.
     */
    virtual int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const = 0;

    /** Get the output shape of a convolution.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] in_shape     The shape of the input tensor.
     * @param[in] padding      The type of padding to be used.
     *
     * @return Shape of the output tensor (not a stride).
     */
    virtual Tensor4DShape get_output_shape(const KernelShape &kernel_shape, const Tensor4DShape &in_shape, const PaddingType padding) const = 0;

    /** Configure the output transform kernel.
     *
     * @param[in]  biases              Pointer to the biases tensor.
     * @param[in]  output_workingspace Pointer to working space for the output tensor in the Winograd domain.
     * @param[in]  matrix_stride       Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>::get_output_matrix_stride()
     * @param[out] output              Pointer to NHWC ordered output tensor, in the spatial domain.
     * @param[in]  n_batches           Number of batches in the input tensor.
     * @param[in]  n_rows              Number of rows in output tensor.
     * @param[in]  n_cols              Number of columns in output tensor.
     * @param[in]  n_channels          Number of feature maps in the output tensor.
     */
    virtual void configure(
        const ITensor *biases,
        const T *const output_workingspace,
        const int      matrix_stride,
        T *const       output,
        const int      n_batches,
        const int      n_rows,
        const int      n_cols,
        const int      n_channels) = 0;

    /** Destructor: virtual so derived kernels are destroyed correctly through this interface. */
    virtual ~INEWinogradLayerTransformOutputKernel()
    {
    }
};
| 234 | |
/** NEON kernel to perform Winograd output transform. */
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerTransformOutputKernel : public INEWinogradLayerTransformOutputKernel<T>
{
public:
    const char *name() const override
    {
        return "NEWinogradLayerTransformOutputKernel";
    }
    /** Constructor */
    NEWinogradLayerTransformOutputKernel();

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerTransformOutputKernel(const NEWinogradLayerTransformOutputKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerTransformOutputKernel &operator=(const NEWinogradLayerTransformOutputKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEWinogradLayerTransformOutputKernel(NEWinogradLayerTransformOutputKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEWinogradLayerTransformOutputKernel &operator=(NEWinogradLayerTransformOutputKernel &&) = default;
    /** Default destructor */
    ~NEWinogradLayerTransformOutputKernel() = default;

    // Inherited methods overridden:
    /** Determine how much memory (in units of T) to allocate for the
     * (Winograd domain) output.
     *
     * @param[in] n_batches         Number of batches in the output tensor.
     * @param[in] n_rows            Number of rows in each feature map of the input tensor.
     * @param[in] n_cols            Number of columns in each feature map of the input tensor.
     * @param[in] n_output_channels Number of feature maps in the output tensor.
     * @param[in] same_padding      Use "SAME" padding, otherwise use "VALID".
     *
     * @return Storage size (in units of T) required.
     */
    unsigned int get_output_storage_size(int n_batches, int n_rows, int n_cols, int n_output_channels, bool same_padding) const override;

    /** Gets the stride between matrices in the output workspace
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] input_shape  The shape of the input tensor.
     * @param[in] padding_type The type of padding to be used.
     *
     * @return Stride expressed in bytes.
     */
    int get_matrix_stride(const KernelShape &kernel_shape, const Tensor4DShape &input_shape, const PaddingType padding_type) const override;
    /** Get the output shape of a convolution.
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     * @param[in] in_shape     The shape of the input tensor.
     * @param[in] padding      The type of padding to be used.
     *
     * @return Shape of the output tensor (not a stride).
     */
    Tensor4DShape get_output_shape(const KernelShape &kernel_shape, const Tensor4DShape &in_shape, const PaddingType padding) const override;

    /** Configure the output transform kernel.
     *
     * @param[in]  biases              Pointer to the biases tensor.
     * @param[in]  output_workingspace Pointer to working space for the output tensor in the Winograd domain.
     * @param[in]  matrix_stride       Output matrix stride, can be computed with winograd::WinogradGEMM<2, 2, 3, 3>::Convolution<float, float>::get_output_matrix_stride()
     * @param[out] output              Pointer to NHWC ordered output tensor, in the spatial domain.
     * @param[in]  n_batches           Number of batches in the input tensor.
     * @param[in]  n_rows              Number of rows in output tensor.
     * @param[in]  n_cols              Number of columns in output tensor.
     * @param[in]  n_channels          Number of feature maps in the output tensor.
     */
    void configure(
        const ITensor *biases,
        const T *const output_workingspace,
        const int      matrix_stride,
        T *const       output,
        const int      n_batches,
        const int      n_rows,
        const int      n_cols,
        const int      n_channels) override;

    void run(const Window &window, const ThreadInfo &info) override;
    bool is_parallelisable() const override;

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformOutputKernel
     *
     * @param[in]  input                 Source tensor with shape [C, N, 16, batches] or [C, N, 36, batches]. Data types supported: F32.
     * @param[in]  bias                  Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
     * @param[out] output                Destination tensor with shape [output_convolved_dims.width, output_convolved_dims.height, C, batches]. Data type supported: same as @p input
     * @param[in]  kernel_dims           Kernel dimensions (Width and height). Currently only supported 3x3 and 5x5 kernels
     * @param[in]  output_convolved_dims Output dimensions after the convolution (Width and height)
     * @param[in]  num_tiles             Number of tiles of size 2x2 or 4x4 in the output tensor along the X and Y direction
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const Size2D &kernel_dims, const Size2D &output_convolved_dims, const Size2D &num_tiles);

private:
    using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
    using WinogradConv = typename WinogradBase::template Convolution<T, T>;
    using OutputTransform = typename WinogradBase::template OutputTransform<T>;

    // State captured by configure(); non-owning pointers into caller-provided tensors/workspaces.
    const ITensor *_biases;           ///< Biases tensor (may be nullptr per validate()).
    const T       *_output_workspace; ///< Winograd-domain output working space.
    int _matrix_stride;               ///< Stride between output matrices.
    int _matrix_row_stride;           ///< Row stride inside an output matrix (presumably; confirm in .cpp).
    T  *_output;                      ///< Spatial-domain NHWC output.
    int _n_batches;
    int _n_rows;
    int _n_cols;
    int _n_channels;
};
| 343 | |
/** Interface for the NEON kernel to perform Winograd weights transform. */
template <typename T>
class INEWinogradLayerTransformWeightsKernel : public INEKernel
{
public:
    /** Determine how much memory (in units of T) to allocate for the
     * transformed weights.
     *
     * @param[in] n_output_channels Number of output feature maps.
     * @param[in] n_input_channels  Number of input feature maps.
     *
     * @return Storage size (in units of T) required.
     */
    virtual unsigned int get_weight_storage_size(int n_output_channels, int n_input_channels) const = 0;
    /** Gets the stride between matrices in the kernel workspace
     *
     * @param[in] kernel_shape The shape of the weights tensor.
     *
     * @return Stride expressed in bytes.
     */
    virtual int get_matrix_stride(const KernelShape &kernel_shape) const = 0;

    /** Configure the weights transform kernel.
     *
     * @param[in] weights_hwio      Pointer to the weights tensor
     * @param[in] output            Pointer to working space for the output tensor in the Winograd domain.
     * @param[in] matrix_stride     Stride across matrices in the output workspace.
     * @param[in] n_output_channels Number of filters.
     * @param[in] n_input_channels  Number of channels in each filter.
     */
    virtual void configure(const ITensor *weights_hwio, T *const output, const int matrix_stride, const int n_output_channels, const int n_input_channels) = 0;

    /** Destructor: virtual so derived kernels are destroyed correctly through this interface. */
    virtual ~INEWinogradLayerTransformWeightsKernel()
    {
    }
};
| 380 | |
/** NEON kernel to perform Winograd weights transform. */
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerTransformWeightsKernel final : public INEWinogradLayerTransformWeightsKernel<T>
{
public:
    /** Default constructor. */
    NEWinogradLayerTransformWeightsKernel();
    const char *name() const override
    {
        return "NEWinogradLayerTransformWeightsKernel";
    }

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerTransformWeightsKernel
     *
     * @param[in] input       Source tensor info. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout).
     *                        kernel_x must be 3 and equal to kernel_y. Data types supported: F32.
     * @param[in] output      Destination tensor info. The output is a 3D tensor with dimensions [OFM, IFM, 16] or [OFM, IFM, 36]. Data type supported: same as @p input
     * @param[in] output_tile Output tile. Currently only 2x2 and 4x4 tiles are supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &output_tile);

    // Inherited methods overridden:
    void configure(const ITensor *weights_hwio, T *const output, const int matrix_stride, const int n_output_channels, const int n_input_channels) override;
    unsigned int get_weight_storage_size(int n_output_channels, int n_input_channels) const override;
    int get_matrix_stride(const KernelShape &kernel_shape) const override;
    void run(const Window &window, const ThreadInfo &info) override;
    bool is_parallelisable() const override;

private:
    using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
    using WinogradConv = typename WinogradBase::template Convolution<T, T>;
    using WeightsTransform = typename WinogradBase::template WeightsTransform<T>;
    // Implementation object performing the actual transform; presumably created in configure() — confirm in the .cpp.
    std::unique_ptr<WeightsTransform> _transform;
};
| 417 | |
Alex Gilday | c357c47 | 2018-03-21 13:54:09 +0000 | [diff] [blame] | 418 | /** Interface for the NEON kernel to perform Winograd. */ |
Pablo Tello | f6c572c | 2018-02-14 12:47:30 +0000 | [diff] [blame] | 419 | template <typename TIn, typename TOut> |
| 420 | class INEWinogradLayerBatchedGEMMKernel : public INEKernel |
| 421 | { |
| 422 | public: |
| 423 | /** Get the number of GEMMs to compute |
| 424 | */ |
| 425 | virtual unsigned int get_number_gemms() const = 0; |
| 426 | /** Initialise the kernel |
| 427 | * |
| 428 | * @param[in] n_gemms Number of GEMMs to compute. |
| 429 | * @param[in] M in_shape.n_batches * tile_rows * tile_cols. |
| 430 | * @param[in] K Number of channels in the input tensor. |
| 431 | * @param[in] N Number of channels in the output tensor. |
| 432 | * @param[in] a_matrix_stride Stride between input matrices. |
| 433 | * @param[in] a_row_stride Row stride inside input matrix. |
| 434 | * @param[in] b_matrix_stride Stride between weights matrices. |
| 435 | * @param[in] b_row_stride Row stride inside the weights matrix. |
| 436 | * @param[in] c_matrix_stride Stride between output matrices. |
| 437 | * @param[in] c_row_stride Row stride inside the output matrix. |
| 438 | * @param[out] a_ptr Input workspace. |
| 439 | * @param[out] b_ptr Kernel workspace. |
| 440 | * @param[out] c_ptr Output workspace. |
| 441 | */ |
| 442 | virtual void configure( |
| 443 | const unsigned int n_gemms, |
| 444 | const int M, const int K, const int N, |
| 445 | const int a_matrix_stride, |
| 446 | const int a_row_stride, |
| 447 | const int b_matrix_stride, |
| 448 | const int b_row_stride, |
| 449 | const int c_matrix_stride, |
| 450 | const int c_row_stride, |
| 451 | const TIn *const a_ptr, |
| 452 | const TIn *const b_ptr, |
| 453 | TOut *const c_ptr) = 0; |
| 454 | |
| 455 | /** Get the number of tiles per row |
| 456 | */ |
| 457 | virtual int get_output_tile_rows() const = 0; |
| 458 | /** Get the number of tiles per columns |
| 459 | */ |
| 460 | virtual int get_output_tile_cols() const = 0; |
| 461 | /** Get the number of blocks |
| 462 | */ |
| 463 | virtual int get_number_blocks() const = 0; |
| 464 | }; |
| 465 | |
/** NEON kernel to perform Winograd. */
template <typename TIn, typename TOut, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class NEWinogradLayerBatchedGEMMKernel : public INEWinogradLayerBatchedGEMMKernel<TIn, TOut>
{
public:
    /** Winograd base kernel */
    using WinogradBase = winograd::WinogradGEMM<OutputTileRows, OutputTileCols, KernelRows, KernelCols>;
    /** Winograd convolution kernel */
    using WinogradConv = typename WinogradBase::template Convolution<TIn, TOut>;
    /** Winograd batched blocked GEMM operator */
    using MultiGEMM = winograd::BatchedBlockedGemm<WinogradConv::M_BLOCK, WinogradConv::N_BLOCK, TIn, TOut>;

    /** Kernel name, e.g. for logging/profiling. */
    const char *name() const override
    {
        return "NEWinogradLayerBatchedGEMMKernel";
    }
    /** Constructor */
    NEWinogradLayerBatchedGEMMKernel();

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerBatchedGEMMKernel(const NEWinogradLayerBatchedGEMMKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayerBatchedGEMMKernel &operator=(const NEWinogradLayerBatchedGEMMKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEWinogradLayerBatchedGEMMKernel(NEWinogradLayerBatchedGEMMKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEWinogradLayerBatchedGEMMKernel &operator=(NEWinogradLayerBatchedGEMMKernel &&) = default;
    /** Default destructor. */
    ~NEWinogradLayerBatchedGEMMKernel() = default;

    // Inherited methods overridden:

    unsigned int get_number_gemms() const override;
    int          get_output_tile_rows() const override;
    int          get_output_tile_cols() const override;
    int          get_number_blocks() const override;

    /** Initialise the kernel
     *
     * @param[in]  n_gemms         Number of GEMMs to compute.
     * @param[in]  M               in_shape.n_batches * tile_rows * tile_cols.
     * @param[in]  K               Number of channels in the input tensor.
     * @param[in]  N               Number of channels in the output tensor.
     * @param[in]  a_matrix_stride Stride between input matrices.
     * @param[in]  a_row_stride    Row stride inside input matrix.
     * @param[in]  b_matrix_stride Stride between weights matrices.
     * @param[in]  b_row_stride    Row stride inside the weights matrix.
     * @param[in]  c_matrix_stride Stride between output matrices.
     * @param[in]  c_row_stride    Row stride inside the output matrix.
     * @param[in]  a_ptr           Input workspace (read-only: pointee is const).
     * @param[in]  b_ptr           Kernel workspace (read-only: pointee is const).
     * @param[out] c_ptr           Output workspace.
     */
    void configure(
        const unsigned int n_gemms,
        const int M, const int K, const int N,
        const int        a_matrix_stride,
        const int        a_row_stride,
        const int        b_matrix_stride,
        const int        b_row_stride,
        const int        c_matrix_stride,
        const int        c_row_stride,
        const TIn *const a_ptr,
        const TIn *const b_ptr,
        TOut *const      c_ptr) override;

    // Executes the scheduled slice of the batched GEMM for the given window.
    void run(const Window &window, const ThreadInfo &info) override;

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayerBatchedGEMMKernel.
     *
     * @param[in]  a         First input tensor (Matrix or Vector A). Data types supported: F32
     * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a.
     * @param[in]  c         Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
     * @param[out] output    Output tensor. Data type supported: same as @p a
     * @param[in]  alpha     Weight of the matrix product
     * @param[in]  beta      Weight of matrix C
     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
     *                       if the reshape of matrix B should happen only for the first run
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensor *c, const ITensorInfo *output, const float alpha, const float beta, const GEMMInfo &gemm_info = GEMMInfo());

private:
    // Output tile dimensions fixed at compile time by the template arguments.
    static const int _output_tile_rows = OutputTileRows;
    static const int _output_tile_cols = OutputTileCols;
    // Underlying batched blocked GEMM operator; owned exclusively by this kernel.
    std::unique_ptr<MultiGEMM> _gemms;
};
| 554 | |
| 555 | } // namespace arm_compute |
| 556 | #endif /*__ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__*/ |