/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "support/ToolchainSupport.h"

#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"

namespace arm_compute
{
namespace
{
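// Note: the transform kernels below are instantiated as
// <T, OutputTileRows, OutputTileCols, KernelRows, KernelCols>, so each
// specialisation implements one Winograd configuration F(m x n, r x s);
// e.g. <float, 4, 4, 3, 3> is F(4x4, 3x3).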
inline Status validate_kernel_3x3(const Size2D &input_dims, const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
                                  const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
    if(input_dims.width > 4 && input_dims.height > 4)
    {
        ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 4, 4, 3, 3>::validate(input, input0, winograd_info)));
        ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 4, 4, 3, 3>::validate(weights, input1, winograd_info)));
        ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 4, 4, 3, 3>::validate(batched_mm_output, biases, output, winograd_info)));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 2, 3, 3>::validate(input, input0, winograd_info)));
        ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 2, 3, 3>::validate(weights, input1, winograd_info)));
        ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 2, 3, 3>::validate(batched_mm_output, biases, output, winograd_info)));
    }

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }
    return Status{};
}

inline Status validate_kernel_5x5(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
                                  const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 2, 5, 5>::validate(input, input0, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 2, 5, 5>::validate(weights, input1, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 2, 5, 5>::validate(batched_mm_output, biases, output, winograd_info)));
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }
    return Status{};
}

inline Status validate_kernel_3x1(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
                                  const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 1, 6, 1, 3>::validate(input, input0, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 1, 6, 1, 3>::validate(weights, input1, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 1, 6, 1, 3>::validate(batched_mm_output, biases, output, winograd_info)));
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }
    return Status{};
}

inline Status validate_kernel_1x3(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
                                  const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 6, 1, 3, 1>::validate(input, input0, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 6, 1, 3, 1>::validate(weights, input1, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 6, 1, 3, 1>::validate(batched_mm_output, biases, output, winograd_info)));

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }
    return Status{};
}

inline Status validate_kernel_5x1(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
                                  const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 1, 4, 1, 5>::validate(input, input0, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 1, 4, 1, 5>::validate(weights, input1, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 1, 4, 1, 5>::validate(batched_mm_output, biases, output, winograd_info)));
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }
    return Status{};
}

inline Status validate_kernel_1x5(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
                                  const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 4, 1, 5, 1>::validate(input, input0, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 4, 1, 5, 1>::validate(weights, input1, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 4, 1, 5, 1>::validate(batched_mm_output, biases, output, winograd_info)));
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }
    return Status{};
}

inline Status validate_kernel_7x1(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
                                  const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 1, 2, 1, 7>::validate(input, input0, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 1, 2, 1, 7>::validate(weights, input1, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 1, 2, 1, 7>::validate(batched_mm_output, biases, output, winograd_info)));
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }
    return Status{};
}

inline Status validate_kernel_1x7(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
                                  const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 1, 7, 1>::validate(input, input0, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 1, 7, 1>::validate(weights, input1, winograd_info)));
    ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 1, 7, 1>::validate(batched_mm_output, biases, output, winograd_info)));

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }
    return Status{};
}

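// Gathers the input dimensions into the NHWC-ordered Tensor4DShape used by the
// Winograd transforms, independently of the tensor's actual data layout.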
inline Tensor4DShape internal_get_input_shape(const arm_compute::ITensor *input)
{
    const DataLayout data_layout = input->info()->data_layout();
    const int        in_width    = input->info()->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH));
    const int        in_height   = input->info()->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT));
    const int        in_channels = input->info()->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL));
    const int        in_batches  = input->info()->dimension(3);

    return Tensor4DShape({ in_batches, in_height, in_width, in_channels });
}

Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
{
    ARM_COMPUTE_UNUSED(output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.stride().first != 1 || conv_info.stride().second != 1, "Winograd layer only supports unit strides.");
    if(biases != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }
    return INEWinogradLayerTransformWeightsKernel<float>::validate(input, weights);
}

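// Selects the Winograd output tile for a given kernel size. Larger tiles
// amortise the transform cost over more output pixels; for 3x3 kernels the
// smaller F(2x2, 3x3) tile is preferred when the input is at most 4x4.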
Size2D winograd_output_tile(const Size2D &input_dims, const Size2D &kernel_dims)
{
    Size2D output_tile = Size2D{};
    if(kernel_dims == Size2D(3U, 3U))
    {
        output_tile = (input_dims.width <= 4 && input_dims.height <= 4) ? Size2D(2U, 2U) : Size2D(4U, 4U);
    }
    else if(kernel_dims == Size2D(5U, 5U))
    {
        output_tile = Size2D(2U, 2U);
    }
    else if(kernel_dims == Size2D(1U, 3U))
    {
        output_tile = Size2D(1U, 6U);
    }
    else if(kernel_dims == Size2D(3U, 1U))
    {
        output_tile = Size2D(6U, 1U);
    }
    else if(kernel_dims == Size2D(1U, 5U))
    {
        output_tile = Size2D(1U, 4U);
    }
    else if(kernel_dims == Size2D(5U, 1U))
    {
        output_tile = Size2D(4U, 1U);
    }
    else if(kernel_dims == Size2D(7U, 1U))
    {
        output_tile = Size2D(2U, 1U);
    }
    else if(kernel_dims == Size2D(1U, 7U))
    {
        output_tile = Size2D(1U, 2U);
    }
    return output_tile;
}

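// The 5x5 configurations listed here trade numerical accuracy for speed, so
// they are only selectable when the caller opts in via enable_fast_math.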
bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_size)
{
    // Check if we want to configure a Winograd configuration which requires fast math
    using WinogradConfiguration = std::pair<std::pair<int, int>, std::pair<int, int>>;

    const std::vector<WinogradConfiguration> fast_math_winograd =
    {
        WinogradConfiguration(std::pair<int, int>(2, 2), std::pair<int, int>(5, 5)),
        WinogradConfiguration(std::pair<int, int>(4, 4), std::pair<int, int>(5, 5))
    };

    auto p = std::make_pair(std::pair<int, int>(output_tile.width, output_tile.height),
                            std::pair<int, int>(kernel_size.width, kernel_size.height));

    return std::find(fast_math_winograd.begin(), fast_math_winograd.end(), p) != fast_math_winograd.end();
}

} // namespace

NEWinogradConvolutionLayer::NEWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _gemm_function(memory_manager), _transform_input_kernel(nullptr), _transform_output_kernel(nullptr), _transform_weights_kernel(nullptr), _activationlayer_function(),
      _permute_input(), _permute_weights(), _permute_output(), _input_workspace(), _output_workspace(), _kernel_storage(), _input_nhwc(), _output_nhwc(), _weights_hwio(), _input(), _weights(), _output(),
      _is_prepared(false), _is_activationlayer_enabled(false)
{
}

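// Minimal usage sketch (tensor names and shapes are illustrative only):
//   NEWinogradConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1)); // unit strides only
//   conv.run(); // run() calls prepare() on first use to transform the weights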
void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info,
                                           bool enable_fast_math)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info));

    // Get indices for the width, height and channel dimensions
    const DataLayout   data_layout = input->info()->data_layout();
    const unsigned int width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const unsigned int height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    const Size2D input_dims  = Size2D(input->info()->dimension(width_idx), input->info()->dimension(height_idx));
    const Size2D kernel_size = Size2D(weights->info()->dimension(width_idx), weights->info()->dimension(height_idx));
    const Size2D output_tile = winograd_output_tile(input_dims, kernel_size);

    // Check if the Winograd configuration requires fast math
    if(!enable_fast_math)
    {
        ARM_COMPUTE_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
    }

    _weights     = weights;
    _input       = input;
    _output      = output;
    _is_prepared = false;

    std::unique_ptr<INEWinogradLayerTransformInputKernel<float>>   transform_input_kernel;
    std::unique_ptr<INEWinogradLayerTransformWeightsKernel<float>> transform_weights_kernel;
    std::unique_ptr<INEWinogradLayerTransformOutputKernel<float>>  transform_output_kernel;

    int n_gemms = 0;
    int N_BLOCK = 0; // Size of block used by GEMM.

    if(kernel_size == Size2D(3, 3))
    {
        if(input->info()->dimension(width_idx) > 4 && input->info()->dimension(height_idx) > 4)
        {
            using config             = NEWinogradLayerConfiguration<float, float, 4, 4, 3, 3>;
            transform_input_kernel   = support::cpp14::make_unique<config::TransformInputKernel>();
            transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
            transform_output_kernel  = support::cpp14::make_unique<config::TransformOutputKernel>();
            n_gemms                  = config::WinogradBase::N_GEMMS;
            N_BLOCK                  = config::WinogradConv::N_BLOCK;
        }
        else
        {
            using config             = NEWinogradLayerConfiguration<float, float, 2, 2, 3, 3>;
            transform_input_kernel   = support::cpp14::make_unique<config::TransformInputKernel>();
            transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
            transform_output_kernel  = support::cpp14::make_unique<config::TransformOutputKernel>();
            n_gemms                  = config::WinogradBase::N_GEMMS;
            N_BLOCK                  = config::WinogradConv::N_BLOCK;
        }
    }
    else if(kernel_size == Size2D(5, 5))
    {
        using config             = NEWinogradLayerConfiguration<float, float, 2, 2, 5, 5>;
        transform_input_kernel   = support::cpp14::make_unique<config::TransformInputKernel>();
        transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
        transform_output_kernel  = support::cpp14::make_unique<config::TransformOutputKernel>();
        n_gemms                  = config::WinogradBase::N_GEMMS;
        N_BLOCK                  = config::WinogradConv::N_BLOCK;
    }
    else if(kernel_size == Size2D(1, 3))
    {
        using config             = NEWinogradLayerConfiguration<float, float, 6, 1, 3, 1>;
        transform_input_kernel   = support::cpp14::make_unique<config::TransformInputKernel>();
        transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
        transform_output_kernel  = support::cpp14::make_unique<config::TransformOutputKernel>();
        n_gemms                  = config::WinogradBase::N_GEMMS;
        N_BLOCK                  = config::WinogradConv::N_BLOCK;
    }
    else if(kernel_size == Size2D(3, 1))
    {
        using config             = NEWinogradLayerConfiguration<float, float, 1, 6, 1, 3>;
        transform_input_kernel   = support::cpp14::make_unique<config::TransformInputKernel>();
        transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
        transform_output_kernel  = support::cpp14::make_unique<config::TransformOutputKernel>();
        n_gemms                  = config::WinogradBase::N_GEMMS;
        N_BLOCK                  = config::WinogradConv::N_BLOCK;
    }
    else if(kernel_size == Size2D(1, 5))
    {
        using config             = NEWinogradLayerConfiguration<float, float, 4, 1, 5, 1>;
        transform_input_kernel   = support::cpp14::make_unique<config::TransformInputKernel>();
        transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
        transform_output_kernel  = support::cpp14::make_unique<config::TransformOutputKernel>();
        n_gemms                  = config::WinogradBase::N_GEMMS;
        N_BLOCK                  = config::WinogradConv::N_BLOCK;
    }
    else if(kernel_size == Size2D(5, 1))
    {
        using config             = NEWinogradLayerConfiguration<float, float, 1, 4, 1, 5>;
        transform_input_kernel   = support::cpp14::make_unique<config::TransformInputKernel>();
        transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
        transform_output_kernel  = support::cpp14::make_unique<config::TransformOutputKernel>();
        n_gemms                  = config::WinogradBase::N_GEMMS;
        N_BLOCK                  = config::WinogradConv::N_BLOCK;
    }
    else if(kernel_size == Size2D(1, 7))
    {
        using config             = NEWinogradLayerConfiguration<float, float, 2, 1, 7, 1>;
        transform_input_kernel   = support::cpp14::make_unique<config::TransformInputKernel>();
        transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
        transform_output_kernel  = support::cpp14::make_unique<config::TransformOutputKernel>();
        n_gemms                  = config::WinogradBase::N_GEMMS;
        N_BLOCK                  = config::WinogradConv::N_BLOCK;
    }
    else if(kernel_size == Size2D(7, 1))
    {
        using config             = NEWinogradLayerConfiguration<float, float, 1, 2, 1, 7>;
        transform_input_kernel   = support::cpp14::make_unique<config::TransformInputKernel>();
        transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
        transform_output_kernel  = support::cpp14::make_unique<config::TransformOutputKernel>();
        n_gemms                  = config::WinogradBase::N_GEMMS;
        N_BLOCK                  = config::WinogradConv::N_BLOCK;
    }
    else
    {
        ARM_COMPUTE_ERROR("Not supported.");
    }

    const PaddingType use_padding_type = (conv_info.pad_top() != 0u || conv_info.pad_left() != 0u) ? PADDING_SAME : PADDING_VALID;
    const bool        use_same_padding = use_padding_type == PADDING_SAME;

    // Get convolved dimensions
    const int in_channels  = input->info()->dimension(channel_idx);
    const int out_channels = output->info()->dimension(channel_idx);

    const Tensor4DShape in_shape(internal_get_input_shape(input));
    const DataType      data_type      = input->info()->data_type();
    const size_t        data_type_size = input->info()->element_size();
    // Get the memory required to instantiate a new Winograd operator.
    constexpr size_t storage_alignment = 64;

    // Kernel storage
    const size_t kernel_storage_size = transform_weights_kernel->get_weight_storage_size(out_channels,
                                                                                         in_channels)
                                       * data_type_size
                                       + storage_alignment - 1; /* FIXME: remove alignment after COMPMID-1088 */

    // Input storage
    const size_t input_storage_size = transform_input_kernel->get_input_storage_size(in_shape.n_batches, in_shape.n_channels, in_shape.n_rows, in_shape.n_cols,
                                                                                     use_same_padding)
                                      * data_type_size
                                      + storage_alignment - 1; /* FIXME: remove alignment after COMPMID-1088 */

    // Output storage
    const size_t output_storage_size = transform_output_kernel->get_output_storage_size(in_shape.n_batches, in_shape.n_rows, in_shape.n_cols, out_channels,
                                                                                        use_same_padding)
                                       * data_type_size
                                       + storage_alignment - 1; /* FIXME: remove alignment after COMPMID-1088 */

    const KernelShape kernel_shape({ out_channels, static_cast<int>(kernel_size.height), static_cast<int>(kernel_size.width), in_channels });
    const int         kernel_matrix_stride = transform_weights_kernel->get_matrix_stride(kernel_shape);

    const int  output_matrix_stride = transform_output_kernel->get_matrix_stride(kernel_shape, in_shape, use_padding_type);
    const auto output_shape(transform_output_kernel->get_output_shape(kernel_shape, in_shape, use_padding_type));

    const int input_matrix_stride = transform_input_kernel->get_matrix_stride(kernel_shape, in_shape, use_padding_type);

    // Configure GEMM
    const int tile_rows                = iceildiv(output_shape.n_rows, output_tile.height);
    const int tile_cols                = iceildiv(output_shape.n_cols, output_tile.width);
    const int m                        = in_shape.n_batches * tile_rows * tile_cols;
    const int k                        = in_shape.n_channels;
    const int n                        = out_channels;
    const int kernel_matrix_row_stride = roundup(out_channels, N_BLOCK);
    const int output_matrix_row_stride = kernel_matrix_row_stride;

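    // Describe the three workspaces as 4D tensors with explicit strides so the
    // GEMM dispatch sees one Winograd matrix per batch; the stride of the third
    // dimension is forced to 0 as a workaround (see the FIXME comments below).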
    TensorShape a_shape(k, m, 1, n_gemms);
    Strides     a_strides(data_type_size);
    a_strides.set(1, a_strides[0] * k);
    //a_strides.set(2, data_type_size * input_matrix_stride / n_gemms); FIXME: This is the real batch size, but RSH's code crashes if it's not 0.
    a_strides.set(2, 0);
    a_strides.set(3, data_type_size * input_matrix_stride);

    TensorShape b_shape(n, k, n_gemms);
    Strides     b_strides(data_type_size);
    b_strides.set(1, data_type_size * kernel_matrix_row_stride);
    b_strides.set(2, data_type_size * kernel_matrix_stride);

    TensorShape d_shape(n, m, 1, n_gemms);
    Strides     d_strides(data_type_size);
    d_strides.set(1, data_type_size * output_matrix_row_stride);
    //d_strides.set(2, data_type_size * output_matrix_stride / n_gemms); FIXME: This is the real batch size, but RSH's code crashes if it's not 0.
    d_strides.set(2, 0);
    d_strides.set(3, data_type_size * output_matrix_stride);

    TensorInfo a_info, b_info, d_info;
    a_info.init(a_shape, 1, data_type, a_strides, 0, input_storage_size);
    b_info.init(b_shape, 1, data_type, b_strides, 0, kernel_storage_size);
    d_info.init(d_shape, 1, data_type, d_strides, 0, output_storage_size);

    _input_workspace.allocator()->init(a_info, storage_alignment);
    _kernel_storage.allocator()->init(b_info, storage_alignment);
    _output_workspace.allocator()->init(d_info, storage_alignment);

    // Configure and allocate the tensor used to convert the output from the Winograd domain back to the spatial domain
    TensorInfo info(TensorShape(_output->info()->dimension(2), _output->info()->dimension(0),
                                _output->info()->dimension(1), _output->info()->dimension(3)),
                    1, _output->info()->data_type());
    _output_nhwc.allocator()->init(info);

    // Configure the input transform
    _memory_group.manage(&_input_workspace);
    _memory_group.manage(&_output_workspace);

    if(data_layout == DataLayout::NCHW)
    {
        // Configure the function to permute the input tensor from NCHW to NHWC, as the Winograd kernels expect NHWC data
        _permute_input.configure(input, &_input_nhwc, PermutationVector(2U, 0U, 1U));
        _input_nhwc.allocator()->allocate();
        transform_input_kernel->configure(&_input_nhwc, in_shape.n_batches, in_shape.n_rows, in_shape.n_cols, in_shape.n_channels, use_padding_type,
                                          &_input_workspace, input_matrix_stride);

        // Re-order the weight tensor from [Output feature map x Input feature map x Height x Width] to [Height x Width x Input feature map x Output feature map]
        _permute_weights.configure(weights, &_weights_hwio, PermutationVector(3U, 2U, 0U, 1U));

        transform_weights_kernel->configure(&_weights_hwio, &_kernel_storage, kernel_matrix_stride, out_channels, in_channels);

        // The biases tensor has not been allocated at this point in time; the output transform will add the biases to the final result in the run() method
        _memory_group.manage(&_output_nhwc);
        transform_output_kernel->configure(biases, &_output_workspace,
                                           output_matrix_stride, &_output_nhwc,
                                           in_shape.n_batches, output_shape.n_rows, output_shape.n_cols, out_channels);
    }
    else
    {
        transform_input_kernel->configure(_input, in_shape.n_batches, in_shape.n_rows, in_shape.n_cols, in_shape.n_channels, use_padding_type,
                                          &_input_workspace, input_matrix_stride);

        // Re-order the weight tensor from [Output feature map x Input feature map x Height x Width] to [Height x Width x Input feature map x Output feature map]
        _permute_weights.configure(weights, &_weights_hwio, PermutationVector(3U, 0U, 1U, 2U));

        transform_weights_kernel->configure(&_weights_hwio, &_kernel_storage, kernel_matrix_stride, out_channels, in_channels);

        transform_output_kernel->configure(biases, &_output_workspace,
                                           output_matrix_stride, _output,
                                           in_shape.n_batches, output_shape.n_rows, output_shape.n_cols, out_channels);
    }

    _gemm_function.configure(&_input_workspace, &_kernel_storage, nullptr, &_output_workspace, 1.0f, 0.f);
    _input_workspace.allocator()->allocate();
    _output_workspace.allocator()->allocate();

    // Reorder the convolved output to ACL's ordering NCHW
    if(data_layout == DataLayout::NCHW)
    {
        _permute_output.configure(&_output_nhwc, _output, PermutationVector(1U, 2U, 0U));
        _output_nhwc.allocator()->allocate();
    }

    _transform_input_kernel   = std::move(transform_input_kernel);
    _transform_weights_kernel = std::move(transform_weights_kernel);
    _transform_output_kernel  = std::move(transform_output_kernel);

    // Configure activation layer
    _is_activationlayer_enabled = act_info.enabled();
    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.configure(_output, nullptr, act_info);
    }
}

void NEWinogradConvolutionLayer::run()
{
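    // Pipeline: (optional NCHW -> NHWC permute) -> input transform -> batched
    // GEMMs in the Winograd domain -> output transform (adding biases) ->
    // (optional permute back to NCHW) -> optional activation.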
    const DataLayout data_layout = _input->info()->data_layout();

    prepare();

    _memory_group.acquire();

    if(data_layout == DataLayout::NCHW)
    {
        // Bring channels to the front as the Winograd code expects the tensor to be in the format NHWC
        _permute_input.run();
    }

    // Transform the input tensor to the Winograd domain
    NEScheduler::get().schedule(_transform_input_kernel.get(), Window::DimX);

    // Run the batched GEMMs in multiple threads; each kernel runs one or more of the n_gemms matrix multiplies
    _gemm_function.run();

    // Transform the output tensor back to the spatial domain
    NEScheduler::get().schedule(_transform_output_kernel.get(), Window::DimX);

    if(data_layout == DataLayout::NCHW)
    {
        // Reorder the convolved output to ACL's ordering NCHW
        _permute_output.run();
    }

    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.run();
    }

    _memory_group.release();
}

Status NEWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                            const ActivationLayerInfo &act_info, bool enable_fast_math)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info));

    // Get indices for the width and height
    const size_t idx_width  = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);

    // Input shape, kernel size and output tile
    const Size2D input_dims  = Size2D(input->dimension(idx_width), input->dimension(idx_height));
    const Size2D kernel_size = Size2D(weights->dimension(idx_width), weights->dimension(idx_height));
    const Size2D output_tile = winograd_output_tile(input_dims, kernel_size);

    // Check if the Winograd configuration requires fast math
    if(!enable_fast_math)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(check_support_fast_math(output_tile, kernel_size), "This Winograd configuration requires enable_fast_math=true");
    }

    const WinogradInfo winograd_info = WinogradInfo(output_tile,
                                                    kernel_size,
                                                    input_dims,
                                                    conv_info,
                                                    input->data_layout());

    // Validate input transform
    const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);
    const TensorInfo  input0       = input->clone()->set_tensor_shape(input0_shape);
    // Validate filter transform
    const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
    const TensorInfo  input1       = weights->clone()->set_tensor_shape(input1_shape);
    // Validate batched matrix multiply
    TensorShape batched_mm_output_shape = input0.tensor_shape();
    batched_mm_output_shape[0]          = input1.tensor_shape()[0];
    const TensorInfo batched_mm_output  = input0.clone()->set_tensor_shape(batched_mm_output_shape);
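    // Note: the batched multiply output keeps the rows of the transformed input
    // and takes its leading dimension (the number of output channels) from the
    // transformed weights.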

    if(kernel_size == Size2D(3, 3))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u && conv_info.pad_top() != 1, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_bottom() != 0u && conv_info.pad_bottom() != 1, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u && conv_info.pad_left() != 1, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != 0u && conv_info.pad_right() != 1, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != conv_info.pad_left(), "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != conv_info.pad_bottom(), "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != conv_info.pad_left(), "Only SAME or VALID padding supported");
        return validate_kernel_3x3(input_dims, input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
    }
    else if(kernel_size == Size2D(5, 5))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u && conv_info.pad_top() != 2, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u && conv_info.pad_left() != 2, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_bottom() != 0u && conv_info.pad_bottom() != 2, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != 0u && conv_info.pad_right() != 2, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != conv_info.pad_left(), "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != conv_info.pad_bottom(), "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != conv_info.pad_left(), "Only SAME or VALID padding supported");
        return validate_kernel_5x5(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
    }
    else if(kernel_size == Size2D(3, 1))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u && conv_info.pad_left() != 1, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != 0u && conv_info.pad_right() != 1, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u || conv_info.pad_bottom() != 0u, "Only SAME or VALID padding supported");
        return validate_kernel_3x1(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
    }
    else if(kernel_size == Size2D(1, 3))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u && conv_info.pad_top() != 1, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_bottom() != 0u && conv_info.pad_bottom() != 1, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u || conv_info.pad_right() != 0u, "Only SAME or VALID padding supported");
        return validate_kernel_1x3(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
    }
    else if(kernel_size == Size2D(5, 1))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u && conv_info.pad_left() != 2, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != 0u && conv_info.pad_right() != 2, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u || conv_info.pad_bottom() != 0u, "Only SAME or VALID padding supported");
        return validate_kernel_5x1(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
    }
    else if(kernel_size == Size2D(1, 5))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u && conv_info.pad_top() != 2, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_bottom() != 0u && conv_info.pad_bottom() != 2, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u || conv_info.pad_right() != 0u, "Only SAME or VALID padding supported");
        return validate_kernel_1x5(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
    }
    else if(kernel_size == Size2D(7, 1))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u && conv_info.pad_left() != 3, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_right() != 0u && conv_info.pad_right() != 3, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u || conv_info.pad_bottom() != 0u, "Only SAME or VALID padding supported");
        return validate_kernel_7x1(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
    }
    else if(kernel_size == Size2D(1, 7))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_top() != 0u && conv_info.pad_top() != 3, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_bottom() != 0u && conv_info.pad_bottom() != 3, "Only SAME or VALID padding supported");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.pad_left() != 0u || conv_info.pad_right() != 0u, "Only SAME or VALID padding supported");
        return validate_kernel_1x7(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_MSG("Kernel shape not supported");
    }
    return Status{};
}

void NEWinogradConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
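        // The weights are permuted and transformed to the Winograd domain only
        // once; afterwards the original weight tensor can be released.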
        // Permute weights
        _weights_hwio.allocator()->allocate();
        _permute_weights.run();
        _weights->mark_as_unused();

        // Transform weights
        _kernel_storage.allocator()->allocate();
        NEScheduler::get().schedule(_transform_weights_kernel.get(), Window::DimX);

        _weights_hwio.allocator()->free();
        _is_prepared = true;
    }
}

} // namespace arm_compute