/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/operators/CpuConv2d.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"
#include "src/common/utils/Log.h"
#include "src/cpu/operators/CpuDirectConv2d.h"
#include "src/cpu/operators/CpuGemm.h"
#include "src/cpu/operators/CpuGemmConv2d.h"
#include "src/cpu/operators/CpuGemmDirectConv2d.h"
#include "src/cpu/operators/CpuWinogradConv2d.h"

namespace arm_compute
{
namespace cpu
{
CpuConv2d::CpuConv2d()
    : _function()
{
}

CpuConv2d::~CpuConv2d() = default;
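
// configure() picks the backend operator for the given tensor shapes and parameters via
// get_convolution_method(), instantiates it (Winograd, im2col + GEMM, fused GEMM-based direct,
// or direct convolution), forwards the arguments to it, and records its auxiliary memory
// requirements in _aux_mem so they can be reported through workspace().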
void CpuConv2d::configure(ITensorInfo *input, ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                          const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_UNUSED(num_groups);
    ARM_COMPUTE_ERROR_THROW_ON(CpuConv2d::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info,
                                                   enable_fast_math, num_groups));

    ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);

    const Conv2dInfo info(conv_info, dilation, act_info, enable_fast_math, num_groups);
    switch(CpuConv2d::get_convolution_method(input, weights, output, conv_info, weights_info, dilation, act_info, enable_fast_math))
    {
        case ConvolutionMethod::WINOGRAD:
        {
            auto f = std::make_unique<CpuWinogradConv2d>();
            f->configure(input, weights, biases, output, conv_info, act_info, enable_fast_math);
            _function = std::move(f);
            break;
        }
        case ConvolutionMethod::GEMM:
        {
            auto f = std::make_unique<CpuGemmConv2d>();
            f->configure(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math);
            _function = std::move(f);
            break;
        }
        case ConvolutionMethod::GEMM_CONV2D:
        {
            auto f = std::make_unique<CpuGemmDirectConv2d>();
            f->configure(input, weights, biases, output, info);
            _function = std::move(f);
            break;
        }
        case ConvolutionMethod::DIRECT:
        {
            auto f = std::make_unique<CpuDirectConv2d>();
            f->configure(input, weights, biases, output, conv_info, act_info);
            _function = std::move(f);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }

    _aux_mem = _function->workspace();
}
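
// validate() mirrors the dispatch performed in configure(): it forwards the tensor infos to the
// static validate() of the backend that get_convolution_method() would select, without creating
// any operator state.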
Status CpuConv2d::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1), "Grouping (num_groups != 1) is not supported on Neon");

    const Conv2dInfo info(conv_info, dilation, act_info, enable_fast_math, num_groups);
    switch(CpuConv2d::get_convolution_method(input, weights, output, conv_info, weights_info, dilation, act_info, enable_fast_math))
    {
        case ConvolutionMethod::WINOGRAD:
            ARM_COMPUTE_RETURN_ON_ERROR(CpuWinogradConv2d::validate(input, weights, biases, output, conv_info, act_info, enable_fast_math));
            break;
        case ConvolutionMethod::GEMM:
            ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmConv2d::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math));
            break;
        case ConvolutionMethod::GEMM_CONV2D:
            ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmDirectConv2d::validate(input, weights, biases, output, info));
            break;
        case ConvolutionMethod::DIRECT:
            ARM_COMPUTE_RETURN_ON_ERROR(CpuDirectConv2d::validate(input, weights, biases, output, conv_info, act_info));
            break;
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }

    return Status{};
}
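
// Heuristic used by both configure() and validate() to pick the convolution backend. The checks
// below are applied in order: a lookup table of known layer configurations, a GEMM fallback for
// dilated convolutions, a direct-convolution path for very large inputs with large kernels
// (SRGAN-like shapes), an FFT path for large-kernel channel-reducing layers, GEMM for inputs with
// fewer than 16 channels, a per-CPU exclusion list for F16 fast-math Winograd, GEMM for 1x1
// kernels, and finally Winograd, then the fused GEMM-based direct convolution, then plain GEMM.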
ConvolutionMethod CpuConv2d::get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights,
                                                    const ITensorInfo *output, const PadStrideInfo &conv_info,
                                                    const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, weights);
    ARM_COMPUTE_UNUSED(weights_info);

    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);

    const Conv2dInfo info(conv_info, dilation, act_info, enable_fast_math, 1);

    /* Input spatial dims, kernel size, IFM/OFM, conv info */
    using ConvolutionConfiguration = std::tuple<Size2D, Size2D, Size2D, PadStrideInfo>;
    using ConfigurationMethod      = std::pair<ConvolutionConfiguration, ConvolutionMethod>;

    const std::vector<ConfigurationMethod> known_configs =
    {
        // Alexnet
        ConfigurationMethod(ConvolutionConfiguration(Size2D(27U, 27U), Size2D(5U, 5U), Size2D(48U, 128U), PadStrideInfo(1U, 1U, 2U, 2U)), ConvolutionMethod::GEMM),
        // VGG16 / VGG19
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 64U), PadStrideInfo(1U, 1U, 1U, 1U)), ConvolutionMethod::GEMM),
        // Mobilenet 224
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR)), ConvolutionMethod::GEMM),
        // Mobilenet 160
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR)), ConvolutionMethod::GEMM)
    };

    const auto find_config = [&](ConfigurationMethod c)
    {
        const ConvolutionConfiguration config = c.first;
        const PadStrideInfo            info   = std::get<3>(config);

        return std::get<0>(config) == Size2D(input->dimension(idx_w), input->dimension(idx_h)) && std::get<1>(config) == Size2D(weights->dimension(idx_w), weights->dimension(idx_h))
               && std::get<2>(config) == Size2D(weights->dimension(idx_c), weights->dimension(3)) && info.pad_top() == conv_info.pad_top() && info.pad_right() == conv_info.pad_right()
               && info.pad_bottom() == conv_info.pad_bottom() && info.pad_left() == conv_info.pad_left() && info.stride() == conv_info.stride();
    };

    std::vector<ConfigurationMethod>::const_iterator found;
    if((found = std::find_if(known_configs.begin(), known_configs.end(), find_config)) != known_configs.end())
    {
        return (*found).second;
    }

    if(dilation != Size2D(1U, 1U))
    {
        return ConvolutionMethod::GEMM;
    }
    else
    {
        // SRGAN
        // Output might not be initialized when it is an internal tensor of the layer using the convolution
        if(input->total_size() > 1e7 && (weights->dimension(idx_h) > 7)
           && (CpuDirectConv2d::validate(input, weights, nullptr, output, conv_info, act_info)))
        {
            return ConvolutionMethod::DIRECT;
        }
        if((weights->dimension(idx_h) > 7) && (input->dimension(idx_c) > output->dimension(idx_c)) && (NEFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
        {
            return ConvolutionMethod::FFT;
        }
        if(input->dimension(idx_c) < 16)
        {
            return ConvolutionMethod::GEMM;
        }

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        // This heuristic only applies to the F16 data type on A55r1
        if(NEScheduler::get().cpu_info().get_cpu_model() == CPUModel::A55r1 && enable_fast_math && input->data_type() == DataType::F16)
        {
            // Exclude known bad Winograd configurations (and default to GEMM)
            const std::vector<ConvolutionConfiguration> known_bad_winograd_f16_with_fastmath_configs =
            {
                // Squeezenet_V1_1 fire2 and fire3
                ConvolutionConfiguration(Size2D(56U, 56U), Size2D(3U, 3U), Size2D(16U, 64U), PadStrideInfo(1U, 1U, 1U, 1U)),
                // Squeezenet_V1_1 fire6 and fire7
                ConvolutionConfiguration(Size2D(14U, 14U), Size2D(3U, 3U), Size2D(48U, 192U), PadStrideInfo(1U, 1U, 1U, 1U)),
                // Squeezenet_V1_1 fire8 and fire9
                ConvolutionConfiguration(Size2D(14U, 14U), Size2D(3U, 3U), Size2D(64U, 256U), PadStrideInfo(1U, 1U, 1U, 1U)),
            };
            const auto find_conv_config = [&](ConvolutionConfiguration c)
            {
                const PadStrideInfo info = std::get<3>(c);

                return std::get<0>(c) == Size2D(input->dimension(idx_w), input->dimension(idx_h)) && std::get<1>(c) == Size2D(weights->dimension(idx_w), weights->dimension(idx_h))
                       && std::get<2>(c) == Size2D(weights->dimension(idx_c), weights->dimension(3)) && info.pad_top() == conv_info.pad_top() && info.pad_right() == conv_info.pad_right()
                       && info.pad_bottom() == conv_info.pad_bottom() && info.pad_left() == conv_info.pad_left() && info.stride() == conv_info.stride();
            };

            bool found_bad = std::find_if(known_bad_winograd_f16_with_fastmath_configs.begin(), known_bad_winograd_f16_with_fastmath_configs.end(),
                                          find_conv_config)
                             != known_bad_winograd_f16_with_fastmath_configs.end();
            if(found_bad)
            {
                return ConvolutionMethod::GEMM;
            }
        }
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        // For 1x1 convolutions run the default GEMM
        if(weights->dimension(idx_w) == 1 && weights->dimension(idx_h) == 1)
        {
            return ConvolutionMethod::GEMM;
        }
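
        // No special case matched: prefer Winograd when it supports the configuration, then the
        // fused GEMM-based direct convolution, and fall back to the im2col + GEMM path.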
        if(bool(CpuWinogradConv2d::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)))
        {
            return ConvolutionMethod::WINOGRAD;
        }
        if(bool(CpuGemmDirectConv2d::validate(input, weights, nullptr, output, info)))
        {
            return ConvolutionMethod::GEMM_CONV2D;
        }
        return ConvolutionMethod::GEMM;
    }
}
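
// run() prepares the selected backend before every execution; the backend operators are expected
// to guard their one-off preparation internally, so repeated prepare() calls are cheap.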
void CpuConv2d::run(ITensorPack &tensors)
{
    prepare(tensors);
    _function->run(tensors);
}

void CpuConv2d::prepare(ITensorPack &tensors)
{
    _function->prepare(tensors);
}
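
// workspace() exposes the auxiliary memory requirements captured from the selected backend at
// configure() time, so the caller can allocate the scratch tensors and pass them in the tensor pack.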
experimental::MemoryRequirements CpuConv2d::workspace() const
{
    return _aux_mem;
}
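
// A minimal usage sketch (illustrative only, not part of this translation unit): the operator is
// configured with ITensorInfo descriptors and then executed with an ITensorPack. The slot ids below
// (ACL_SRC_0 = source, ACL_SRC_1 = weights, ACL_SRC_2 = biases, ACL_DST = output) follow the
// convention used by the cpu backend operators; backends that need auxiliary memory additionally
// expect the tensors described by workspace() to be present in the pack.
//
//   CpuConv2d conv;
//   conv.configure(&src_info, &wei_info, &bias_info, &dst_info, conv_info,
//                  WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(),
//                  false /* enable_fast_math */, 1 /* num_groups */);
//
//   ITensorPack pack{ { ACL_SRC_0, &src }, { ACL_SRC_1, &weights },
//                     { ACL_SRC_2, &biases }, { ACL_DST, &dst } };
//   conv.run(pack);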
} // namespace cpu
} // namespace arm_compute