Dana Zlotnik | 149203b | 2022-01-26 12:38:03 +0200 | [diff] [blame] | 1 | /* |
Matthew Bentham | 7d9a78e | 2023-05-31 13:18:33 +0000 | [diff] [blame] | 2 | * Copyright (c) 2020-2023 Arm Limited. |
Dana Zlotnik | 149203b | 2022-01-26 12:38:03 +0200 | [diff] [blame] | 3 | * |
| 4 | * SPDX-License-Identifier: MIT |
| 5 | * |
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 7 | * of this software and associated documentation files (the "Software"), to |
| 8 | * deal in the Software without restriction, including without limitation the |
| 9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| 10 | * sell copies of the Software, and to permit persons to whom the Software is |
| 11 | * furnished to do so, subject to the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice shall be included in all |
| 14 | * copies or substantial portions of the Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| 19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 22 | * SOFTWARE. |
| 23 | */ |
| 24 | #include "src/cpu/kernels/CpuMaxUnpoolingLayerKernel.h" |
| 25 | |
| 26 | #include "arm_compute/core/TensorInfo.h" |
| 27 | #include "arm_compute/core/Validate.h" |
| 28 | #include "arm_compute/core/Window.h" |
| 29 | #include "arm_compute/core/utils/misc/ShapeCalculator.h" |
| 30 | #include "src/core/CPP/Validate.h" |
| 31 | #include "src/core/common/Registrars.h" |
| 32 | #include "src/core/helpers/AutoConfiguration.h" |
| 33 | #include "src/core/helpers/WindowHelpers.h" |
| 34 | #include "src/cpu/kernels/maxunpool/list.h" |
Dana Zlotnik | 149203b | 2022-01-26 12:38:03 +0200 | [diff] [blame] | 35 | |
| 36 | namespace arm_compute |
| 37 | { |
| 38 | namespace cpu |
| 39 | { |
| 40 | namespace kernels |
| 41 | { |
| 42 | using namespace misc::shape_calculator; |
| 43 | |
| 44 | namespace |
| 45 | { |
| 46 | static const std::vector<CpuMaxUnpoolingLayerKernel::MaxUnpoolingKernel> available_kernels = |
| 47 | { |
| 48 | { |
| 49 | "neon_fp32_maxunpooling", |
| 50 | [](const DataTypeISASelectorData & data) { return data.dt == DataType::F32; }, |
| 51 | REGISTER_FP32_NEON(neon_fp32_maxunpooling) |
| 52 | }, |
| 53 | { |
| 54 | "neon_fp16_maxunpooling", |
| 55 | [](const DataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.fp16; }, |
| 56 | REGISTER_FP16_NEON(neon_fp16_maxunpooling) |
| 57 | }, |
| 58 | { |
| 59 | "neon_qu8_maxunpooling", |
| 60 | [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8; }, |
| 61 | REGISTER_QASYMM8_NEON(neon_qs8_maxunpooling) |
| 62 | }, |
| 63 | { |
| 64 | "neon_qs8_maxunpooling", |
| 65 | [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED; }, |
| 66 | REGISTER_QASYMM8_SIGNED_NEON(neon_qu8_maxunpooling) |
| 67 | }, |
| 68 | }; |
| 69 | |
| 70 | Status validate_arguments(const ITensorInfo *src, const ITensorInfo *indices, const ITensorInfo *dst, const PoolingLayerInfo &pool_info) |
| 71 | { |
| 72 | ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, indices, dst); |
| 73 | ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src); |
| 74 | ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); |
| 75 | ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32); |
| 76 | ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, indices); |
| 77 | |
| 78 | int pool_stride_x = 0; |
| 79 | int pool_stride_y = 0; |
| 80 | PoolingType pool_type = pool_info.pool_type; |
| 81 | const PadStrideInfo pad_stride_info = pool_info.pad_stride_info; |
| 82 | std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride(); |
| 83 | const int pool_size_x = pool_info.pool_size.width; |
| 84 | const int pool_size_y = pool_info.pool_size.height; |
| 85 | const Size2D pool_size(pool_size_x, pool_size_y); |
| 86 | |
| 87 | ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_type != PoolingType::MAX, "Pooling indices only supported for MAX pooling method"); |
| 88 | ARM_COMPUTE_RETURN_ERROR_ON_MSG((pool_size != Size2D(2, 2)), "Pooling indices only supported for pool size 2x2"); |
| 89 | if(dst->total_size() != 0) |
| 90 | { |
| 91 | ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); |
| 92 | ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst); |
| 93 | } |
| 94 | |
| 95 | return Status{}; |
| 96 | } |
| 97 | } // namespace |
| 98 | |
| 99 | void CpuMaxUnpoolingLayerKernel::configure(const ITensorInfo *src, const ITensorInfo *indices, ITensorInfo *dst, const PoolingLayerInfo &pool_info) |
| 100 | { |
| 101 | ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst, indices); |
| 102 | ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, indices, dst, pool_info)); |
| 103 | ARM_COMPUTE_UNUSED(indices); |
| 104 | |
| 105 | const auto uk = CpuMaxUnpoolingLayerKernel::get_implementation(DataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa() }); |
| 106 | ARM_COMPUTE_ERROR_ON_NULLPTR(uk); |
| 107 | _run_method = uk->ukernel; |
| 108 | |
| 109 | const TensorShape output_shape = compute_unpool_shape(*src, pool_info); |
| 110 | auto_init_if_empty(*dst, src->clone()->set_tensor_shape(output_shape)); |
| 111 | |
| 112 | auto window = calculate_max_window(*src, Steps()); |
| 113 | ICpuKernel::configure(window); |
| 114 | } |
| 115 | |
/** Static validation entry point: checks whether a kernel could be configured
 *  with the given operand infos. Delegates the detailed checks to the
 *  file-local validate_arguments() helper after the null-pointer guard.
 */
Status CpuMaxUnpoolingLayerKernel::validate(const ITensorInfo *src, const ITensorInfo *indices, const ITensorInfo *dst, const PoolingLayerInfo &pool_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, indices, dst);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, indices, dst, pool_info));
    return Status{};
}
| 122 | |
| 123 | void CpuMaxUnpoolingLayerKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) |
| 124 | { |
| 125 | ARM_COMPUTE_UNUSED(info); |
| 126 | ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); |
| 127 | ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window); |
| 128 | |
| 129 | const auto src = tensors.get_const_tensor(TensorType::ACL_SRC_0); |
| 130 | const auto indices = tensors.get_const_tensor(TensorType::ACL_SRC_1); |
| 131 | const auto dst = tensors.get_tensor(TensorType::ACL_DST); |
| 132 | |
| 133 | _run_method(src, indices, dst, window); |
| 134 | } |
| 135 | |
| 136 | const char *CpuMaxUnpoolingLayerKernel::name() const |
| 137 | { |
| 138 | return "CpuMaxUnpoolingLayerKernel"; |
| 139 | } |
| 140 | |
/** Expose the file-local micro-kernel registry (e.g. for introspection/tests).
 *
 * @return reference to the static list of available micro-kernel descriptors.
 */
const std::vector<CpuMaxUnpoolingLayerKernel::MaxUnpoolingKernel> &CpuMaxUnpoolingLayerKernel::get_available_kernels()
{
    return available_kernels;
}
| 145 | } // namespace kernels |
| 146 | } // namespace cpu |
Matthew Bentham | 7d9a78e | 2023-05-31 13:18:33 +0000 | [diff] [blame] | 147 | } // namespace arm_compute |