Michalis Spyrou | 7930db4 | 2018-11-22 17:36:28 +0000 | [diff] [blame] | 1 | /* |
Michele Di Giorgio | d9eaf61 | 2020-07-08 11:12:57 +0100 | [diff] [blame] | 2 | * Copyright (c) 2018-2020 Arm Limited. |
Michalis Spyrou | 7930db4 | 2018-11-22 17:36:28 +0000 | [diff] [blame] | 3 | * |
| 4 | * SPDX-License-Identifier: MIT |
| 5 | * |
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 7 | * of this software and associated documentation files (the "Software"), to |
| 8 | * deal in the Software without restriction, including without limitation the |
| 9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| 10 | * sell copies of the Software, and to permit persons to whom the Software is |
| 11 | * furnished to do so, subject to the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice shall be included in all |
| 14 | * copies or substantial portions of the Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| 19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 22 | * SOFTWARE. |
| 23 | */ |
| 24 | |
| 25 | #include "arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h" |
| 26 | |
Michalis Spyrou | 7930db4 | 2018-11-22 17:36:28 +0000 | [diff] [blame] | 27 | #include "arm_compute/core/Error.h" |
| 28 | #include "arm_compute/core/TensorInfo.h" |
| 29 | #include "arm_compute/core/Types.h" |
| 30 | #include "arm_compute/core/Validate.h" |
Manuel Bottini | 7b9998d | 2019-10-21 17:59:07 +0100 | [diff] [blame] | 31 | #include "arm_compute/core/utils/misc/ShapeCalculator.h" |
Sang-Hoon Park | 68dd25f | 2020-10-19 16:00:11 +0100 | [diff] [blame^] | 32 | #include "src/core/CL/CLValidate.h" |
| 33 | #include "src/core/helpers/AutoConfiguration.h" |
| 34 | #include "src/runtime/Utils.h" |
Michalis Spyrou | 7930db4 | 2018-11-22 17:36:28 +0000 | [diff] [blame] | 35 | |
| 36 | namespace arm_compute |
| 37 | { |
Sang-Hoon Park | 2697fd8 | 2019-10-15 16:49:24 +0100 | [diff] [blame] | 38 | CLArgMinMaxLayer::CLArgMinMaxLayer(std::shared_ptr<IMemoryManager> memory_manager) |
Michalis Spyrou | 2aad21a | 2020-07-02 12:43:53 +0100 | [diff] [blame] | 39 | : _memory_group(std::move(memory_manager)), _results_vector(), _not_reshaped_output(), _reduction_kernels_vector(), _reshape(), _num_of_stages(), _reduction_axis() |
Michalis Spyrou | 7930db4 | 2018-11-22 17:36:28 +0000 | [diff] [blame] | 40 | { |
Sang-Hoon Park | 2697fd8 | 2019-10-15 16:49:24 +0100 | [diff] [blame] | 41 | } |
| 42 | |
Michalis Spyrou | 7930db4 | 2018-11-22 17:36:28 +0000 | [diff] [blame] | 43 | Status CLArgMinMaxLayer::validate(const ITensorInfo *input, int axis, const ITensorInfo *output, const ReductionOperation &op) |
| 44 | { |
Manuel Bottini | 7b9998d | 2019-10-21 17:59:07 +0100 | [diff] [blame] | 45 | ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); |
Sheri Zhang | c5b6d88 | 2020-06-26 14:46:59 +0100 | [diff] [blame] | 46 | ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); |
| 47 | ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32, DataType::F16, DataType::F32); |
Manuel Bottini | 7b9998d | 2019-10-21 17:59:07 +0100 | [diff] [blame] | 48 | ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX && op != ReductionOperation::ARG_IDX_MIN, "Invalid reduction operation"); |
| 49 | ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= static_cast<int>(TensorShape::num_max_dimensions), "Reduction axis greater than max number of dimensions"); |
| 50 | ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis"); |
Sang-Hoon Park | 68dd25f | 2020-10-19 16:00:11 +0100 | [diff] [blame^] | 51 | const unsigned int num_of_stages = utils::calculate_number_of_stages_only_x_axis(input->dimension(0), axis); |
Manuel Bottini | 7b9998d | 2019-10-21 17:59:07 +0100 | [diff] [blame] | 52 | |
| 53 | DataType output_data_type = DataType::S32; |
| 54 | TensorInfo not_reshaped_output; |
| 55 | const auto input_num_channles = input->num_channels(); |
| 56 | const auto input_qinfo = input->quantization_info(); |
| 57 | |
| 58 | if(output->total_size() != 0) |
| 59 | { |
| 60 | output_data_type = output->data_type(); |
| 61 | const TensorInfo expected_output_shape = output->clone()->set_tensor_shape(arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, false)); |
| 62 | ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&expected_output_shape, output); |
| 63 | } |
| 64 | |
| 65 | auto shape_before_reshape = input->tensor_shape(); |
| 66 | shape_before_reshape.set(axis, 1); |
| 67 | auto initialize_tensorinfo = [](TensorInfo & ti, TensorShape shape, DataType data_type, int num_channels, QuantizationInfo qinfo) |
| 68 | { |
| 69 | ti.set_data_type(data_type).set_tensor_shape(shape).set_num_channels(num_channels).set_quantization_info(qinfo); |
| 70 | }; |
| 71 | |
| 72 | initialize_tensorinfo(not_reshaped_output, shape_before_reshape, output_data_type, input_num_channles, input_qinfo); |
| 73 | |
| 74 | if(num_of_stages == 1) |
| 75 | { |
| 76 | ARM_COMPUTE_RETURN_ON_ERROR(CLArgMinMaxLayerKernel::validate(input, nullptr, ¬_reshaped_output, axis, op)); |
| 77 | } |
| 78 | else |
| 79 | { |
| 80 | // Create temporary tensor infos |
| 81 | std::vector<TensorInfo> sums_vector(num_of_stages - 1); |
| 82 | |
| 83 | // Create intermediate tensor info |
| 84 | TensorShape shape{ input->tensor_shape() }; |
| 85 | |
| 86 | for(unsigned int i = 0; i < num_of_stages - 1; i++) |
| 87 | { |
| 88 | shape.set(0, ceil(shape.x() / 128.f)); |
| 89 | sums_vector[i].set_data_type(input->data_type()); |
| 90 | sums_vector[i].set_tensor_shape(shape); |
| 91 | sums_vector[i].set_num_channels(input->num_channels()); |
| 92 | } |
| 93 | |
| 94 | // Validate ReductionOperation only on first kernel |
| 95 | ARM_COMPUTE_RETURN_ON_ERROR(CLArgMinMaxLayerKernel::validate(input, nullptr, &sums_vector[0], axis, op)); |
| 96 | |
| 97 | // Validate ReductionOperation on intermediate stages |
| 98 | for(unsigned int i = 1; i < num_of_stages - 1; ++i) |
| 99 | { |
| 100 | ARM_COMPUTE_RETURN_ON_ERROR(CLArgMinMaxLayerKernel::validate(input, &sums_vector[i - 1], &sums_vector[i], axis, op)); |
| 101 | } |
| 102 | |
| 103 | // Validate ReductionOperation on the last stage |
| 104 | const unsigned int last_stage = num_of_stages - 1; |
| 105 | ARM_COMPUTE_RETURN_ON_ERROR(CLArgMinMaxLayerKernel::validate(input, &sums_vector[last_stage - 1], ¬_reshaped_output, axis, op)); |
| 106 | } |
Michalis Spyrou | 2aad21a | 2020-07-02 12:43:53 +0100 | [diff] [blame] | 107 | ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayer::validate(¬_reshaped_output, output)); |
Manuel Bottini | 7b9998d | 2019-10-21 17:59:07 +0100 | [diff] [blame] | 108 | return Status{}; |
| 109 | } |
| 110 | |
// Convenience overload: forwards to the compile-context overload using the
// library's default compile context.
void CLArgMinMaxLayer::configure(const ICLTensor *input, int axis, ICLTensor *output, const ReductionOperation &op)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, axis, output, op);
}
| 115 | |
// Set up the multi-stage arg-min/arg-max pipeline: one or more reduction kernels
// followed by a reshape that removes the reduced dimension. The manage()/allocate()
// ordering below is deliberate — each intermediate tensor must be managed before use
// and allocated only after the kernel consuming it has been configured.
void CLArgMinMaxLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, int axis, ICLTensor *output, const ReductionOperation &op)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    // Multiple stages are only generated when reducing along X; other axes yield 1 stage.
    _num_of_stages  = utils::calculate_number_of_stages_only_x_axis(input->info()->dimension(0), axis);
    _reduction_axis = axis;

    const TensorShape output_shape     = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis, false);
    // Default to S32 indices unless the caller pre-initialized the output's data type.
    DataType          output_data_type = (output->info()->data_type() == DataType::UNKNOWN) ? DataType::S32 : output->info()->data_type();
    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));

    // Configure reduction operation kernels
    _reduction_kernels_vector.resize(_num_of_stages);

    _memory_group.manage(&_not_reshaped_output);
    // Create temporary tensors
    if(_num_of_stages == 1)
    {
        // Single stage: reduce straight into the intermediate (not yet reshaped) output.
        _reduction_kernels_vector[0].configure(compile_context, input, nullptr, &_not_reshaped_output, axis, op);
    }
    else
    {
        _results_vector.resize(_num_of_stages - 1);
        TensorShape shape{ input->info()->tensor_shape() };
        for(unsigned int i = 0; i < _num_of_stages - 1; i++)
        {
            // Each stage shrinks X by the kernel's reduction factor of 128 elements.
            shape.set(0, ceil(shape.x() / 128.f));
            _results_vector[i].allocator()->init(input->info()->clone()->set_tensor_shape(shape).set_data_type(output_data_type));
        }

        // Apply ReductionOperation only on first kernel
        _memory_group.manage(&_results_vector[0]);
        _reduction_kernels_vector[0].configure(compile_context, input, nullptr, &_results_vector[0], axis, op);

        // Apply ReductionOperation on intermediate stages
        for(unsigned int i = 1; i < _num_of_stages - 1; ++i)
        {
            _memory_group.manage(&_results_vector[i]);
            _reduction_kernels_vector[i].configure(compile_context, input, &_results_vector[i - 1], &_results_vector[i], axis, op);
            // The previous stage's result is fully consumed by this kernel; release it to the group.
            _results_vector[i - 1].allocator()->allocate();
        }

        // Apply ReductionOperation on the last stage
        const unsigned int last_stage = _num_of_stages - 1;
        _reduction_kernels_vector[last_stage].configure(compile_context, input, &_results_vector[last_stage - 1], &_not_reshaped_output, axis, op);
        _results_vector[last_stage - 1].allocator()->allocate();
    }
    // Drop the size-1 reduced dimension to produce the final output shape.
    _reshape.configure(compile_context, &_not_reshaped_output, output);
    _not_reshaped_output.allocator()->allocate();
}
| 165 | |
| 166 | void CLArgMinMaxLayer::run() |
| 167 | { |
Manuel Bottini | 7b9998d | 2019-10-21 17:59:07 +0100 | [diff] [blame] | 168 | MemoryGroupResourceScope scope_mg(_memory_group); |
| 169 | |
| 170 | for(unsigned int i = 0; i < _num_of_stages; ++i) |
| 171 | { |
| 172 | CLScheduler::get().enqueue(_reduction_kernels_vector[i], false); |
| 173 | } |
Michalis Spyrou | 2aad21a | 2020-07-02 12:43:53 +0100 | [diff] [blame] | 174 | _reshape.run(); |
Michalis Spyrou | 7930db4 | 2018-11-22 17:36:28 +0000 | [diff] [blame] | 175 | } |
Sang-Hoon Park | 68dd25f | 2020-10-19 16:00:11 +0100 | [diff] [blame^] | 176 | } // namespace arm_compute |