blob: a3634cd46e2a80953367cb41cca33cc68fde5087 [file] [log] [blame]
Michalis Spyrou7e9391b2018-10-05 14:49:28 +01001/*
Michalis Spyrou8d1b7182019-01-02 15:54:03 +00002 * Copyright (c) 2018-2019 ARM Limited.
Michalis Spyrou7e9391b2018-10-05 14:49:28 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/runtime/CL/functions/CLReduceMean.h"
25
Georgios Pinitasbd17a162019-05-16 14:23:00 +010026#include "arm_compute/core/CL/CLValidate.h"
Michalis Spyrou7e9391b2018-10-05 14:49:28 +010027#include "arm_compute/core/CL/ICLTensor.h"
28#include "arm_compute/core/CL/kernels/CLReductionOperationKernel.h"
29#include "arm_compute/core/Types.h"
30#include "arm_compute/core/utils/helpers/tensor_transform.h"
31#include "arm_compute/runtime/CL/CLScheduler.h"
32#include "support/ToolchainSupport.h"
33
34namespace arm_compute
35{
36CLReduceMean::CLReduceMean(std::shared_ptr<IMemoryManager> memory_manager)
37 : _memory_group(std::move(memory_manager)), _reduction_kernels(), _reduced_outs(), _reshape(), _reduction_ops(), _keep_dims()
38{
39}
/** Configure the reduce-mean operation.
 *
 * Builds a chain of reduction kernels, one per entry of @p reduction_axis,
 * each reducing one axis of the previous stage's output (MEAN_SUM). When
 * @p keep_dims is false, a final reshape drops the reduced singleton
 * dimensions from the result.
 *
 * @param[in]  input          Source tensor. Must not be nullptr.
 * @param[in]  reduction_axis Axes to reduce; negative values are wrapped
 *                            into [0, input_dims).
 * @param[in]  keep_dims      If true, reduced dimensions are kept as size 1.
 * @param[out] output         Destination tensor; auto-initialized when empty
 *                            and keep_dims is false.
 */
void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis, bool keep_dims, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input);

    _reduction_ops = reduction_axis.num_dimensions();
    _reduction_kernels.resize(_reduction_ops);
    // One intermediate tensor per stage; with keep_dims the last stage
    // writes straight into `output`, so one fewer intermediate is needed.
    _reduced_outs.resize(_reduction_ops - (keep_dims ? 1 : 0));
    _keep_dims = keep_dims;

    Coordinates axis_local = reduction_axis;
    const int input_dims = input->info()->num_dimensions();

    // Convert negative axis
    for(unsigned int i = 0; i < _reduction_ops; ++i)
    {
        axis_local[i] = wrap_around(axis_local[i], input_dims);
    }

    // Perform reduction for every axis
    for(unsigned int i = 0; i < _reduction_ops; ++i)
    {
        // Each stage consumes the previous stage's output (the original
        // input for the first stage) and collapses one axis to size 1.
        TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (&_reduced_outs[i - 1])->info()->tensor_shape();
        out_shape.set(axis_local[i], 1);
        auto in = (i == 0) ? input : (&_reduced_outs[i - 1]);

        if(i == _reduction_ops - 1 && keep_dims)
        {
            // Last stage with keep_dims: reduce directly into the output tensor.
            _reduction_kernels[i].configure(in, output, axis_local[i], ReductionOperation::MEAN_SUM);
        }
        else
        {
            // Intermediate stage: reduce into a managed temporary tensor.
            _reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(), input->info()->data_type(), input->info()->quantization_info()));
            _memory_group.manage(&_reduced_outs[i]);
            _reduction_kernels[i].configure(in, &_reduced_outs[i], axis_local[i], ReductionOperation::MEAN_SUM);
        }
    }

    // Allocate intermediate tensors
    for(unsigned int i = 0; i < _reduction_ops - (keep_dims ? 1 : 0); ++i)
    {
        _reduced_outs[i].allocator()->allocate();
    }

    // Configure reshape layer if we want to drop the dimensions
    if(!keep_dims)
    {
        TensorShape out_shape = input->info()->tensor_shape();

        // We have to sort the reduction axis vectors in order for remove_dimension
        // to work properly
        std::sort(axis_local.begin(), axis_local.begin() + _reduction_ops);
        for(unsigned int i = 0; i < _reduction_ops; ++i)
        {
            // `- i` compensates for the dimensions already removed in
            // previous iterations (indices shift down after each removal).
            out_shape.remove_dimension(axis_local[i] - i);
        }
        auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape));
        _reshape.configure(&_reduced_outs[_reduction_ops - 1], output);
    }
}
99
100Status CLReduceMean::validate(const ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims, const ITensorInfo *output)
101{
Michalis Spyrou7e9391b2018-10-05 14:49:28 +0100102 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
Georgios Pinitasbd17a162019-05-16 14:23:00 +0100103 ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
104 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
Michalis Spyrou7e9391b2018-10-05 14:49:28 +0100105 ARM_COMPUTE_RETURN_ERROR_ON(reduction_axis.num_dimensions() > input->num_dimensions());
106
Michalis Spyrou8d1b7182019-01-02 15:54:03 +0000107 TensorShape out_shape = input->tensor_shape();
108
109 Coordinates axis_sorted = reduction_axis;
110 const unsigned int reduction_ops = reduction_axis.num_dimensions();
111 const int input_dims = input->num_dimensions();
112
113 // Convert negative axis
114 for(unsigned int i = 0; i < reduction_ops; ++i)
Michalis Spyrou7e9391b2018-10-05 14:49:28 +0100115 {
Michalis Spyrou8d1b7182019-01-02 15:54:03 +0000116 axis_sorted[i] = wrap_around(axis_sorted[i], input_dims);
117 }
118
119 std::sort(axis_sorted.begin(), axis_sorted.begin() + reduction_ops);
120 for(unsigned int i = 0; i < reduction_ops; ++i)
121 {
122 ARM_COMPUTE_RETURN_ERROR_ON(axis_sorted[i] > 3);
123 ARM_COMPUTE_RETURN_ERROR_ON(static_cast<unsigned int>(axis_sorted[i]) > input->num_dimensions() - 1);
Michalis Spyrou96f84612018-10-24 14:01:04 +0100124 if(output->total_size() > 0 && keep_dims)
Michalis Spyrou7e9391b2018-10-05 14:49:28 +0100125 {
Michalis Spyrou8d1b7182019-01-02 15:54:03 +0000126 ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(axis_sorted[i]) != 1);
Michalis Spyrou7e9391b2018-10-05 14:49:28 +0100127 }
Michalis Spyrou8d1b7182019-01-02 15:54:03 +0000128 if(keep_dims)
129 {
130 out_shape.set(axis_sorted[i], 1);
131 }
132 else
133 {
134 out_shape.remove_dimension(axis_sorted[i] - i);
135 }
Michalis Spyrou7e9391b2018-10-05 14:49:28 +0100136 }
137
Michalis Spyrou8d1b7182019-01-02 15:54:03 +0000138 const TensorInfo out_info = input->clone()->set_tensor_shape(out_shape);
139 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &out_info);
140
Michalis Spyrou7e9391b2018-10-05 14:49:28 +0100141 return Status{};
142}
143
144void CLReduceMean::run()
145{
Georgios Pinitasda953f22019-04-02 17:27:03 +0100146 MemoryGroupResourceScope scope_mg(_memory_group);
Michalis Spyrou7e9391b2018-10-05 14:49:28 +0100147
148 for(unsigned int i = 0; i < _reduction_ops; ++i)
149 {
150 _reduction_kernels[i].run();
151 }
152
153 if(!_keep_dims)
154 {
155 _reshape.run();
156 }
Michalis Spyrou7e9391b2018-10-05 14:49:28 +0100157}
158} // namespace arm_compute