/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEReorgLayerKernel.h"
24#include "arm_compute/core/NEON/kernels/NEReorgLayerKernel.h"
25
26#include "arm_compute/core/Error.h"
27#include "arm_compute/core/Helpers.h"
28#include "arm_compute/core/ITensor.h"
29#include "arm_compute/core/TensorInfo.h"
30#include "arm_compute/core/Types.h"
31#include "arm_compute/core/Validate.h"
32#include "arm_compute/core/utils/misc/ShapeCalculator.h"
33
34#include <cstddef>
35#include <cstdint>
36
37namespace arm_compute
38{
39namespace
40{
41Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t stride)
42{
43 //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
44 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1,
45 DataType::U8, DataType::S8, DataType::QASYMM8,
46 DataType::U16, DataType::S16,
47 DataType::U32, DataType::S32,
48 DataType::F16, DataType::F32);
Gian Marco Iodice477531c2018-08-21 17:53:38 +010049 ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN);
Georgios Pinitasaa6a04a2018-08-29 12:53:41 +010050
Gian Marco Iodice477531c2018-08-21 17:53:38 +010051 const size_t idx_width = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
52 const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
Georgios Pinitasaa6a04a2018-08-29 12:53:41 +010053
Gian Marco Iodice477531c2018-08-21 17:53:38 +010054 ARM_COMPUTE_RETURN_ERROR_ON(stride <= 0);
55 ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_width] % stride) != 0, "The width of the input tensor must be a multiple of stride");
56 ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_height] % stride) != 0, "The height of the input tensor must be a multiple of stride");
57
58 // Validate output if initialized
Georgios Pinitasaa6a04a2018-08-29 12:53:41 +010059 if(output->total_size() != 0)
60 {
Gian Marco Iodice477531c2018-08-21 17:53:38 +010061 const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(misc::shape_calculator::compute_reorg_output_shape(*input, stride));
62 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
Georgios Pinitasaa6a04a2018-08-29 12:53:41 +010063 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
64 }
65
66 return Status{};
67}
68} // namespace
69
70template <typename T>
71void NEReorgLayerKernel::run_reorg(const Window &window)
72{
73 const DataLayout data_layout = _input->info()->data_layout();
74 const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
75 const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
76 const size_t idx_c = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
77
78 const unsigned int stride = _stride;
79 const unsigned int out_c = _output->info()->tensor_shape()[idx_c] / (stride * stride);
80 const uint8_t *in_ptr = _input->buffer();
81
82 // Collapse
83 Window collapsed_window = window.collapse_if_possible(window, 4);
84
85 // Create Iterator
86 Iterator out(_output, collapsed_window);
87
88 // Perform reorg
89 execute_window_loop(collapsed_window, [&](const Coordinates & id)
90 {
91 // Get spatial coords and channels
92 const unsigned int w = id[idx_w];
93 const unsigned int h = id[idx_h];
94 const unsigned int c = id[idx_c];
95
96 // Calculate mapping
97 const unsigned int offset = c / out_c;
98 Coordinates map_coords = id;
99 map_coords.set(idx_w, w * stride + offset % stride);
100 map_coords.set(idx_h, h * stride + offset / stride);
101 map_coords.set(idx_c, c % out_c);
102
103 // Perform mapping
104 *(reinterpret_cast<T *>(out.ptr())) = *(reinterpret_cast<const T *>(in_ptr + _input->info()->offset_element_in_bytes(map_coords)));
105 },
106 out);
107}
108
// Default constructor: creates an unconfigured kernel. No dispatch function or
// tensors are set until configure() is called; stride defaults to 1.
NEReorgLayerKernel::NEReorgLayerKernel()
    : _func(nullptr), _input(nullptr), _output(nullptr), _stride(1)
{
}
113
114void NEReorgLayerKernel::configure(const ITensor *input, ITensor *output, int32_t stride)
115{
116 ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
117
118 // Output auto inizialitation if not yet initialized
119 const TensorShape output_shape = misc::shape_calculator::compute_reorg_output_shape(*input->info(), stride);
120 auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
121
122 // Perform validation step
123 ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), stride));
124
125 _func = nullptr;
126 _input = input;
127 _output = output;
128 _stride = stride;
129
130 switch(input->info()->element_size())
131 {
132 case 1:
133 _func = &NEReorgLayerKernel::run_reorg<uint8_t>;
134 break;
135 case 2:
136 _func = &NEReorgLayerKernel::run_reorg<uint16_t>;
137 break;
138 case 4:
139 _func = &NEReorgLayerKernel::run_reorg<uint32_t>;
140 break;
141 default:
142 ARM_COMPUTE_ERROR("Element size not supported");
143 break;
144 }
145
146 // The NEReorgLayerKernel doesn't need padding so update_window_and_padding() can be skipped
147 output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
148
149 // Configure kernel window
150 Window win = calculate_max_window(*output->info(), Steps());
151
152 ICPPKernel::configure(win);
153}
154
155Status NEReorgLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, int32_t stride)
156{
157 ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, stride));
158 return Status{};
159}
160
161void NEReorgLayerKernel::run(const Window &window, const ThreadInfo &info)
162{
163 ARM_COMPUTE_UNUSED(info);
164 ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
165 ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window);
166
167 if(_func != nullptr)
168 {
169 (this->*_func)(window);
170 }
171}
172} // namespace arm_compute