blob: ce46c5454626cac990dec68079e0a09b8872f205 [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
2 * Copyright (c) 2017 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/core/NEON/kernels/NETransposeKernel.h"
25
Gian Marco5420b282017-11-29 10:41:38 +000026#include "arm_compute/core/AccessWindowStatic.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010027#include "arm_compute/core/Error.h"
28#include "arm_compute/core/Helpers.h"
29#include "arm_compute/core/ITensor.h"
Gian Marco5420b282017-11-29 10:41:38 +000030#include "arm_compute/core/Utils.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010031#include "arm_compute/core/Validate.h"
32
33#include <arm_neon.h>
34
using namespace arm_compute;

namespace arm_compute
{
// Forward declaration: Coordinates is only used by reference/pointer here
// (in the execute_window_loop lambdas), so the full header is not required.
class Coordinates;
} // namespace arm_compute
41
42namespace
43{
Gian Marco7c435f22017-12-05 16:17:23 +000044TensorShape transposed_tensor_shape(const TensorShape &in)
45{
46 TensorShape output_shape{ in };
47 const size_t w_out = in[1];
48 const size_t h_out = in[0];
49 output_shape.set(0, w_out);
50 output_shape.set(1, h_out);
51
52 return output_shape;
53}
54
55unsigned int num_elems_processed(size_t element_size)
56{
57 switch(element_size)
58 {
59 case 1:
60 return 8;
61 break;
62 case 2:
63 return 4;
64 break;
65 case 4:
66 return 4;
67 break;
68 default:
69 ARM_COMPUTE_ERROR("Element size not supported");
70 break;
71 }
72}
73
74Error validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
75{
76 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::QASYMM8, DataType::U16, DataType::S16, DataType::QS16, DataType::U32, DataType::S32,
77 DataType::F16,
78 DataType::F32);
79
80 if(output->total_size() != 0)
81 {
82 const TensorInfo tensor_info = input->clone()->set_tensor_shape(transposed_tensor_shape(input->tensor_shape()));
83
84 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info);
85 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
86 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
87 }
88
89 return Error{};
90}
91
/** Compute the maximum execution window for the transpose kernel and update the
 * input/output paddings accordingly.
 *
 * @param[in,out] input  Input tensor info; its padding may be updated.
 * @param[in,out] output Output tensor info; may be uninitialized (total_size() == 0), in which case it is not checked.
 *
 * @return A pair of the error status and the configured window.
 */
std::pair<Error, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
{
    // Square step: the kernel processes NxN tiles (8x8 or 4x4 depending on element size)
    const unsigned int num_elems_processed_per_iteration = num_elems_processed(input->element_size());

    // Configure kernel window
    Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration, num_elems_processed_per_iteration));

    AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration, num_elems_processed_per_iteration);

    bool window_changed = update_window_and_padding(win, input_access);

    if(output->total_size() != 0)
    {
        // TODO (COMPMID-708): Replace AccessWindowStatic with AccessWindowTranspose
        // The output is accessed in transposed order, so require it padded up to a
        // multiple of the tile size in both dimensions.
        AccessWindowStatic output_access(output, 0, 0, ceil_to_multiple(output->dimension(0), num_elems_processed_per_iteration), ceil_to_multiple(output->dimension(1),
                                                                                                                                                  num_elems_processed_per_iteration));

        // NOTE(review): '||' short-circuits, so this output padding update is skipped
        // when the input access already changed the window — confirm this is intended.
        window_changed = window_changed || update_window_and_padding(win, output_access);

        output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
    }

    // Any window change here means the tensors did not have the required padding
    Error err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Error{};
    return std::make_pair(err, win);
}
117
/** Transpose a tensor of 8-bit elements with NEON, one 8x8 tile per window
 * iteration, using a vtrn butterfly (2x2 -> 4x4 -> 8x8).
 *
 * @param[in]  in     Input tensor, iterated over with @p window.
 * @param[out] out    Output tensor, written at manually computed transposed offsets.
 * @param[in]  window Execution window (configured with a step of 8 in X and Y).
 */
void transpose_8bit_elements(const ITensor *in, ITensor *out, const Window &window)
{
    // Output addresses are computed by hand below (dst_offset_in_bytes), so the
    // output window's X/Y dimensions are collapsed to keep its iterator fixed.
    Window window_out(window);
    window_out.set(Window::DimX, Window::Dimension(0, 0, 0));
    window_out.set(Window::DimY, Window::Dimension(0, 0, 0));

    Iterator input(in, window);
    Iterator output(out, window_out);

    // Row strides in bytes, used to step through consecutive rows of each tensor
    const size_t input_stride_in_bytes  = in->info()->strides_in_bytes()[1];
    const size_t output_stride_in_bytes = out->info()->strides_in_bytes()[1];

    execute_window_loop(window, [&](const Coordinates & id)
    {
        // Load an 8x8 tile of the input (one 8-byte vector per row)
        const uint8x8_t row0 = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + 0 * input_stride_in_bytes));
        const uint8x8_t row1 = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + 1 * input_stride_in_bytes));
        const uint8x8_t row2 = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + 2 * input_stride_in_bytes));
        const uint8x8_t row3 = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + 3 * input_stride_in_bytes));
        const uint8x8_t row4 = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + 4 * input_stride_in_bytes));
        const uint8x8_t row5 = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + 5 * input_stride_in_bytes));
        const uint8x8_t row6 = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + 6 * input_stride_in_bytes));
        const uint8x8_t row7 = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + 7 * input_stride_in_bytes));

        // Transpose 2x2: interleave adjacent row pairs at 8-bit granularity
        const uint8x8x2_t k0_u8 = vtrn_u8(row0, row1);
        const uint8x8x2_t k1_u8 = vtrn_u8(row2, row3);
        const uint8x8x2_t k2_u8 = vtrn_u8(row4, row5);
        const uint8x8x2_t k3_u8 = vtrn_u8(row6, row7);

        // Transpose 4x4: interleave the 2x2 results at 16-bit granularity
        const uint16x4x2_t k0_u16 = vtrn_u16(vreinterpret_u16_u8(k0_u8.val[0]), vreinterpret_u16_u8(k1_u8.val[0]));
        const uint16x4x2_t k1_u16 = vtrn_u16(vreinterpret_u16_u8(k0_u8.val[1]), vreinterpret_u16_u8(k1_u8.val[1]));
        const uint16x4x2_t k2_u16 = vtrn_u16(vreinterpret_u16_u8(k2_u8.val[0]), vreinterpret_u16_u8(k3_u8.val[0]));
        const uint16x4x2_t k3_u16 = vtrn_u16(vreinterpret_u16_u8(k2_u8.val[1]), vreinterpret_u16_u8(k3_u8.val[1]));

        // Transpose 8x8: interleave the 4x4 results at 32-bit granularity
        const uint32x2x2_t k0_u32 = vtrn_u32(vreinterpret_u32_u16(k0_u16.val[0]), vreinterpret_u32_u16(k2_u16.val[0]));
        const uint32x2x2_t k1_u32 = vtrn_u32(vreinterpret_u32_u16(k0_u16.val[1]), vreinterpret_u32_u16(k2_u16.val[1]));
        const uint32x2x2_t k2_u32 = vtrn_u32(vreinterpret_u32_u16(k1_u16.val[0]), vreinterpret_u32_u16(k3_u16.val[0]));
        const uint32x2x2_t k3_u32 = vtrn_u32(vreinterpret_u32_u16(k1_u16.val[1]), vreinterpret_u32_u16(k3_u16.val[1]));

        // Compute destination address: input tile (x, y) lands at output tile (y, x)
        const size_t dst_offset_in_bytes = id.y() * sizeof(uint8_t) + id.x() * output_stride_in_bytes;

        // Store rows in butterfly order (k0, k2, k1, k3 / val[0] then val[1]) to
        // undo the interleaving performed by the vtrn stages above
        vst1_u8(reinterpret_cast<uint8_t *>(output.ptr() + dst_offset_in_bytes + 0 * output_stride_in_bytes), vreinterpret_u8_u16(vreinterpret_u16_u32(k0_u32.val[0])));
        vst1_u8(reinterpret_cast<uint8_t *>(output.ptr() + dst_offset_in_bytes + 1 * output_stride_in_bytes), vreinterpret_u8_u16(vreinterpret_u16_u32(k2_u32.val[0])));
        vst1_u8(reinterpret_cast<uint8_t *>(output.ptr() + dst_offset_in_bytes + 2 * output_stride_in_bytes), vreinterpret_u8_u16(vreinterpret_u16_u32(k1_u32.val[0])));
        vst1_u8(reinterpret_cast<uint8_t *>(output.ptr() + dst_offset_in_bytes + 3 * output_stride_in_bytes), vreinterpret_u8_u16(vreinterpret_u16_u32(k3_u32.val[0])));
        vst1_u8(reinterpret_cast<uint8_t *>(output.ptr() + dst_offset_in_bytes + 4 * output_stride_in_bytes), vreinterpret_u8_u16(vreinterpret_u16_u32(k0_u32.val[1])));
        vst1_u8(reinterpret_cast<uint8_t *>(output.ptr() + dst_offset_in_bytes + 5 * output_stride_in_bytes), vreinterpret_u8_u16(vreinterpret_u16_u32(k2_u32.val[1])));
        vst1_u8(reinterpret_cast<uint8_t *>(output.ptr() + dst_offset_in_bytes + 6 * output_stride_in_bytes), vreinterpret_u8_u16(vreinterpret_u16_u32(k1_u32.val[1])));
        vst1_u8(reinterpret_cast<uint8_t *>(output.ptr() + dst_offset_in_bytes + 7 * output_stride_in_bytes), vreinterpret_u8_u16(vreinterpret_u16_u32(k3_u32.val[1])));
    },
    input, output);
}
173
/** Transpose a tensor of 16-bit elements with NEON, one 4x4 tile per window
 * iteration.
 *
 * @param[in]  in     Input tensor, iterated over with @p window.
 * @param[out] out    Output tensor, written at manually computed transposed offsets.
 * @param[in]  window Execution window (configured with a step of 4 in X and Y).
 */
void transpose_16bit_elements(const ITensor *in, ITensor *out, const Window &window)
{
    // Output addresses are computed by hand below, so collapse the output
    // window's X/Y dimensions and keep its iterator fixed per tile
    Window window_out(window);
    window_out.set(Window::DimX, Window::Dimension(0, 0, 0));
    window_out.set(Window::DimY, Window::Dimension(0, 0, 0));

    Iterator input(in, window);
    Iterator output(out, window_out);

    // Row strides in bytes for stepping through consecutive rows
    const size_t in_row_stride  = in->info()->strides_in_bytes()[1];
    const size_t out_row_stride = out->info()->strides_in_bytes()[1];

    execute_window_loop(window, [&](const Coordinates & id)
    {
        // Load a 4x4 tile of 16-bit elements
        const uint16x4_t in0 = vld1_u16(reinterpret_cast<const uint16_t *>(input.ptr() + 0 * in_row_stride));
        const uint16x4_t in1 = vld1_u16(reinterpret_cast<const uint16_t *>(input.ptr() + 1 * in_row_stride));
        const uint16x4_t in2 = vld1_u16(reinterpret_cast<const uint16_t *>(input.ptr() + 2 * in_row_stride));
        const uint16x4_t in3 = vld1_u16(reinterpret_cast<const uint16_t *>(input.ptr() + 3 * in_row_stride));

        // Transpose 2x2: interleave adjacent rows at 16-bit granularity
        const uint16x4x2_t trn01 = vtrn_u16(in0, in1);
        const uint16x4x2_t trn23 = vtrn_u16(in2, in3);

        // Transpose 4x4: interleave the 2x2 results at 32-bit granularity
        const uint32x2x2_t blk0 = vtrn_u32(vreinterpret_u32_u16(trn01.val[0]), vreinterpret_u32_u16(trn23.val[0]));
        const uint32x2x2_t blk1 = vtrn_u32(vreinterpret_u32_u16(trn01.val[1]), vreinterpret_u32_u16(trn23.val[1]));

        // Destination of this tile: input tile (x, y) lands at output tile (y, x)
        const size_t dst_offset_in_bytes = id.y() * sizeof(uint16_t) + id.x() * out_row_stride;

        // Store rows in butterfly order to undo the vtrn interleaving
        vst1_u16(reinterpret_cast<uint16_t *>(output.ptr() + dst_offset_in_bytes + 0 * out_row_stride), vreinterpret_u16_u32(blk0.val[0]));
        vst1_u16(reinterpret_cast<uint16_t *>(output.ptr() + dst_offset_in_bytes + 1 * out_row_stride), vreinterpret_u16_u32(blk1.val[0]));
        vst1_u16(reinterpret_cast<uint16_t *>(output.ptr() + dst_offset_in_bytes + 2 * out_row_stride), vreinterpret_u16_u32(blk0.val[1]));
        vst1_u16(reinterpret_cast<uint16_t *>(output.ptr() + dst_offset_in_bytes + 3 * out_row_stride), vreinterpret_u16_u32(blk1.val[1]));
    },
    input, output);
}
211
/** Transpose a tensor of 32-bit elements with NEON, one 4x4 tile per window
 * iteration. The 4x4 tile is treated as four 2x2 sub-blocks: each sub-block is
 * transposed with vtrn_u32, then the off-diagonal sub-blocks are swapped when
 * stored.
 *
 * @param[in]  in     Input tensor, iterated over with @p window.
 * @param[out] out    Output tensor, written at manually computed transposed offsets.
 * @param[in]  window Execution window (configured with a step of 4 in X and Y).
 */
void transpose_32bit_elements(const ITensor *in, ITensor *out, const Window &window)
{
    // Output addresses are computed by hand below (dst_offset_in_bytes), so the
    // output window's X/Y dimensions are collapsed to keep its iterator fixed.
    Window window_out(window);
    window_out.set(Window::DimX, Window::Dimension(0, 0, 0));
    window_out.set(Window::DimY, Window::Dimension(0, 0, 0));

    Iterator input(in, window);
    Iterator output(out, window_out);

    // Row strides in bytes, used to step through consecutive rows of each tensor
    const size_t input_stride_in_bytes  = in->info()->strides_in_bytes()[1];
    const size_t output_stride_in_bytes = out->info()->strides_in_bytes()[1];

    execute_window_loop(window, [&](const Coordinates & id)
    {
        // Load a 4x4 tile of 32-bit elements (one 16-byte vector per row)
        const uint32x4_t row0 = vld1q_u32(reinterpret_cast<const uint32_t *>(input.ptr() + 0 * input_stride_in_bytes));
        const uint32x4_t row1 = vld1q_u32(reinterpret_cast<const uint32_t *>(input.ptr() + 1 * input_stride_in_bytes));
        const uint32x4_t row2 = vld1q_u32(reinterpret_cast<const uint32_t *>(input.ptr() + 2 * input_stride_in_bytes));
        const uint32x4_t row3 = vld1q_u32(reinterpret_cast<const uint32_t *>(input.ptr() + 3 * input_stride_in_bytes));

        // Transpose 2x2 sub-blocks:
        //   k0 = top-left, k1 = bottom-right, k2 = top-right, k3 = bottom-left
        const uint32x2x2_t k0_u32 = vtrn_u32(vget_low_u32(row0), vget_low_u32(row1));
        const uint32x2x2_t k1_u32 = vtrn_u32(vget_high_u32(row2), vget_high_u32(row3));
        const uint32x2x2_t k2_u32 = vtrn_u32(vget_high_u32(row0), vget_high_u32(row1));
        const uint32x2x2_t k3_u32 = vtrn_u32(vget_low_u32(row2), vget_low_u32(row3));

        // Compute destination address: input tile (x, y) lands at output tile (y, x)
        const size_t dst_offset_in_bytes = id.y() * sizeof(uint32_t) + id.x() * output_stride_in_bytes;

        // Swap block 01 with block 10 and store: the diagonal blocks (k0, k1)
        // stay in place, the off-diagonal blocks (k2, k3) cross the diagonal
        vst1q_u32(reinterpret_cast<uint32_t *>(output.ptr() + dst_offset_in_bytes + 0 * output_stride_in_bytes), vcombine_u32(k0_u32.val[0], k3_u32.val[0]));
        vst1q_u32(reinterpret_cast<uint32_t *>(output.ptr() + dst_offset_in_bytes + 1 * output_stride_in_bytes), vcombine_u32(k0_u32.val[1], k3_u32.val[1]));
        vst1q_u32(reinterpret_cast<uint32_t *>(output.ptr() + dst_offset_in_bytes + 2 * output_stride_in_bytes), vcombine_u32(k2_u32.val[0], k1_u32.val[0]));
        vst1q_u32(reinterpret_cast<uint32_t *>(output.ptr() + dst_offset_in_bytes + 3 * output_stride_in_bytes), vcombine_u32(k2_u32.val[1], k1_u32.val[1]));
    },
    input, output);
}
248} // namespace
249
Gian Marco7c435f22017-12-05 16:17:23 +0000250Error NETransposeKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
251{
252 ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
253 ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
254 ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
255 return Error{};
256}
257
/** Default constructor: the kernel starts unconfigured — no transpose function
 * selected and no tensors bound until configure() is called. */
NETransposeKernel::NETransposeKernel()
    : _func(nullptr), _input(nullptr), _output(nullptr)
{
}
262
263void NETransposeKernel::configure(const ITensor *input, ITensor *output)
264{
Gian Marco7c435f22017-12-05 16:17:23 +0000265 ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100266
267 // Output tensor auto inizialitation if not yet initialized
Gian Marco7c435f22017-12-05 16:17:23 +0000268 auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(transposed_tensor_shape(input->info()->tensor_shape())));
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100269
Gian Marco7c435f22017-12-05 16:17:23 +0000270 ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100271
272 _input = input;
273 _output = output;
274
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100275 switch(input->info()->element_size())
276 {
277 case 1:
Gian Marco7c435f22017-12-05 16:17:23 +0000278 _func = &transpose_8bit_elements;
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100279 break;
280 case 2:
Gian Marco7c435f22017-12-05 16:17:23 +0000281 _func = &transpose_16bit_elements;
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100282 break;
283 case 4:
Gian Marco7c435f22017-12-05 16:17:23 +0000284 _func = &transpose_32bit_elements;
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100285 break;
286 default:
287 ARM_COMPUTE_ERROR("Element size not supported");
288 break;
289 }
290
291 // Configure kernel window
Gian Marco7c435f22017-12-05 16:17:23 +0000292 auto win_config = validate_and_configure_window(input->info(), output->info());
293 ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
294 INEKernel::configure(win_config.second);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100295}
296
Moritz Pflanzerc186b572017-09-07 09:48:04 +0100297void NETransposeKernel::run(const Window &window, const ThreadInfo &info)
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100298{
Moritz Pflanzerc186b572017-09-07 09:48:04 +0100299 ARM_COMPUTE_UNUSED(info);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100300 ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
301 ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
302 ARM_COMPUTE_ERROR_ON(_func == nullptr);
303
304 (*_func)(_input, _output, window);
305}