/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H
#define ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/runtime/FunctionDescriptors.h"

#include "arm_compute/core/utils/helpers/tensor_transform.h"

#include <algorithm> // std::sort used by calculate_reduce_mean_shape
#include <cmath>
#include <tuple> // std::tie used by the scaled_dimensions helpers below
namespace arm_compute
{
namespace misc
{
namespace shape_calculator
{
/** Calculate the output tensor shape for the reduce mean operation
 *
 * @param[in] input          Input tensor info
 * @param[in] reduction_axis Reduction axis
 * @param[in] keep_dims      Flag to indicate if dimensions are kept
 *
 * @return the calculated shape
 */
inline TensorShape calculate_reduce_mean_shape(ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims)
{
    const int   reduction_ops = reduction_axis.num_dimensions();
    Coordinates axis_local    = reduction_axis;
    const int   input_dims    = input->num_dimensions();
    convert_negative_axis(axis_local, input_dims);
    TensorShape out_shape = input->tensor_shape();
    // Drop the reduced dimensions when keep_dims is false
    if(!keep_dims)
    {
        // We have to sort the reduction axis vectors in order for remove_dimension
        // to work properly
        std::sort(axis_local.begin(), axis_local.begin() + reduction_ops);
        for(int i = 0; i < reduction_ops; ++i)
        {
            out_shape.remove_dimension(axis_local[i] - i);
        }
        return out_shape;
    }
    else
    {
        for(int i = 0; i < reduction_ops; ++i)
        {
            out_shape.set(axis_local[i], 1);
        }
        return out_shape;
    }
}
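
// A hedged usage sketch (illustrative values; TensorInfo stands in as the
// concrete ITensorInfo implementation): reducing axis 1 of a [4, 3, 2] tensor
// gives [4, 2] with keep_dims == false and [4, 1, 2] with keep_dims == true.
//
//   TensorInfo  info(TensorShape(4U, 3U, 2U), 1, DataType::F32);
//   Coordinates axis(1);
//   const TensorShape out = calculate_reduce_mean_shape(&info, axis, false); // [4, 2]
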
/** Calculate the output tensor shape of a vector input given the convolution dimensions
 *
 * @param[in] input       Input tensor shape
 * @param[in] conv_w      Convolution width
 * @param[in] conv_h      Convolution height
 * @param[in] data_layout Data layout
 *
 * @return the calculated shape
 */
inline TensorShape compute_vector_to_tensor_output_shape(const TensorShape &input, size_t conv_w, size_t conv_h, const DataLayout &data_layout)
{
    const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape output_shape(input);
    output_shape.set(idx_w, conv_w);
    output_shape.set(idx_h, conv_h);
    output_shape.set(idx_c, input.x() / (conv_w * conv_h));

    return output_shape;
}
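
// Hedged sketch: a flat vector of 12 elements mapped onto a 2x2 convolution
// output in NCHW becomes [W, H, C] = [2, 2, 12 / (2 * 2)] = [2, 2, 3].
//
//   const TensorShape out = compute_vector_to_tensor_output_shape(TensorShape(12U), 2, 2, DataLayout::NCHW);
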

/** Calculate the permuted shape of an input given a permutation vector
 *
 * @param[in] input Input tensor info
 * @param[in] perm  Permutation vector
 *
 * @return the calculated shape
 */
inline TensorShape compute_permutation_output_shape(const ITensorInfo &input, const PermutationVector &perm)
{
    TensorShape output_shape = input.tensor_shape();
    permute(output_shape, perm);
    return output_shape;
}

/** Calculate the output shape of the reorg layer given a stride
 *
 * @param[in] input  Input tensor info
 * @param[in] stride Stride
 *
 * @return the calculated shape
 */
inline TensorShape compute_reorg_output_shape(const ITensorInfo &input, int32_t stride)
{
    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    ARM_COMPUTE_ERROR_ON(stride <= 0);
    ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_width] % stride != 0), "The width of the input tensor must be a multiple of stride");
    ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_height] % stride != 0), "The height of the input tensor must be a multiple of stride");

    TensorShape output_shape{ input.tensor_shape() };

    output_shape.set(idx_width, output_shape[idx_width] / stride);
    output_shape.set(idx_height, output_shape[idx_height] / stride);
    output_shape.set(idx_channel, output_shape[idx_channel] * stride * stride);

    return output_shape;
}
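
// Hedged sketch: reorg trades spatial resolution for channels. For an NCHW
// input [W=6, H=4, C=2] and stride 2 the result is [6/2, 4/2, 2*2*2] = [3, 2, 8];
// the asserts above require both spatial dims to be multiples of the stride.
//
//   TensorInfo info(TensorShape(6U, 4U, 2U), 1, DataType::F32); // NCHW assumed as the default layout
//   const TensorShape out = compute_reorg_output_shape(info, 2); // [3, 2, 8]
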

/** Calculate the reshaped shape of the weights
 *
 * @param[in] weights    Weights tensor info
 * @param[in] has_bias   (Optional) Set to true if there is bias
 * @param[in] num_groups (Optional) Number of groups
 *
 * @return the calculated shape of the reshaped weights
 */
inline TensorShape compute_weights_reshaped_shape(const ITensorInfo &weights, bool has_bias = false, unsigned int num_groups = 1)
{
    // A number of groups greater than one is only supported for NCHW data layout, and the number of weights must be a multiple of it.
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(weights.data_layout() == DataLayout::NHWC && num_groups > 1);
    ARM_COMPUTE_ERROR_ON((weights.dimension(3) % num_groups) != 0);

    // Calculate output shape
    TensorShape weights_reshaped{ weights.tensor_shape() };
    weights_reshaped.set(3, weights_reshaped[3] / num_groups);

    weights_reshaped.collapse(3);
    const size_t tmp_dim = weights_reshaped[0];
    weights_reshaped.set(0, weights_reshaped[1]);
    weights_reshaped.set(1, tmp_dim + (has_bias ? 1 : 0));
    if(weights.num_dimensions() < 5)
    {
        weights_reshaped.set(2, num_groups);
    }

    return weights_reshaped;
}
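
// Hedged sketch: NCHW weights [Kx=3, Ky=3, IFM=8, OFM=16], one group, no bias:
// the kernel volume 3 * 3 * 8 = 72 collapses into one dimension, giving
// [OFM, Kx*Ky*IFM, num_groups] = [16, 72, 1]; with has_bias == true the second
// dimension grows by one row to 73.
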

/** Calculate the Left Hand Side matrix reshaped shape
 *
 * @param[in] a                       Input tensor info
 * @param[in] lhs_info                Left Hand Side matrix information
 * @param[in] reinterpret_input_as_3d (Optional) Set to true if the input needs to be interpreted as 3d
 *
 * @return the calculated shape
 */
inline TensorShape compute_lhs_reshaped_shape(const ITensorInfo &a, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false)
{
    ARM_COMPUTE_ERROR_ON(lhs_info.m0 == 0);
    ARM_COMPUTE_ERROR_ON(lhs_info.k0 == 0);
    ARM_COMPUTE_ERROR_ON(lhs_info.v0 == 0);

    // Input width/height
    const unsigned int input_width  = a.dimension(0);
    const unsigned int input_height = reinterpret_input_as_3d ? a.dimension(1) * a.dimension(2) : a.dimension(1);

    // Number of horizontal/vertical blocks in the input tensor
    const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(lhs_info.k0));
    const unsigned int num_vert_blocks  = std::ceil(input_height / static_cast<float>(lhs_info.m0));

    // Block size
    const unsigned int block_size = lhs_info.m0 * lhs_info.k0;

    // Output width/height
    const unsigned int output_width  = block_size * num_horiz_blocks * lhs_info.v0;
    const unsigned int output_height = std::ceil(num_vert_blocks / static_cast<float>(lhs_info.v0));

    TensorShape lhs_shape{ a.tensor_shape() };
    lhs_shape.set(0, output_width);
    lhs_shape.set(1, output_height);

    if((reinterpret_input_as_3d) && (lhs_shape.num_dimensions() > 2))
    {
        // When the data format is NHWC and the shapes are Nx1x1
        // the tensor shape num_dimensions is automatically set to 1 instead of 3.
        // To avoid failures by removing a dimension that doesn't exist
        // check if the number of dimensions is greater than 2.
        lhs_shape.remove_dimension(2);
    }

    return lhs_shape;
}

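// Hedged sketch: for a 2D LHS matrix a = [K=8, M=8] with lhs_info.m0 = 4,
// lhs_info.k0 = 4 and lhs_info.v0 = 2 there are 2x2 blocks of 4 * 4 = 16
// elements each, so the reshaped shape is
// [block_size * num_horiz_blocks * v0, ceil(num_vert_blocks / v0)]
// = [16 * 2 * 2, ceil(2 / 2)] = [64, 1].
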
/** Calculate the Right Hand Side matrix reshaped shape
 *
 * @param[in] a        Input tensor info
 * @param[in] rhs_info Right Hand Side matrix information
 *
 * @return the calculated shape
 */
inline TensorShape compute_rhs_reshaped_shape(const ITensorInfo &a, const GEMMRHSMatrixInfo &rhs_info)
{
    ARM_COMPUTE_ERROR_ON(rhs_info.n0 == 0);
    ARM_COMPUTE_ERROR_ON(rhs_info.k0 == 0);
    ARM_COMPUTE_ERROR_ON(rhs_info.h0 == 0);

    // Input width/height
    const unsigned int input_width  = a.dimension(0);
    const unsigned int input_height = a.dimension(1);

    // Number of horizontal/vertical blocks in the input tensor
    const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(rhs_info.n0));
    const unsigned int num_vert_blocks  = std::ceil(input_height / static_cast<float>(rhs_info.k0));

    // Block size
    const unsigned int block_size = rhs_info.n0 * rhs_info.k0;

    // Output width/height
    const unsigned int output_width  = block_size * num_vert_blocks * rhs_info.h0;
    const unsigned int output_height = std::ceil(num_horiz_blocks / static_cast<float>(rhs_info.h0));

    TensorShape rhs_shape{ a.tensor_shape() };
    rhs_shape.set(0, output_width);
    rhs_shape.set(1, output_height);

    return rhs_shape;
}

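// Hedged sketch, mirroring the LHS case: for a = [N=8, K=8] with rhs_info.n0 = 4,
// rhs_info.k0 = 4 and rhs_info.h0 = 2 the reshaped shape is
// [block_size * num_vert_blocks * h0, ceil(num_horiz_blocks / h0)]
// = [16 * 2 * 2, ceil(2 / 2)] = [64, 1].
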
/** Calculate the interleaved shape of an input tensor
 *
 * @param[in] a                         Input tensor info
 * @param[in] mult_interleave4x4_height (Optional) Interleave4x4 height
 * @param[in] reinterpret_input_as_3d   (Optional) Set to true if the input needs to be interpreted as 3d
 *
 * @return the calculated shape
 */
inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false)
{
    // The interleaved output matrix will have the following shape: [ a_height * W, ceil(a_width / W) ] where W = 4 * mult_interleave4x4_height
    ARM_COMPUTE_ERROR_ON(mult_interleave4x4_height < 1);
    const int   interleave_width = 4 * mult_interleave4x4_height;
    TensorShape shape_interleaved_a{ a.tensor_shape() };
    shape_interleaved_a.set(0, a.dimension(0) * interleave_width);
    if(reinterpret_input_as_3d)
    {
        const int M      = a.dimension(1) * a.dimension(2);
        const int height = std::ceil(M / static_cast<float>(interleave_width));
        shape_interleaved_a.set(1, height);

        // When the data format is NHWC and the shapes are Nx1x1
        // the tensor shape num_dimensions is automatically set to 1 instead of 3.
        // To avoid failures by removing a dimension that doesn't exist
        // check if the number of dimensions is greater than 2.
        if(shape_interleaved_a.num_dimensions() > 2)
        {
            shape_interleaved_a.remove_dimension(2);
        }
    }
    else
    {
        shape_interleaved_a.set(1, std::ceil(a.dimension(1) / static_cast<float>(interleave_width)));
    }

    return shape_interleaved_a;
}

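// Hedged sketch: with mult_interleave4x4_height = 1 the interleave width W is 4,
// so a [7, 5] matrix becomes [7 * 4, ceil(5 / 4)] = [28, 2].
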
/** Calculate the transposed 1xW shape
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_transpose1xW_shape(const ITensorInfo &b)
{
    // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
    TensorShape shape_transposed1xW_b{ b.tensor_shape() };
    shape_transposed1xW_b.set(0, b.dimension(1) * 16);
    shape_transposed1xW_b.set(1, std::ceil(b.dimension(0) / 16.f));

    return shape_transposed1xW_b;
}

/** Calculate the transposed 1xW width element shape
 *
 * @param[in] b                       Input tensor info
 * @param[in] mult_transpose1xW_width (Optional) Transpose1xW width
 *
 * @return the calculated shape
 */
inline TensorShape compute_transpose1xW_with_element_size_shape(const ITensorInfo &b, int mult_transpose1xW_width = 1)
{
    // Note: mult_transpose1xW_width expresses the number of chunks with size 1x(W) we want to store on the same row
    // The transpose1xW output matrix will have the following shape:
    // [ b_height * W, ceil(b_width / W) ] where W = (16 / element size of the tensor) * mult_transpose1xW_width
    ARM_COMPUTE_ERROR_ON(mult_transpose1xW_width < 1);
    TensorShape  shape_transposed1xW_b{ b.tensor_shape() };
    const size_t transpose_width = (16 / b.element_size()) * mult_transpose1xW_width;
    shape_transposed1xW_b.set(0, b.dimension(1) * transpose_width);
    shape_transposed1xW_b.set(1, static_cast<size_t>(std::ceil(b.dimension(0) / static_cast<float>(transpose_width))));

    return shape_transposed1xW_b;
}

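// Hedged sketch: for an F32 tensor (element size 4 bytes) and
// mult_transpose1xW_width = 1 the chunk width is W = (16 / 4) * 1 = 4, so a
// [7, 5] matrix b becomes [5 * 4, ceil(7 / 4)] = [20, 2]. The fixed-width
// variant above always uses W = 16: [5 * 16, ceil(7 / 16)] = [80, 1].
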
/** Calculate the reductionA shape used in GEMMLowp
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_reductionA_shape(const ITensorInfo &b)
{
    TensorShape shape_vector_sum_col{ b.tensor_shape() };
    if(shape_vector_sum_col.num_dimensions() > 1)
    {
        shape_vector_sum_col.remove_dimension(1);
    }

    return shape_vector_sum_col;
}

/** Calculate the reductionB shape used in GEMMLowp
 *
 * @param[in] a Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_reductionB_shape(const ITensorInfo &a)
{
    TensorShape shape_vector_sum_row{ a.tensor_shape() };
    shape_vector_sum_row.set(Window::DimX, a.dimension(1));
    if(shape_vector_sum_row.num_dimensions() > 1)
    {
        shape_vector_sum_row.remove_dimension(1);
    }

    return shape_vector_sum_row;
}

/** Calculate the Col2Im shape
 *
 * @param[in] input           Input tensor info
 * @param[in] convolved_dims  Convolved dimensions
 * @param[in] batch_size_on_z True if batch size is on z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape
 */
inline TensorShape compute_col2im_shape(const ITensorInfo &input, const Size2D &convolved_dims, bool batch_size_on_z, unsigned int num_groups = 1)
{
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(input.tensor_shape()[1] != (convolved_dims.area()));
    ARM_COMPUTE_ERROR_ON((num_groups > 1) && input.tensor_shape()[2] != num_groups);

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape col2im_shape{ input.tensor_shape() };
    // If batches start on the 3rd dimension, shift dimensions right by 1 to retain the upper tensor shape,
    // as the first three will be overridden by H,W,C data
    if(batch_size_on_z && num_groups == 1)
    {
        col2im_shape.shift_right(1);
    }
    col2im_shape.set(width_idx, convolved_dims.width);
    col2im_shape.set(height_idx, convolved_dims.height);
    col2im_shape.set(channel_idx, input.tensor_shape()[0] * num_groups);

    return col2im_shape;
}

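// Hedged sketch: col2im undoes im2col. An im2col result of [C=16, 3*3=9, N=1]
// with convolved_dims = Size2D(3, 3) and batch_size_on_z == true shifts the
// batch out of dimension 2 and maps back (NCHW) to [W=3, H=3, C=16, N=1].
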
/** Calculate the transposed shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_transposed_shape(const ITensorInfo &input)
{
    TensorShape shape_transposed{ input.tensor_shape() };

    shape_transposed.set(0, input.dimension(1));
    shape_transposed.set(1, input.dimension(0));

    return shape_transposed;
}

/** Calculate the depthwise convolution output shape of a tensor
 *
 * @param[in] input   Input tensor info
 * @param[in] weights Weights tensor info
 * @param[in] info    Convolution info
 *
 * @return the calculated shape
 */
inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, const ConvolutionInfo &info)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    const DataLayout weights_data_layout = weights.data_layout();
    const int        weights_width_idx   = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::WIDTH);
    const int        weights_height_idx  = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::HEIGHT);

    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_shape[width_idx], input_shape[height_idx],
                                                              weights_shape[weights_width_idx], weights_shape[weights_height_idx],
                                                              info.pad_stride_info, info.dilation);

    TensorShape output_shape{ input_shape };
    output_shape.set(width_idx, output_width);
    output_shape.set(height_idx, output_height);
    output_shape.set(channel_idx, input_shape[channel_idx] * info.depth_multiplier);

    return output_shape;
}

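// Hedged sketch (the ConvolutionInfo member layout shown is an assumption):
// an NCHW input [W=8, H=8, C=3], a 3x3 depthwise kernel, stride 1, pad 1 and
// depth_multiplier = 2 keeps the spatial dims and doubles the channels:
//
//   const ConvolutionInfo info{ PadStrideInfo(1, 1, 1, 1), 2, ActivationLayerInfo(), Size2D(1, 1) };
//   // compute_depthwise_convolution_shape(input, weights, info) -> [8, 8, 6]
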
/** Calculate the upsampled output shape used for deconvolution
 *
 * @param[in]  input    Input tensor info
 * @param[in]  weights  Weights tensor info
 * @param[in]  sx       Stride on x axis
 * @param[in]  sy       Stride on y axis
 * @param[in]  out_dims Output shape dimensions
 * @param[out] padx     Padding on x axis
 * @param[out] pady     Padding on y axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_deconvolution_upsampled_shape(const ITensorInfo &input, const ITensorInfo &weights, unsigned int sx, unsigned int sy,
                                                         std::pair<unsigned int, unsigned int> &out_dims, uint32_t &padx, uint32_t &pady)
{
    const DataLayout data_layout = input.data_layout();
    const size_t     idx_w       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t     idx_h       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    // Find the upsampled dimensions
    unsigned int out_x = (input.dimension(idx_w) - 1) * sx + 1;
    unsigned int out_y = (input.dimension(idx_h) - 1) * sy + 1;

    // Find the padding needed for the convolution with stride 1 in order to match output shape
    padx = out_dims.first - (out_x - weights.dimension(idx_w) + 1);
    pady = out_dims.second - (out_y - weights.dimension(idx_h) + 1);
    out_x += padx;
    out_y += pady;

    TensorShape scale_out_shape(input.tensor_shape());
    scale_out_shape.set(idx_w, out_x);
    scale_out_shape.set(idx_h, out_y);

    return scale_out_shape;
}

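// Hedged sketch: deconvolution runs as upsample + stride-1 convolution.
// For a 4x4 input, strides sx = sy = 2, a 3x3 kernel and target out_dims (9, 9):
// out_x = (4 - 1) * 2 + 1 = 7 and padx = 9 - (7 - 3 + 1) = 4, so the upsampled
// shape is 11x11, and a stride-1 3x3 convolution over 11x11 yields 9x9 as required.
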
/** Calculate the output shape of the deconvolution layer
 *
 * @param[in] out_dims Output x and y shape dimensions
 * @param[in] input    Input tensor info
 * @param[in] weights  Weights tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_deconvolution_output_shape(const std::pair<unsigned int, unsigned int> &out_dims, const ITensorInfo &input, const ITensorInfo &weights)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        batch_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    TensorShape out_shape{ input_shape };
    out_shape.set(width_idx, out_dims.first);
    out_shape.set(height_idx, out_dims.second);
    out_shape.set(channel_idx, weights_shape[batch_idx]);
    return out_shape;
}

/** Calculate the im2col output shape of a tensor
 *
 * @param[in] input           Input tensor info
 * @param[in] kernel_dims     The kernel dimensions (width and height).
 * @param[in] conv_info       Contains padding and stride information
 * @param[in] has_bias        In case biases are provided expands the matrix with 1
 * @param[in] dilation        Dilation, in elements, across x and y
 * @param[in] batch_size_on_z True if batch size is on z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape
 */
inline TensorShape compute_im2col_conv_shape(const ITensorInfo *input, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, bool batch_size_on_z,
                                             unsigned int num_groups = 1)
{
    // The output shape will be the 3D shape [ out_channels * kernel_area, num_elems_per_out_channel, batches ] if batch_size_on_z == true
    // or the 4D shape [ out_channels * kernel_area / num_groups, num_elems_per_out_channel, num_groups, batches ] if batch_size_on_z == false

    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && input->data_layout() != DataLayout::NCHW);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && batch_size_on_z);

    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    std::pair<unsigned int, unsigned int> out_dims = scaled_dimensions(output_shape[width_idx], output_shape[height_idx], kernel_dims.width, kernel_dims.height, conv_info, dilation);
    output_shape.set(0, (output_shape[channel_idx] / num_groups * kernel_dims.area() + (has_bias ? 1 : 0))); // NOLINT
    output_shape.set(1, (out_dims.first * out_dims.second));
    if(batch_size_on_z && output_shape.num_dimensions() >= 3)
    {
        output_shape.remove_dimension(2);
    }
    else
    {
        output_shape.set(2, num_groups);
    }

    return output_shape;
}

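// Hedged sketch: for an NCHW input [W=5, H=5, C=3, N=1], a 3x3 kernel,
// stride 1, no padding, no bias, dilation 1 and batch_size_on_z == true:
// dim0 = C * kernel area = 3 * 9 = 27 and dim1 = 3 * 3 = 9 output positions,
// so the im2col shape is [27, 9, 1].
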
/** Calculate the flattened output shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_flatten_shape(const ITensorInfo *input)
{
    // The output shape will be the flatten version of the input (i.e. [ width * height * channels, num_batches, ... ] ). Used for FlattenLayer and FullyConnectedLayer.

    TensorShape output_shape{ input->tensor_shape() };

    output_shape.collapse(3);

    return output_shape;
}

/** Calculate the softmax output shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] axis  (Optional) Softmax axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_softmax_shape(const ITensorInfo *input, size_t axis = 1)
{
    // The output shape will be a 2D version of the input. For instance:
    // - [x,y,z] and axis 1 will return [x, y*z]
    // - [x,y,z,w] and axis 2 will return [x*y, w*z]
    // - [x,y,z,w] and axis 3 will return [x*y*z, w]
    TensorShape shape2D = input->tensor_shape();

    if(axis < input->num_dimensions())
    {
        // Collapse from axis onward (this changes the shape)
        shape2D.collapse_from(axis);

        // Collapse the rest (collapse is inclusive)
        shape2D.collapse(shape2D.num_dimensions() - 1);
    }
    else
    {
        // Collapse everything
        shape2D.collapse(shape2D.num_dimensions());
    }

    if(axis == 0)
    {
        // If axis is zero the first dim should be one. Since
        // collapse is an inclusive operation we need to shift
        shape2D.shift_right(1);
    }

    return shape2D;
}

/** Calculate the winograd filter transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    TensorShape tensor_shape{ input.tensor_shape() };

    const Size2D kernel_size      = winograd_info.kernel_size;
    const Size2D output_tile_size = winograd_info.output_tile_size;
    const Size2D input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    tensor_shape.remove_dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH));
    tensor_shape.set(Window::DimX, input.dimension(3));
    tensor_shape.set(Window::DimY, input.dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL)));
    tensor_shape.set(Window::DimZ, input_tile_size.area());

    return tensor_shape;
}

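// Hedged sketch: for the F(2x2, 3x3) Winograd variant the input tile is
// (2 + 3 - 1) x (2 + 3 - 1) = 4x4, so NCHW weights [3, 3, C=8, OFM=16]
// transform to [OFM, C, tile area] = [16, 8, 16].
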
/** Calculate the winograd input transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        output_tile_size = winograd_info.output_tile_size;
    const Size2D        input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    const size_t idx_w = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    // Compute the number of output tiles along the x and y direction of size "output_tile_size"
    const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(input.tensor_shape()[idx_w], input.tensor_shape()[idx_h]),
                                                                kernel_size,
                                                                output_tile_size,
                                                                conv_info);

    const unsigned int width  = input.tensor_shape()[idx_c];
    const unsigned int height = num_tiles.area();
    const unsigned int depth  = input_tile_size.area();

    TensorShape output_shape{ input.tensor_shape() };
    output_shape.set(0, width);
    output_shape.set(1, height);
    output_shape.set(2, depth);

    return output_shape;
}

/** Calculate the winograd output transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        input_dimensions = winograd_info.input_dimensions;
    const DataLayout    data_layout      = winograd_info.output_data_layout;

    // Compute output shape
    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_dimensions.width, input_dimensions.height,
                                                              kernel_size.width, kernel_size.height, conv_info);

    TensorShape tensor_shape{ input.tensor_shape() };

    // Output dimension
    const unsigned int out_w = output_width;
    const unsigned int out_h = output_height;
    const unsigned int out_c = input.dimension(0);

    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH), out_w);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT), out_h);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL), out_c);

    return tensor_shape;
}

/** Calculate the deep convolution output shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] weights   Weights tensor info
 * @param[in] conv_info Contains padding and stride information
 *
 * @return the calculated shape
 */
inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    const unsigned int input_width         = input_shape[idx_width];
    const unsigned int input_height        = input_shape[idx_height];
    const unsigned int weights_width       = weights_shape[idx_width];
    const unsigned int weights_height      = weights_shape[idx_height];
    const unsigned int weights_out_channel = weights_shape[3];
    unsigned int       output_width        = 0;
    unsigned int       output_height       = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, weights_width, weights_height, conv_info);

    TensorShape output_shape{ input_shape };
    output_shape.set(idx_width, output_width);
    output_shape.set(idx_height, output_height);
    output_shape.set(idx_channel, weights_out_channel);

    return output_shape;
}

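// Hedged sketch: an NCHW input [W=8, H=8, C=3, N=1] convolved with weights
// [3, 3, 3, OFM=16] at stride 1 and no padding gives
// [(8 - 3) + 1, (8 - 3) + 1, OFM, N] = [6, 6, 16, 1].
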
/** Calculate the min/max output shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_min_max_shape(const ITensorInfo *input)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(Window::DimX, 2);
    output_shape.remove_dimension(1);
    output_shape.remove_dimension(1);

    return output_shape;
}

/** Calculate the output pool shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
{
    int pooled_w = 0;
    int pooled_h = 0;

    TensorShape output_shape{ input.tensor_shape() };

    const bool is_global_pooling = pool_info.is_global_pooling;
    const int  idx_width         = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const int  idx_height        = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const int  input_width       = input.tensor_shape()[idx_width];
    const int  input_height      = input.tensor_shape()[idx_height];
    const int  pool_size_x       = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size.width;
    const int  pool_size_y       = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size.height;

    std::tie(pooled_w, pooled_h) = scaled_dimensions_signed(input_width, input_height,
                                                            pool_size_x, pool_size_y,
                                                            pool_info.pad_stride_info);

    ARM_COMPUTE_ERROR_ON_MSG((pooled_w < 1 || pooled_h < 1), "Calculated output dimension size is invalid");

    output_shape.set(idx_width, static_cast<size_t>(pooled_w));
    output_shape.set(idx_height, static_cast<size_t>(pooled_h));

    return output_shape;
}

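// Hedged sketch (the PoolingLayerInfo constructor shown is an assumption):
// a 2x2 pool with stride 2 and no padding over an NCHW input [W=8, H=8, C=16]
// halves the spatial dimensions -> [4, 4, 16].
//
//   PoolingLayerInfo pool_info(PoolingType::MAX, Size2D(2, 2), DataLayout::NCHW, PadStrideInfo(2, 2, 0, 0));
//   // compute_pool_shape(input, pool_info) -> [4, 4, 16]
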
/** Calculate the output unpool shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_unpool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
{
    const unsigned int idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const TensorShape  input_shape = input.tensor_shape();
    ARM_COMPUTE_ERROR_ON(input_shape[idx_height] <= 1 || input_shape[idx_width] <= 1);
    const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
    const unsigned int  stride_x        = pad_stride_info.stride().first;
    const unsigned int  stride_y        = pad_stride_info.stride().second;

    const int pad_left   = pad_stride_info.pad_left();
    const int pad_top    = pad_stride_info.pad_top();
    const int pad_right  = pad_stride_info.pad_right();
    const int pad_bottom = pad_stride_info.pad_bottom();

    TensorShape        output_shape = input_shape;
    const unsigned int out_width    = (input_shape[idx_width] - 1) * stride_x - pad_left - pad_right + pool_info.pool_size.width;
    const unsigned int out_height   = (input_shape[idx_height] - 1) * stride_y - pad_top - pad_bottom + pool_info.pool_size.height;

    output_shape.set(idx_width, out_width);
    output_shape.set(idx_height, out_height);
    return output_shape;
}

/** Calculate the output roi align shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] rois      Rois tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_roi_align_shape(const ITensorInfo &input, const ITensorInfo &rois, ROIPoolingLayerInfo pool_info)
{
    TensorShape output_shape{ input.tensor_shape() };

    const unsigned int idx_width  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);

    output_shape.set(idx_width, pool_info.pooled_width());
    output_shape.set(idx_height, pool_info.pooled_height());
    output_shape.set(3, rois.dimension(1));

    return output_shape;
}

/** Calculate the RNN shape of a tensor
 *
 * @param[in] input      Input tensor info
 * @param[in] batch_size Batch size
 *
 * @return the calculated shape
 */
inline TensorShape compute_rnn_shape(const ITensorInfo *input, const unsigned int batch_size)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(1, batch_size);

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0                    First input tensor info
 * @param[in] input1                    Second input tensor info
 * @param[in] is_interleaved_transposed True if the input is interleaved transposed
 * @param[in] reshape_info              GEMM reshape info
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info)
{
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
    ARM_COMPUTE_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The first input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");

    const bool reinterpret_input_as_3d  = reshape_info.reinterpret_input_as_3d();
    const bool reinterpret_output_as_3d = reshape_info.depth_output_gemm3d() != 0;
    const int  depth_output_gemm3d      = reinterpret_output_as_3d ? reshape_info.depth_output_gemm3d() : 1;
    const int  m                        = reshape_info.reinterpret_input_as_3d() ? input0.dimension(1) * input0.dimension(2) : input0.dimension(1);

    // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
    // dimension of the output tensor
    const int dim0 = is_interleaved_transposed ? reshape_info.n() : input1.dimension(0);
    const int dim1 = is_interleaved_transposed ? reshape_info.m() / depth_output_gemm3d : m / depth_output_gemm3d;
    const int dim2 = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
    const int dim3 = reinterpret_input_as_3d ? 1 : input0.tensor_shape()[3];

    TensorShape output_shape{ input0.tensor_shape() };

    output_shape.set(0, dim0);
    output_shape.set(1, dim1);
    output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : dim2);
    output_shape.set(3, reinterpret_output_as_3d ? dim2 : dim3);
    output_shape.set(4, reinterpret_output_as_3d ? dim3 : 1);

    return output_shape;
}

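// Hedged sketch (plain GEMM: not interleaved-transposed, no 3D reinterpretation):
// input0 = [K=4, M=3] and input1 = [N=5, K=4] produce [N, M] = [5, 3].
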
/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0    First input tensor info
 * @param[in] input1    Second input tensor info
 * @param[in] gemm_info GEMM reshape info
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMReshapeInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(input1);
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");

    const bool reinterpret_input_as_3d  = gemm_info.reinterpret_input_as_3d();
    const bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d() != 0;
    const int  depth_output_gemm3d      = reinterpret_output_as_3d ? gemm_info.depth_output_gemm3d() : 1;

    TensorShape output_shape{ input0.tensor_shape() };

    if(!reinterpret_input_as_3d && !reinterpret_output_as_3d)
    {
        output_shape.set(0, gemm_info.n());
        output_shape.set(1, gemm_info.m());
    }
    else
    {
        // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
        // dimension of the output tensor
        const int batch_size = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
        output_shape.set(0, gemm_info.n());
        output_shape.set(1, gemm_info.m() / depth_output_gemm3d);
        output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : batch_size);
        output_shape.set(3, reinterpret_output_as_3d ? batch_size : 1);
    }

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0    First input tensor info
 * @param[in] input1    Second input tensor info
 * @param[in] gemm_info GEMM kernel info used to retrieve the original dimensions of the input matrices
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMKernelInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(input1);
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");

    const bool         reinterpret_input_as_3d  = gemm_info.reinterpret_input_as_3d;
    const bool         reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
    const unsigned int depth_output_gemm3d      = reinterpret_output_as_3d ? gemm_info.depth_output_gemm3d : 1;

    TensorShape output_shape{ input0.tensor_shape() };

    if(!reinterpret_input_as_3d && !reinterpret_output_as_3d)
    {
        output_shape.set(0, gemm_info.n);
        output_shape.set(1, gemm_info.m);
    }
    else
    {
        // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
        // dimension of the output tensor
        const unsigned int batch_size = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
        output_shape.set(0, gemm_info.n);
        output_shape.set(1, gemm_info.m / depth_output_gemm3d);
        output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : batch_size);
        output_shape.set(3, reinterpret_output_as_3d ? batch_size : 1);
    }

    return output_shape;
}

969/** Calculate the matrix multiplication output shape of two tensors
970 *
Michalis Spyroud33fe342019-01-04 17:10:25 +0000971 * @param[in] input Input tensor info
972 * @param[in] gemm_3d_depth (Optional) GEMM 3d depth
973 * @param[in] batch_size_on_z (Optional) True if batch size is on z axis
974 *
975 * @return the calculated shape
976 */
Georgios Pinitas932491f2018-09-21 16:33:15 +0100977inline TensorShape compute_output_stage_shape(const ITensorInfo &input, unsigned int gemm_3d_depth = 1, bool batch_size_on_z = false)
Georgios Pinitas041f36d2018-09-18 18:38:37 +0100978{
979 ARM_COMPUTE_ERROR_ON(input.data_layout() != DataLayout::NHWC && gemm_3d_depth > 1);
980
981 TensorShape output_shape = input.tensor_shape();
982 if(gemm_3d_depth > 1)
983 {
Georgios Pinitas932491f2018-09-21 16:33:15 +0100984 if(batch_size_on_z)
985 {
986 output_shape.shift_right(1);
987 }
Georgios Pinitas041f36d2018-09-18 18:38:37 +0100988 output_shape.set(0, input.tensor_shape().x());
989 output_shape.set(1, input.tensor_shape().y() / gemm_3d_depth);
990 output_shape.set(2, gemm_3d_depth);
991 }
992
993 return output_shape;
994}
995
/** Calculate the strided slice output shape of a tensor
 *
 * @param[in] input            Input tensor info
 * @param[in] starts           The starts of the dimensions of the input tensor to be sliced
 * @param[in] ends             The ends of the dimensions of the input tensor to be sliced
 * @param[in] strides          The strides of the dimensions of the input tensor to be sliced
 * @param[in] begin_mask       If the ith bit of begin_mask is set, starts[i] is ignored and the fullest possible range in that dimension is used instead.
 * @param[in] end_mask         If the ith bit of end_mask is set, ends[i] is ignored and the fullest possible range in that dimension is used instead.
 * @param[in] shrink_axis_mask If the ith bit of shrink_axis_mask is set, it implies that the ith specification shrinks the dimensionality by 1
 *
 * @return the calculated shape
 */
inline TensorShape compute_strided_slice_shape(const ITensorInfo &input,
                                               const Coordinates &starts, const Coordinates &ends, const Coordinates &strides,
                                               int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    using namespace arm_compute::helpers::tensor_transform;
    return compute_strided_slice_output_shape(input.tensor_shape(), starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
}

/** Calculate the slice output shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] starts      The starts of the dimensions of the input tensor to be sliced
 * @param[in] ends        The ends of the dimensions of the input tensor to be sliced
 *
 * @return the calculated shape
 */
inline TensorShape compute_slice_shape(const TensorShape &input_shape, const Coordinates &starts, const Coordinates &ends)
{
    using namespace arm_compute::helpers::tensor_transform;

    return compute_strided_slice_output_shape(input_shape,
                                              starts, ends, BiStrides(),
                                              0, construct_slice_end_mask(ends), 0);
}

/** Calculate the batch to space output shape of a tensor
 *
 * @param[in] input   Input tensor info
 * @param[in] block_x Block shape x value
 * @param[in] block_y Block shape y value
 *
 * @return the calculated shape
 */
inline TensorShape compute_batch_to_space_shape(const ITensorInfo *input, const int block_x, const int block_y)
{
    ARM_COMPUTE_ERROR_ON(block_x <= 0 || block_y <= 0);

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_batch   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(idx_width, input->tensor_shape()[idx_width] * block_x);
    output_shape.set(idx_height, input->tensor_shape()[idx_height] * block_y);
    output_shape.set(idx_batch, input->tensor_shape()[idx_batch] / (block_x * block_y));

    return output_shape;
}
Georgios Pinitas77589b52018-08-21 14:41:35 +01001057
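// Hedged sketch (hypothetical NCHW tensor, shape laid out as (W, H, C, N)):
//   TensorInfo        src(TensorShape(4U, 4U, 3U, 8U), 1, DataType::F32);
//   const TensorShape dst = compute_batch_to_space_shape(&src, 2, 2);
//   // dst is (8, 8, 3, 2): spatial dims scaled by the block, batches divided by block_x * block_y.
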
/** Calculate the depth to space output shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] data_layout Operation data layout
 * @param[in] block       Block shape value
 *
 * @return the calculated shape
 */
inline TensorShape compute_depth_to_space_shape(const TensorShape &input_shape, DataLayout data_layout, int block)
{
    ARM_COMPUTE_ERROR_ON(block < 2);

    const int idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape output_shape{ input_shape };
    output_shape.set(idx_width, input_shape[idx_width] * block);
    output_shape.set(idx_height, input_shape[idx_height] * block);
    output_shape.set(idx_channel, input_shape[idx_channel] / (block * block));

    return output_shape;
}

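// Worked sketch (assumed NCHW shape (W, H, C, N)): an input of (4, 4, 8, 1) with
// block = 2 maps to (4 * 2, 4 * 2, 8 / (2 * 2), 1) = (8, 8, 2, 1).
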
/** Calculate the split output shape of a tensor
 *
 * @param[in] input      Input tensor info
 * @param[in] axis       Axis on which to split the input
 * @param[in] num_splits Number of splits
 *
 * @return the calculated shape
 */
inline TensorShape compute_split_shape(const ITensorInfo *input, unsigned int axis, unsigned int num_splits)
{
    TensorShape empty_shape;
    empty_shape.set(0, 0);

    TensorShape out_shape{ input->tensor_shape() };

    // Return empty shape if axis is invalid
    if(axis > input->tensor_shape().num_dimensions())
    {
        return empty_shape;
    }

    size_t axis_size = out_shape[axis];

    // Return empty shape if num_split is not valid
    if(axis_size % num_splits)
    {
        return empty_shape;
    }

    out_shape[axis] = axis_size / num_splits;
    return out_shape;
}

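// Sketch (assumed sizes): splitting a (6, 12, 3) shape 3 ways along axis 1 yields
// (6, 4, 3); a request like num_splits = 5 does not divide 12 evenly, so the helper
// signals the error by returning the empty shape instead.
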
/** Calculate the space to batch output shape of a tensor
 *
 * @param[in] input         Input tensor info
 * @param[in] block_x       Block shape x value
 * @param[in] block_y       Block shape y value
 * @param[in] padding_left  Left padding values
 * @param[in] padding_right Right padding values
 *
 * @return the calculated shape
 */
inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, const int block_x, const int block_y, const Size2D &padding_left, const Size2D &padding_right)
{
    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_batch   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    ARM_COMPUTE_ERROR_ON((input->tensor_shape()[idx_width] + padding_left.x() + padding_right.x()) % block_x != 0);
    ARM_COMPUTE_ERROR_ON((input->tensor_shape()[idx_height] + padding_left.y() + padding_right.y()) % block_y != 0);

    output_shape.set(idx_width, (input->tensor_shape()[idx_width] + padding_left.x() + padding_right.x()) / block_x);
    output_shape.set(idx_height, (input->tensor_shape()[idx_height] + padding_left.y() + padding_right.y()) / block_y);
    output_shape.set(idx_batch, input->tensor_shape()[idx_batch] * block_x * block_y);

    return output_shape;
}

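// Hedged sketch (assumed NCHW shape (W, H, C, N)): an input of (6, 6, 3, 1) with a
// 2x2 block and one pixel of padding on every side gives
// ((6 + 1 + 1) / 2, (6 + 1 + 1) / 2, 3, 1 * 2 * 2) = (4, 4, 3, 4).
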
/** Calculate the space to depth output shape of a tensor
 *
 * @param[in] input       Input tensor info
 * @param[in] block_shape Block shape value
 *
 * @return the calculated shape
 */
inline TensorShape compute_space_to_depth_shape(const ITensorInfo *input, int32_t block_shape)
{
    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_depth   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    output_shape.set(idx_width, input->tensor_shape()[idx_width] / block_shape);
    output_shape.set(idx_height, input->tensor_shape()[idx_height] / block_shape);
    output_shape.set(idx_depth, input->tensor_shape()[idx_depth] * (block_shape * block_shape));

    return output_shape;
}

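// Sketch (assumed NCHW shape (W, H, C, N)): space-to-depth is the inverse of
// depth-to-space, so an (8, 8, 2, 1) input with block_shape = 2 becomes
// (8 / 2, 8 / 2, 2 * 2 * 2, 1) = (4, 4, 8, 1).
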
/** Calculate the prior box output shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] info  PriorBoxLayer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_prior_box_shape(const ITensorInfo &input, const PriorBoxLayerInfo &info)
{
    DataLayout   data_layout = input.data_layout();
    const size_t idx_w       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int    num_priors  = info.aspect_ratios().size() * info.min_sizes().size() + info.max_sizes().size();

    TensorShape output_shape{};
    output_shape.set(0, input.dimension(idx_w) * input.dimension(idx_h) * num_priors * 4);
    output_shape.set(1, 2);

    return output_shape;
}

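// Worked sketch (assumed layer setup): with 3 aspect ratios, 2 min sizes and 2 max
// sizes, num_priors = 3 * 2 + 2 = 8, so a 10x10 feature map produces
// (10 * 10 * 8 * 4, 2) = (3200, 2): one row of box coordinates, one of variances.
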
/** Calculate the padded shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] padding     Paddings list
 *
 * @return the calculated shape
 */
inline TensorShape compute_padded_shape(const TensorShape &input_shape, const PaddingList &padding)
{
    TensorShape padded_shape = input_shape;
    for(size_t dim = 0; dim < padding.size(); ++dim)
    {
        const auto    &padding_pair   = padding[dim];
        const uint32_t shape_on_index = (padded_shape.num_dimensions() <= dim) ? 1 : input_shape[dim];
        padded_shape.set(dim, padding_pair.first + shape_on_index + padding_pair.second);
    }
    return padded_shape;
}

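// Sketch (assumed sizes): padding a (4, 5) shape with PaddingList{ { 1, 1 }, { 2, 0 } }
// gives (1 + 4 + 1, 2 + 5 + 0) = (6, 7); dimensions beyond the input rank count as 1.
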
/** Calculate the tiled shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] multiples   Multiples list, i.e. the number of times to tile each dimension
 *
 * @return the calculated shape
 */
inline TensorShape compute_tiled_shape(const TensorShape &input_shape, const Multiples &multiples)
{
    TensorShape tiled_shape = input_shape;
    for(size_t dim = 0; dim < multiples.size(); ++dim)
    {
        tiled_shape.set(dim, input_shape[dim] * multiples[dim]);
    }
    return tiled_shape;
}

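// Sketch (assumed sizes): tiling a (2, 3) shape with Multiples{ 2, 2 } yields
// (2 * 2, 3 * 2) = (4, 6).
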
/** Calculate the reduced shape of a tensor given an axis
 *
 * @param[in] input     Input tensor shape
 * @param[in] axis      Axis on which to perform reduction
 * @param[in] keep_dims (Optional) Whether to keep the dimension after reduction operation. Defaults to true.
 *
 * @return the calculated shape
 */
inline TensorShape compute_reduced_shape(const TensorShape &input, unsigned int axis, bool keep_dims = true)
{
    TensorShape output_shape{ input };

    if(!keep_dims)
    {
        output_shape.remove_dimension(axis);
    }
    else
    {
        output_shape.set(axis, 1);
    }

    return output_shape;
}

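// Sketch (assumed sizes): reducing a (11, 12, 13) shape along axis 1 gives
// (11, 1, 13) with keep_dims = true, or (11, 13) once the axis is dropped.
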
/** Calculate the upsampled shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] info  Contains stride information (x and y)
 *
 * @return the calculated shape
 */
inline TensorShape compute_upsample_shape(const ITensorInfo &input, const Size2D &info)
{
    const DataLayout data_layout = input.data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    TensorShape        scale_out_shape(input.tensor_shape());
    const unsigned int out_x = input.dimension(idx_width) * info.x();
    const unsigned int out_y = input.dimension(idx_height) * info.y();
    scale_out_shape.set(idx_width, out_x);
    scale_out_shape.set(idx_height, out_y);

    return scale_out_shape;
}

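// Sketch (assumed NCHW shape (W, H, C, N)): upsampling (4, 6, 3, 1) with strides
// Size2D(2, 2) yields (8, 12, 3, 1); only the spatial dimensions are scaled.
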
/** Get the tensor shape
 *
 * @param[in] data Input data
 *
 * @return the extracted tensor shape
 */
template <typename T>
inline TensorShape extract_shape(T *data)
{
    return data->info()->tensor_shape();
}

inline TensorShape extract_shape(ITensorInfo *data)
{
    return data->tensor_shape();
}

inline TensorShape extract_shape(const ITensorInfo *data)
{
    return data->tensor_shape();
}

inline TensorShape extract_shape(const TensorShape *data)
{
    return *data;
}

inline TensorShape extract_shape(TensorShape *data)
{
    return *data;
}

/** Calculate the unstack shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] axis        Axis on which to perform the unstack operation
 *
 * @return the calculated shape
 */
inline TensorShape calculate_unstack_shape(TensorShape input_shape, unsigned int axis)
{
    ARM_COMPUTE_ERROR_ON(axis > input_shape.num_dimensions());
    input_shape.remove_dimension(axis);
    return input_shape;
}

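// Sketch (assumed sizes): unstacking a (3, 4, 5) shape along axis 1 removes that
// dimension, leaving (3, 5) for each of the 4 extracted tensors.
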
/** Calculate the concatenate output shape of the concatenate operation along a single axis
 *
 * @param[in] input Vector containing the shapes of the inputs
 * @param[in] axis  Axis along which to concatenate the input tensors
 *
 * @return the calculated shape
 */
template <typename T>
inline TensorShape calculate_concatenate_shape(const std::vector<T *> &input, size_t axis)
{
    TensorShape out_shape = extract_shape(input[0]);

#if defined(ARM_COMPUTE_ASSERTS_ENABLED)
    // All dimensions must match except the axis one
    for(unsigned int i = 0; i < MAX_DIMS; ++i)
    {
        if(i == axis)
        {
            continue;
        }

        for(const auto &tensor : input)
        {
            ARM_COMPUTE_ERROR_ON(tensor == nullptr);
            const TensorShape shape = extract_shape(tensor);
            ARM_COMPUTE_ERROR_ON(out_shape[i] != shape[i]);
        }
    }
#endif // defined(ARM_COMPUTE_ASSERTS_ENABLED)

    // Calculate output shape
    size_t new_size = 0;
    for(const auto &tensor : input)
    {
        const TensorShape shape = extract_shape(tensor);
        new_size += shape[axis];
    }

    out_shape.set(axis, new_size);

    return out_shape;
}

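// Sketch (assumed sizes): concatenating shapes (5, 3) and (5, 4) along axis 1 sums
// only the concatenation axis, producing (5, 3 + 4) = (5, 7).
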
/** Calculate the stack output shape of a tensor
 *
 * @param[in] a           Input tensor info
 * @param[in] axis        Axis on which to perform the stack operation
 * @param[in] num_tensors Number of tensors to stack
 *
 * @return the calculated shape
 */
inline TensorShape compute_stack_shape(const ITensorInfo &a, unsigned int axis, unsigned int num_tensors)
{
    ARM_COMPUTE_ERROR_ON(axis > a.num_dimensions());
    ARM_COMPUTE_ERROR_ON(a.num_dimensions() > 4);

    TensorShape shape_out{ a.tensor_shape() };
    shape_out.set(axis, num_tensors);

    unsigned int i_shift = 0;

    for(unsigned int i = 0; i < a.num_dimensions(); ++i)
    {
        if(i == axis)
        {
            i_shift++;
        }

        shape_out.set(i + i_shift, a.tensor_shape()[i]);
    }
    return shape_out;
}

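// Sketch (assumed sizes): stacking 5 tensors of shape (3, 4) along axis 0 inserts a
// new dimension there, so the output is (5, 3, 4); the original dimensions shift up.
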
/** Calculate the output shape of 3d Convolution
 *
 * @param[in] src         Input tensor shape
 * @param[in] weights     Weights tensor shape
 * @param[in] conv3d_info 3d Convolution Parameters object
 *
 * @return the calculated shape
 */
inline TensorShape compute_conv3d_shape(const TensorShape &src, const TensorShape &weights, const Conv3dInfo &conv3d_info)
{
    // Weight tensor shape indices (D H W Cin Cout)
    constexpr unsigned int weights_depth_dim  = 4u;
    constexpr unsigned int weights_height_dim = 3u;
    constexpr unsigned int weights_width_dim  = 2u;
    constexpr unsigned int weights_CHout_dim  = 0u;

    // Source/Destination Tensor shape indices (N D H W C)
    constexpr unsigned int batch_dim   = 4u;
    constexpr unsigned int depth_dim   = 3u;
    constexpr unsigned int height_dim  = 2u;
    constexpr unsigned int width_dim   = 1u;
    constexpr unsigned int channel_dim = 0u;

    TensorShape  output_shape{ src };
    const size_t pad_left   = conv3d_info.padding.left;
    const size_t pad_right  = conv3d_info.padding.right;
    const size_t pad_top    = conv3d_info.padding.top;
    const size_t pad_bottom = conv3d_info.padding.bottom;
    const size_t pad_front  = conv3d_info.padding.front;
    const size_t pad_back   = conv3d_info.padding.back;
    const size_t dilation_x = conv3d_info.dilation.width;
    const size_t dilation_y = conv3d_info.dilation.height;
    const size_t dilation_z = conv3d_info.dilation.depth;
    const size_t stride_x   = conv3d_info.stride.x();
    const size_t stride_y   = conv3d_info.stride.y();
    const size_t stride_z   = conv3d_info.stride.z();

    int output_width_size  = 0;
    int output_height_size = 0;
    int output_depth_size  = 0;

    switch(conv3d_info.round_type)
    {
        case DimensionRoundingType::FLOOR:
            output_width_size  = static_cast<int>(std::floor((static_cast<float>(src[width_dim] + pad_left + pad_right - (dilation_x * (weights[weights_width_dim] - 1) + 1)) / stride_x) + 1));
            output_height_size = static_cast<int>(std::floor((static_cast<float>(src[height_dim] + pad_top + pad_bottom - (dilation_y * (weights[weights_height_dim] - 1) + 1)) / stride_y) + 1));
            output_depth_size  = static_cast<int>(std::floor((static_cast<float>(src[depth_dim] + pad_front + pad_back - (dilation_z * (weights[weights_depth_dim] - 1) + 1)) / stride_z) + 1));
            break;
        case DimensionRoundingType::CEIL:
            output_width_size  = static_cast<int>(std::ceil((static_cast<float>(src[width_dim] + pad_left + pad_right - (dilation_x * (weights[weights_width_dim] - 1) + 1)) / stride_x) + 1));
            output_height_size = static_cast<int>(std::ceil((static_cast<float>(src[height_dim] + pad_top + pad_bottom - (dilation_y * (weights[weights_height_dim] - 1) + 1)) / stride_y) + 1));
            output_depth_size  = static_cast<int>(std::ceil((static_cast<float>(src[depth_dim] + pad_front + pad_back - (dilation_z * (weights[weights_depth_dim] - 1) + 1)) / stride_z) + 1));
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported rounding type");
    }

    output_shape.set(batch_dim, src[batch_dim]);
    output_shape.set(width_dim, output_width_size);
    output_shape.set(height_dim, output_height_size);
    output_shape.set(depth_dim, output_depth_size);
    output_shape.set(channel_dim, weights[weights_CHout_dim]);
    return output_shape;
}

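// Worked sketch (assumed NDHWC layout, shape laid out as (C, W, H, D, N)): a src of
// (16, 32, 32, 8, 1) with 3x3x3 weights (Cout = 32), unit strides and dilation, one
// element of padding on every face and FLOOR rounding gives, per spatial dimension,
// floor((32 + 1 + 1 - 3) / 1) + 1 = 32, i.e. an output of (32, 32, 32, 8, 1).
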
/** Calculate the gather output shape of a tensor
 *
 * @param[in] input_shape   Input tensor shape
 * @param[in] indices_shape Indices tensor shape
 * @param[in] actual_axis   Axis to be used in the computation
 *
 * @return the calculated shape
 */
inline TensorShape compute_gather_shape(const TensorShape &input_shape, const TensorShape &indices_shape, uint32_t actual_axis)
{
    ARM_COMPUTE_ERROR_ON(indices_shape.num_dimensions() > 1);
    ARM_COMPUTE_ERROR_ON(input_shape.num_dimensions() > 4);
    ARM_COMPUTE_ERROR_ON(actual_axis >= input_shape.num_dimensions());

    TensorShape output_shape  = input_shape;
    output_shape[actual_axis] = indices_shape[0];

    return output_shape;
}

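// Sketch (assumed sizes): gathering 8 one-dimensional indices from a (4, 5, 6)
// input along axis 1 replaces that dimension, producing (4, 8, 6).
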
} // namespace shape_calculator
} // namespace misc
} // namespace arm_compute
#endif /* ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H */