/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H
#define ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/Utils.h"

#include "arm_compute/core/utils/helpers/tensor_transform.h"

#include <algorithm> // Needed for std::sort used in calculate_reduce_mean_shape
#include <cmath>

namespace arm_compute
{
namespace misc
{
namespace shape_calculator
{
/** Calculate the output tensor shape for the reduce mean operation
 *
 * @param[in] input          Input tensor info
 * @param[in] reduction_axis Reduction axis
 * @param[in] keep_dims      Flag to indicate if dimensions are kept
 *
 * @return the calculated shape
 */
inline TensorShape calculate_reduce_mean_shape(ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims)
{
    const int   reduction_ops = reduction_axis.num_dimensions();
    Coordinates axis_local    = reduction_axis;
    const int   input_dims    = input->num_dimensions();
    convert_negative_axis(axis_local, input_dims);
    TensorShape out_shape = input->tensor_shape();
    // Configure reshape layer if we want to drop the dimensions
    if(!keep_dims)
    {
        // We have to sort the reduction axis vectors in order for remove_dimension
        // to work properly
        std::sort(axis_local.begin(), axis_local.begin() + reduction_ops);
        for(int i = 0; i < reduction_ops; ++i)
        {
            out_shape.remove_dimension(axis_local[i] - i);
        }
        return out_shape;
    }
    else
    {
        for(int i = 0; i < reduction_ops; ++i)
        {
            out_shape.set(axis_local[i], 1);
        }
        return out_shape;
    }
}
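
// Worked example (illustrative; values assumed): for an input of shape [4, 3, 2] and
// reduction_axis = { 0, 2 }, keep_dims = true yields [1, 3, 1], while keep_dims = false
// removes the reduced dimensions (in sorted order) and yields [3].
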
/** Calculate the output tensor shape of a vector input given the convolution dimensions
 *
 * @param[in] input       Input tensor shape
 * @param[in] conv_w      Convolution width
 * @param[in] conv_h      Convolution height
 * @param[in] data_layout Data layout
 *
 * @return the calculated shape
 */
inline TensorShape compute_vector_to_tensor_output_shape(const TensorShape &input, size_t conv_w, size_t conv_h, const DataLayout &data_layout)
{
    const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape output_shape(input);
    output_shape.set(idx_w, conv_w);
    output_shape.set(idx_h, conv_h);
    output_shape.set(idx_c, input.x() / (conv_w * conv_h));

    return output_shape;
}
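
// Worked example (illustrative; values assumed): a vector input of shape [18] with
// conv_w = 3, conv_h = 3 and DataLayout::NCHW maps to [3, 3, 2], since the channel
// count is recovered as 18 / (3 * 3) = 2.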

/** Calculate the permuted shape of an input given a permutation vector
 *
 * @param[in] input Input tensor info
 * @param[in] perm  Permutation vector
 *
 * @return the calculated shape
 */
inline TensorShape compute_permutation_output_shape(const ITensorInfo &input, const PermutationVector &perm)
{
    TensorShape output_shape = input.tensor_shape();
    permute(output_shape, perm);
    return output_shape;
}

/** Calculate the output shape of the reorg layer given a stride
 *
 * @param[in] input  Input tensor info
 * @param[in] stride Stride
 *
 * @return the calculated shape
 */
inline TensorShape compute_reorg_output_shape(const ITensorInfo &input, int32_t stride)
{
    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    ARM_COMPUTE_ERROR_ON(stride <= 0);
    ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_width] % stride != 0), "The width of the input tensor must be a multiple of stride");
    ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_height] % stride != 0), "The height of the input tensor must be a multiple of stride");

    TensorShape output_shape{ input.tensor_shape() };

    output_shape.set(idx_width, output_shape[idx_width] / stride);
    output_shape.set(idx_height, output_shape[idx_height] / stride);
    output_shape.set(idx_channel, output_shape[idx_channel] * stride * stride);

    return output_shape;
}
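
// Worked example (illustrative; values assumed): an NCHW input of shape [4, 6, 3] with
// stride = 2 becomes [2, 3, 12]: width and height shrink by the stride while the
// channels grow by stride * stride.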

/** Calculate the reshaped shape of the weights
 *
 * @param[in] weights    Weights tensor info
 * @param[in] has_bias   (Optional) Set to true if there is bias
 * @param[in] num_groups (Optional) Number of groups
 *
 * @return the calculated shape of the reshaped weights
 */
inline TensorShape compute_weights_reshaped_shape(const ITensorInfo &weights, bool has_bias = false, unsigned int num_groups = 1)
{
    // A number of groups greater than one is only supported for the NCHW data layout,
    // and the number of weights must be a multiple of it.
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(weights.data_layout() == DataLayout::NHWC && num_groups > 1);
    ARM_COMPUTE_ERROR_ON((weights.dimension(3) % num_groups) != 0);

    // Calculate output shape
    TensorShape weights_reshaped{ weights.tensor_shape() };
    weights_reshaped.set(3, weights_reshaped[3] / num_groups);

    weights_reshaped.collapse(3);
    const size_t tmp_dim = weights_reshaped[0];
    weights_reshaped.set(0, weights_reshaped[1]);
    weights_reshaped.set(1, tmp_dim + (has_bias ? 1 : 0));
    if(weights.num_dimensions() < 5)
    {
        weights_reshaped.set(2, num_groups);
    }

    return weights_reshaped;
}
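
// Worked example (illustrative; values assumed): NCHW weights of shape [3, 3, 8, 16]
// (kernel W, kernel H, IFM, OFM) with has_bias = false and num_groups = 1 reshape to
// [16, 72, 1]: one column of 3 * 3 * 8 = 72 weights per filter (73 if a bias element
// is appended).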

/** Calculate the Left Hand Side matrix reshaped shape
 *
 * @param[in] a                       Input tensor info
 * @param[in] lhs_info                Left Hand Side matrix information
 * @param[in] reinterpret_input_as_3d (Optional) Set to true if the input needs to be interpreted as 3D
 *
 * @return the calculated shape
 */
inline TensorShape compute_lhs_reshaped_shape(const ITensorInfo &a, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false)
{
    ARM_COMPUTE_ERROR_ON(lhs_info.m0 == 0);
    ARM_COMPUTE_ERROR_ON(lhs_info.k0 == 0);
    ARM_COMPUTE_ERROR_ON(lhs_info.v0 == 0);

    // Input width/height
    const unsigned int input_width  = a.dimension(0);
    const unsigned int input_height = reinterpret_input_as_3d ? a.dimension(1) * a.dimension(2) : a.dimension(1);

    // Number of horizontal/vertical blocks in the input tensor
    const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(lhs_info.k0));
    const unsigned int num_vert_blocks  = std::ceil(input_height / static_cast<float>(lhs_info.m0));

    // Block size
    const unsigned int block_size = lhs_info.m0 * lhs_info.k0;

    // Output width/height
    const unsigned int output_width  = block_size * num_horiz_blocks * lhs_info.v0;
    const unsigned int output_height = std::ceil(num_vert_blocks / static_cast<float>(lhs_info.v0));

    TensorShape lhs_shape{ a.tensor_shape() };
    lhs_shape.set(0, output_width);
    lhs_shape.set(1, output_height);

    if((reinterpret_input_as_3d) && (lhs_shape.num_dimensions() > 2))
    {
        // When the data format is NHWC and the shapes are Nx1x1
        // the tensor shape num_dimensions is automatically set to 1 instead of 3.
        // To avoid failures by removing a dimension that doesn't exist
        // check if the number of dimensions is greater than 2.
        lhs_shape.remove_dimension(2);
    }

    return lhs_shape;
}
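
// Worked example (illustrative; values assumed): for a = [8, 6] with m0 = 4, k0 = 4 and
// v0 = 2, there are ceil(8 / 4) = 2 horizontal and ceil(6 / 4) = 2 vertical blocks of
// 4 * 4 = 16 elements, so the reshaped LHS is [16 * 2 * 2, ceil(2 / 2)] = [64, 1].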

/** Calculate the Right Hand Side matrix reshaped shape
 *
 * @param[in] a        Input tensor info
 * @param[in] rhs_info Right Hand Side matrix information
 *
 * @return the calculated shape
 */
inline TensorShape compute_rhs_reshaped_shape(const ITensorInfo &a, const GEMMRHSMatrixInfo &rhs_info)
{
    ARM_COMPUTE_ERROR_ON(rhs_info.n0 == 0);
    ARM_COMPUTE_ERROR_ON(rhs_info.k0 == 0);
    ARM_COMPUTE_ERROR_ON(rhs_info.h0 == 0);

    // Input width/height
    const unsigned int input_width  = a.dimension(0);
    const unsigned int input_height = a.dimension(1);

    // Number of horizontal/vertical blocks in the input tensor
    const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(rhs_info.n0));
    const unsigned int num_vert_blocks  = std::ceil(input_height / static_cast<float>(rhs_info.k0));

    // Block size
    const unsigned int block_size = rhs_info.n0 * rhs_info.k0;

    // Output width/height
    const unsigned int output_width  = block_size * num_vert_blocks * rhs_info.h0;
    const unsigned int output_height = std::ceil(num_horiz_blocks / static_cast<float>(rhs_info.h0));

    TensorShape rhs_shape{ a.tensor_shape() };
    rhs_shape.set(0, output_width);
    rhs_shape.set(1, output_height);

    return rhs_shape;
}
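
// Worked example (illustrative; values assumed): for a = [8, 6] with n0 = 4, k0 = 2 and
// h0 = 2, there are ceil(8 / 4) = 2 horizontal and ceil(6 / 2) = 3 vertical blocks of
// 4 * 2 = 8 elements, so the reshaped RHS is [8 * 3 * 2, ceil(2 / 2)] = [48, 1].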
251
Michalis Spyroud33fe342019-01-04 17:10:25 +0000252/** Calculate the interleaved shape of an input tensor
253 *
254 * @param[in] a Input tensor info
255 * @param[in] mult_interleave4x4_height (Optional) Interleave4x4 height
256 * @param[in] reinterpret_input_as_3d (Optional) Set to true if the input need to be interpreted as 3d
257 *
258 * @return the calculated shape
259 */
Gian Marco Iodice68a3f562018-07-26 11:44:03 +0100260inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false)
Georgios Pinitas358ca202017-12-07 16:47:52 +0000261{
Gian Marco36a0a462018-01-12 10:21:40 +0000262 // The interleaved output matrix will have the following shape: [ a_height * W, ceil(a_width / W) ] where W = 4 * mult_interleave4x4_height
263 ARM_COMPUTE_ERROR_ON(mult_interleave4x4_height < 1);
264 const int interleave_width = 4 * mult_interleave4x4_height;
Georgios Pinitas358ca202017-12-07 16:47:52 +0000265 TensorShape shape_interleaved_a{ a.tensor_shape() };
Gian Marco36a0a462018-01-12 10:21:40 +0000266 shape_interleaved_a.set(0, a.dimension(0) * interleave_width);
Gian Marco Iodice68a3f562018-07-26 11:44:03 +0100267 if(reinterpret_input_as_3d)
268 {
269 const int M = a.dimension(1) * a.dimension(2);
270 const int height = std::ceil(M / static_cast<float>(interleave_width));
271 shape_interleaved_a.set(1, height);
Isabella Gottardi089695f2018-10-17 18:04:15 +0100272
273 // When the data format is NHWC and the shapes are Nx1x1
274 // the tensor shape num_dimensions is automatically set to 1 instead of 3.
275 // To avoid failures by removing a dimension that doesn't exist
276 // check if the number of dimensions is greater than 2.
277 if(shape_interleaved_a.num_dimensions() > 2)
278 {
279 shape_interleaved_a.remove_dimension(2);
280 }
Gian Marco Iodice68a3f562018-07-26 11:44:03 +0100281 }
282 else
283 {
284 shape_interleaved_a.set(1, std::ceil(a.dimension(1) / static_cast<float>(interleave_width)));
285 }
Georgios Pinitas358ca202017-12-07 16:47:52 +0000286
287 return shape_interleaved_a;
288}
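
// Worked example (illustrative; values assumed): a = [7, 9] with
// mult_interleave4x4_height = 1 gives W = 4, so four rows are interleaved into each
// output row and the result is [7 * 4, ceil(9 / 4)] = [28, 3].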

/** Calculate the reshaped shape of the weights to use in depthwise convolution
 *
 * @param[in] input Input tensor info
 * @param[in] info  Depthwise convolution information to be used for reshaping
 *
 * @return the calculated shape
 */
inline TensorShape compute_reshaped_depthwise_weights_shape(const ITensorInfo &input, const DepthwiseConvolutionReshapeInfo &info)
{
    const auto  data_layout = input.data_layout();
    TensorShape weights_shape{};

    const int    width_idx    = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int    height_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int    channel_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const size_t num_channels = input.dimension(channel_idx);
    const size_t num_rows     = input.dimension(height_idx);
    const size_t num_cols     = input.dimension(width_idx);

    weights_shape.set(0, num_rows * num_cols * info.c0);
    weights_shape.set(1, DIV_CEIL(num_channels, info.c0));
    return weights_shape;
}

/** Calculate the transposed 1xW shape
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_transpose1xW_shape(const ITensorInfo &b)
{
    // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
    TensorShape shape_transposed1xW_b{ b.tensor_shape() };
    shape_transposed1xW_b.set(0, b.dimension(1) * 16);
    shape_transposed1xW_b.set(1, std::ceil(b.dimension(0) / 16.f));

    return shape_transposed1xW_b;
}

/** Calculate the transposed 1xW width element shape
 *
 * @param[in] b                       Input tensor info
 * @param[in] mult_transpose1xW_width (Optional) Transpose1xW width
 *
 * @return the calculated shape
 */
inline TensorShape compute_transpose1xW_with_element_size_shape(const ITensorInfo &b, int mult_transpose1xW_width = 1)
{
    // Note: mult_transpose1xW_width expresses the number of chunks with size 1x(W) we want to store on the same row
    // The transpose1xW output matrix will have the following shape:
    // [ b_height * W, ceil(b_width / W) ] where W = (16 / element size of the tensor) * mult_transpose1xW_width
    ARM_COMPUTE_ERROR_ON(mult_transpose1xW_width < 1);
    TensorShape  shape_transposed1xW_b{ b.tensor_shape() };
    const size_t transpose_width = (16 / b.element_size()) * mult_transpose1xW_width;
    shape_transposed1xW_b.set(0, b.dimension(1) * transpose_width);
    shape_transposed1xW_b.set(1, static_cast<size_t>(std::ceil(b.dimension(0) / static_cast<float>(transpose_width))));

    return shape_transposed1xW_b;
}
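
// Worked example (illustrative; values assumed): for an FP32 tensor b = [12, 8] and
// mult_transpose1xW_width = 1, W = (16 / 4) * 1 = 4, so the output is
// [8 * 4, ceil(12 / 4)] = [32, 3].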

/** Calculate the reductionA shape used in GEMMLowp
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_reductionA_shape(const ITensorInfo &b)
{
    TensorShape shape_vector_sum_col{ b.tensor_shape() };
    if(shape_vector_sum_col.num_dimensions() > 1)
    {
        shape_vector_sum_col.remove_dimension(1);
    }

    return shape_vector_sum_col;
}

/** Calculate the reductionB shape used in GEMMLowp
 *
 * @param[in] a Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_reductionB_shape(const ITensorInfo &a)
{
    TensorShape shape_vector_sum_row{ a.tensor_shape() };
    shape_vector_sum_row.set(Window::DimX, a.dimension(1));
    if(shape_vector_sum_row.num_dimensions() > 1)
    {
        shape_vector_sum_row.remove_dimension(1);
    }

    return shape_vector_sum_row;
}

/** Calculate the Col2Im shape
 *
 * @param[in] input           Input tensor info
 * @param[in] convolved_dims  Convolved dimensions
 * @param[in] batch_size_on_z True if batch size is on z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape
 */
inline TensorShape compute_col2im_shape(const ITensorInfo &input, const Size2D &convolved_dims, bool batch_size_on_z, unsigned int num_groups = 1)
{
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(input.tensor_shape()[1] != (convolved_dims.area()));
    ARM_COMPUTE_ERROR_ON((num_groups > 1) && input.tensor_shape()[2] != num_groups);

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape col2im_shape{ input.tensor_shape() };
    // If batches start on 3rd dimension shift dimensions right by 1 to retain upper tensor shape,
    // as the first three will be overridden by H,W,C data
    if(batch_size_on_z && num_groups == 1)
    {
        col2im_shape.shift_right(1);
    }
    col2im_shape.set(width_idx, convolved_dims.width);
    col2im_shape.set(height_idx, convolved_dims.height);
    col2im_shape.set(channel_idx, input.tensor_shape()[0] * num_groups);

    return col2im_shape;
}
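
// Worked example (illustrative; values assumed): a GEMM output of shape [16, 25, 4]
// (kernels, convolved area, batches) with convolved_dims = 5x5, batch_size_on_z = true
// and NCHW data layout yields [5, 5, 16, 4].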

/** Calculate the transposed shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_transposed_shape(const ITensorInfo &input)
{
    TensorShape shape_transposed{ input.tensor_shape() };

    shape_transposed.set(0, input.dimension(1));
    shape_transposed.set(1, input.dimension(0));

    return shape_transposed;
}

/** Calculate the depthwise convolution output shape of a tensor
 *
 * @param[in] input   Input tensor info
 * @param[in] weights Weights tensor info
 * @param[in] info    Convolution info
 *
 * @return the calculated shape
 */
inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, const ConvolutionInfo &info)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    const DataLayout weights_data_layout = weights.data_layout();
    const int        weights_width_idx   = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::WIDTH);
    const int        weights_height_idx  = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::HEIGHT);

    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_shape[width_idx], input_shape[height_idx],
                                                              weights_shape[weights_width_idx], weights_shape[weights_height_idx],
                                                              info.pad_stride_info, info.dilation);

    TensorShape output_shape{ input_shape };
    output_shape.set(width_idx, output_width);
    output_shape.set(height_idx, output_height);
    output_shape.set(channel_idx, input_shape[channel_idx] * info.depth_multiplier);

    return output_shape;
}
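
// Worked example (illustrative; values assumed): an NCHW input of shape [32, 32, 8]
// with 3x3 weights, stride 1, pad 1 and depth_multiplier = 2 gives [32, 32, 16]: the
// spatial size follows scaled_dimensions() and the channels scale with the multiplier.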

/** Calculate the upsampled output shape used for deconvolution
 *
 * @param[in] input    Input tensor info
 * @param[in] weights  Weights tensor info
 * @param[in] sx       Stride on x axis
 * @param[in] sy       Stride on y axis
 * @param[in] out_dims Output shape dimensions
 * @param[in] padx     Padding on x axis
 * @param[in] pady     Padding on y axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_deconvolution_upsampled_shape(const ITensorInfo &input, const ITensorInfo &weights, unsigned int sx, unsigned int sy,
                                                         std::pair<unsigned int, unsigned int> &out_dims, uint32_t &padx, uint32_t &pady)
{
    const DataLayout data_layout = input.data_layout();
    const size_t     idx_w       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t     idx_h       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    // Find the upsampled dimensions
    unsigned int out_x = (input.dimension(idx_w) - 1) * sx + 1;
    unsigned int out_y = (input.dimension(idx_h) - 1) * sy + 1;

    // Find the padding needed for the convolution with stride 1 in order to match output shape
    padx = out_dims.first - (out_x - weights.dimension(idx_w) + 1);
    pady = out_dims.second - (out_y - weights.dimension(idx_h) + 1);
    out_x += padx;
    out_y += pady;

    TensorShape scale_out_shape(input.tensor_shape());
    scale_out_shape.set(idx_w, out_x);
    scale_out_shape.set(idx_h, out_y);

    return scale_out_shape;
}
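
// Worked example (illustrative; values assumed): a 4x4 NCHW input with sx = sy = 2 and
// 3x3 weights targeting a 9x9 deconvolution output gives out_x = (4 - 1) * 2 + 1 = 7
// and padx = 9 - (7 - 3 + 1) = 4, so the upsampled shape is 11x11; a stride-1
// convolution over it then produces the requested 9x9 output.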

/** Calculate the output shape of the deconvolution layer
 *
 * @param[in] out_dims Output x and y shape dimensions
 * @param[in] input    Input tensor info
 * @param[in] weights  Weights tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_deconvolution_output_shape(const std::pair<unsigned int, unsigned int> &out_dims, const ITensorInfo &input, const ITensorInfo &weights)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        batch_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    TensorShape out_shape{ input_shape };
    out_shape.set(width_idx, out_dims.first);
    out_shape.set(height_idx, out_dims.second);
    out_shape.set(channel_idx, weights_shape[batch_idx]);
    return out_shape;
}

/** Calculate the im2col output shape of a tensor
 *
 * @param[in] input           Input tensor info
 * @param[in] kernel_dims     The kernel dimensions (width and height)
 * @param[in] conv_info       Contains padding and stride information
 * @param[in] has_bias        In case biases are provided, expands the matrix with 1
 * @param[in] dilation        Dilation, in elements, across x and y
 * @param[in] batch_size_on_z True if batch size is on z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape
 */
inline TensorShape compute_im2col_conv_shape(const ITensorInfo *input, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, bool batch_size_on_z,
                                             unsigned int num_groups = 1)
{
    // The output shape will be the 3D shape [ out_channels * kernel_area, num_elems_per_out_channel, batches ] if batch_size_on_z == true
    // or the 4D shape [ out_channels * kernel_area / num_groups, num_elems_per_out_channel, num_groups, batches ] if batch_size_on_z == false

    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && input->data_layout() != DataLayout::NCHW);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && batch_size_on_z);

    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    std::pair<unsigned int, unsigned int> out_dims = scaled_dimensions(output_shape[width_idx], output_shape[height_idx], kernel_dims.width, kernel_dims.height, conv_info, dilation);
    output_shape.set(0, (output_shape[channel_idx] / num_groups * kernel_dims.area() + (has_bias ? 1 : 0))); // NOLINT
    output_shape.set(1, (out_dims.first * out_dims.second));
    if(batch_size_on_z && output_shape.num_dimensions() >= 3)
    {
        output_shape.remove_dimension(2);
    }
    else
    {
        output_shape.set(2, num_groups);
    }

    return output_shape;
}
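
// Worked example (illustrative; values assumed): an NCHW input of shape [5, 5, 3, 2]
// with a 3x3 kernel, stride 1, no padding, no bias and batch_size_on_z = true produces
// [3 * 3 * 3, 3 * 3, 2] = [27, 9, 2]: one 27-element row per output position.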

/** Calculate the flattened output shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_flatten_shape(const ITensorInfo *input)
{
    // The output shape will be the flattened version of the input (i.e. [ width * height * channels, num_batches, ... ] ). Used for FlattenLayer and FullyConnectedLayer.

    TensorShape output_shape{ input->tensor_shape() };

    output_shape.collapse(3);

    return output_shape;
}

/** Calculate the softmax output shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] axis  (Optional) Softmax axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_softmax_shape(const ITensorInfo *input, size_t axis = 1)
{
    // The output shape will be a 2D version of the input. For instance:
    // - [x,y,z] and axis 1 will return [x, y*z]
    // - [x,y,z,w] and axis 2 will return [x*y, z*w]
    // - [x,y,z,w] and axis 3 will return [x*y*z, w]
    TensorShape shape2D = input->tensor_shape();

    if(axis < input->num_dimensions())
    {
        // Collapse from axis onward (this changes the shape)
        shape2D.collapse_from(axis);

        // Collapse the rest (collapse is inclusive)
        shape2D.collapse(shape2D.num_dimensions() - 1);
    }
    else
    {
        // Collapse everything
        shape2D.collapse(shape2D.num_dimensions());
    }

    if(axis == 0)
    {
        // If axis is zero the first dim should be one. Since
        // collapse is an inclusive operation we need to shift
        shape2D.shift_right(1);
    }

    return shape2D;
}

/** Calculate the winograd filter transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    TensorShape tensor_shape{ input.tensor_shape() };

    const Size2D kernel_size      = winograd_info.kernel_size;
    const Size2D output_tile_size = winograd_info.output_tile_size;
    const Size2D input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    tensor_shape.remove_dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH));
    tensor_shape.set(Window::DimX, input.dimension(3));
    tensor_shape.set(Window::DimY, input.dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL)));
    tensor_shape.set(Window::DimZ, input_tile_size.area());

    return tensor_shape;
}

/** Calculate the winograd input transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        output_tile_size = winograd_info.output_tile_size;
    const Size2D        input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    const size_t idx_w = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    // Compute the number of output tiles along the x and y direction of size "output_tile_size"
    const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(input.tensor_shape()[idx_w], input.tensor_shape()[idx_h]),
                                                                kernel_size,
                                                                output_tile_size,
                                                                conv_info);

    const unsigned int width  = input.tensor_shape()[idx_c];
    const unsigned int height = num_tiles.area();
    const unsigned int depth  = input_tile_size.area();

    TensorShape output_shape{ input.tensor_shape() };
    output_shape.set(0, width);
    output_shape.set(1, height);
    output_shape.set(2, depth);

    return output_shape;
}

/** Calculate the winograd output transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        input_dimensions = winograd_info.input_dimensions;
    const DataLayout    data_layout      = winograd_info.output_data_layout;

    // Compute output shape
    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_dimensions.width, input_dimensions.height,
                                                              kernel_size.width, kernel_size.height, conv_info);

    TensorShape tensor_shape{ input.tensor_shape() };

    // Output dimension
    const unsigned int out_w = output_width;
    const unsigned int out_h = output_height;
    const unsigned int out_c = input.dimension(0);

    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH), out_w);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT), out_h);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL), out_c);

    return tensor_shape;
}

/** Calculate the deep convolution output shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] weights   Weights tensor info
 * @param[in] conv_info Contains padding and stride information
 *
 * @return the calculated shape
 */
inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    const unsigned int input_width         = input_shape[idx_width];
    const unsigned int input_height        = input_shape[idx_height];
    const unsigned int weights_width       = weights_shape[idx_width];
    const unsigned int weights_height      = weights_shape[idx_height];
    const unsigned int weights_out_channel = weights_shape[3];
    unsigned int       output_width        = 0;
    unsigned int       output_height       = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, weights_width, weights_height, conv_info);

    TensorShape output_shape{ input_shape };
    output_shape.set(idx_width, output_width);
    output_shape.set(idx_height, output_height);
    output_shape.set(idx_channel, weights_out_channel);

    return output_shape;
}
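
// Worked example (illustrative; values assumed): an NCHW input of shape [32, 32, 3]
// convolved with weights [5, 5, 3, 16] at stride 1 and pad 2 keeps the 32x32 spatial
// size and takes the channel count from the number of filters: [32, 32, 16].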

/** Calculate the min/max output shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_min_max_shape(const ITensorInfo *input)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(Window::DimX, 2);
    output_shape.remove_dimension(1);
    output_shape.remove_dimension(1);

    return output_shape;
}

/** Calculate the output pool shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
{
    unsigned int pooled_w = 0;
    unsigned int pooled_h = 0;

    TensorShape output_shape{ input.tensor_shape() };

    const bool         is_global_pooling = pool_info.is_global_pooling;
    const unsigned int idx_width         = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height        = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const unsigned int pool_size_x       = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size.width;
    const unsigned int pool_size_y       = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size.height;

    std::tie(pooled_w, pooled_h) = scaled_dimensions(output_shape[idx_width],
                                                     output_shape[idx_height],
                                                     pool_size_x,
                                                     pool_size_y,
                                                     pool_info.pad_stride_info);

    output_shape.set(idx_width, pooled_w);
    output_shape.set(idx_height, pooled_h);

    return output_shape;
}
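
// Worked example (illustrative; values assumed): an NCHW input of shape [32, 32, 8]
// with a 2x2 pool, stride 2 and no padding pools to [16, 16, 8]; with
// is_global_pooling set (and a stride-1, pad-0 PadStrideInfo) the pool covers the full
// 32x32 plane and the result is [1, 1, 8].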
808
morgolock37722d92020-04-09 14:17:48 +0100809/** Calculate the output unpool shape of a tensor
810 *
811 * @param[in] input Input tensor info
812 * @param[in] pool_info Pooling layer info
813 *
814 * @return the calculated shape
815 */
816inline TensorShape compute_unpool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
817{
818 const unsigned int idx_width = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
819 const unsigned int idx_height = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
820 const TensorShape input_shape = input.tensor_shape();
821 ARM_COMPUTE_ERROR_ON(input_shape[idx_height] <= 1 || input_shape[idx_width] <= 1);
822 const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
823 const unsigned int stride_x = pad_stride_info.stride().first;
824 const unsigned int stride_y = pad_stride_info.stride().second;
825
826 const int pad_left = pad_stride_info.pad_left();
827 const int pad_top = pad_stride_info.pad_top();
828 const int pad_right = pad_stride_info.pad_right();
829 const int pad_bottom = pad_stride_info.pad_bottom();
830
831 TensorShape output_shape = input_shape;
832 const unsigned int out_width = (input_shape[idx_width] - 1) * stride_x - pad_left - pad_right + pool_info.pool_size.width;
833 const unsigned int out_height = (input_shape[idx_height] - 1) * stride_y - pad_top - pad_bottom + pool_info.pool_size.height;
834
835 output_shape.set(idx_width, out_width);
836 output_shape.set(idx_height, out_height);
837 return output_shape;
838}

/** Calculate the output roi align shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] rois      Rois tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_roi_align_shape(const ITensorInfo &input, const ITensorInfo &rois, ROIPoolingLayerInfo pool_info)
{
    TensorShape output_shape{ input.tensor_shape() };

    const unsigned int idx_width  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);

    output_shape.set(idx_width, pool_info.pooled_width());
    output_shape.set(idx_height, pool_info.pooled_height());
    output_shape.set(3, rois.dimension(1));

    return output_shape;
}

/** Calculate the RNN shape of a tensor
 *
 * @param[in] input      Input tensor info
 * @param[in] batch_size Batch size
 *
 * @return the calculated shape
 */
inline TensorShape compute_rnn_shape(const ITensorInfo *input, const unsigned int batch_size)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(1, batch_size);

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0                    First input tensor info
 * @param[in] input1                    Second input tensor info
 * @param[in] is_interleaved_transposed True if the input is interleaved transposed
 * @param[in] reshape_info              GEMM reshape info
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info)
{
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
    ARM_COMPUTE_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The first input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");

    const bool reinterpret_input_as_3d  = reshape_info.reinterpret_input_as_3d();
    const bool reinterpret_output_as_3d = reshape_info.depth_output_gemm3d() != 0;
    const int  depth_output_gemm3d      = reinterpret_output_as_3d ? reshape_info.depth_output_gemm3d() : 1;
    const int  m                        = reshape_info.reinterpret_input_as_3d() ? input0.dimension(1) * input0.dimension(2) : input0.dimension(1);

    // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
    // dimension of the output tensor
    const int dim0 = is_interleaved_transposed ? reshape_info.n() : input1.dimension(0);
    const int dim1 = is_interleaved_transposed ? reshape_info.m() / depth_output_gemm3d : m / depth_output_gemm3d;
    const int dim2 = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
    const int dim3 = reinterpret_input_as_3d ? 1 : input0.tensor_shape()[3];

    TensorShape output_shape{ input0.tensor_shape() };

    output_shape.set(0, dim0);
    output_shape.set(1, dim1);
    output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : dim2);
    output_shape.set(3, reinterpret_output_as_3d ? dim2 : dim3);
    output_shape.set(4, reinterpret_output_as_3d ? dim3 : 1);

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0    First input tensor info
 * @param[in] input1    Second input tensor info
 * @param[in] gemm_info GEMM reshape info
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMReshapeInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(input1);
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");

    const bool reinterpret_input_as_3d  = gemm_info.reinterpret_input_as_3d();
    const bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d() != 0;
    const int  depth_output_gemm3d      = reinterpret_output_as_3d ? gemm_info.depth_output_gemm3d() : 1;

    TensorShape output_shape{ input0.tensor_shape() };

    if(!reinterpret_input_as_3d && !reinterpret_output_as_3d)
    {
        output_shape.set(0, gemm_info.n());
        output_shape.set(1, gemm_info.m());
    }
    else
    {
        // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
        // dimension of the output tensor
        const int batch_size = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
        output_shape.set(0, gemm_info.n());
        output_shape.set(1, gemm_info.m() / depth_output_gemm3d);
        output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : batch_size);
        output_shape.set(3, reinterpret_output_as_3d ? batch_size : 1);
    }

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0    First input tensor info
 * @param[in] input1    Second input tensor info
 * @param[in] gemm_info GEMM kernel info used to retrieve the original dimensions of the input matrices
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMKernelInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(input1);
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");

    const bool         reinterpret_input_as_3d  = gemm_info.reinterpret_input_as_3d;
    const bool         reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
    const unsigned int depth_output_gemm3d      = reinterpret_output_as_3d ? gemm_info.depth_output_gemm3d : 1;

    TensorShape output_shape{ input0.tensor_shape() };

    if(!reinterpret_input_as_3d && !reinterpret_output_as_3d)
    {
        output_shape.set(0, gemm_info.n);
        output_shape.set(1, gemm_info.m);
    }
    else
    {
        // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
        // dimension of the output tensor
        const unsigned int batch_size = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
        output_shape.set(0, gemm_info.n);
        output_shape.set(1, gemm_info.m / depth_output_gemm3d);
        output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : batch_size);
        output_shape.set(3, reinterpret_output_as_3d ? batch_size : 1);
    }

    return output_shape;
}

/** Calculate the output stage shape of a tensor
 *
 * @param[in] input           Input tensor info
 * @param[in] gemm_3d_depth   (Optional) GEMM 3d depth
 * @param[in] batch_size_on_z (Optional) True if batch size is on z axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_output_stage_shape(const ITensorInfo &input, unsigned int gemm_3d_depth = 1, bool batch_size_on_z = false)
{
    ARM_COMPUTE_ERROR_ON(input.data_layout() != DataLayout::NHWC && gemm_3d_depth > 1);

    TensorShape output_shape = input.tensor_shape();
    if(gemm_3d_depth > 1)
    {
        if(batch_size_on_z)
        {
            output_shape.shift_right(1);
        }
        output_shape.set(0, input.tensor_shape().x());
        output_shape.set(1, input.tensor_shape().y() / gemm_3d_depth);
        output_shape.set(2, gemm_3d_depth);
    }

    return output_shape;
}

/** Calculate the strided slice output shape of a tensor
 *
 * @param[in] input            Input tensor info
 * @param[in] starts           The starts of the dimensions of the input tensor to be sliced
 * @param[in] ends             The ends of the dimensions of the input tensor to be sliced
 * @param[in] strides          The strides of the dimensions of the input tensor to be sliced
 * @param[in] begin_mask       If the ith bit of begin_mask is set, starts[i] is ignored and the fullest possible range in that dimension is used instead.
 * @param[in] end_mask         If the ith bit of end_mask is set, ends[i] is ignored and the fullest possible range in that dimension is used instead.
 * @param[in] shrink_axis_mask If the ith bit of shrink_axis_mask is set, it implies that the ith specification shrinks the dimensionality by 1
 *
 * @return the calculated shape
 */
inline TensorShape compute_strided_slice_shape(const ITensorInfo &input,
                                               const Coordinates &starts, const Coordinates &ends, const Coordinates &strides,
                                               int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    using namespace arm_compute::helpers::tensor_transform;
    return compute_strided_slice_output_shape(input.tensor_shape(), starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
}

/** Calculate the slice output shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] starts      The starts of the dimensions of the input tensor to be sliced
 * @param[in] ends        The ends of the dimensions of the input tensor to be sliced
 *
 * @return the calculated shape
 */
inline TensorShape compute_slice_shape(const TensorShape &input_shape, const Coordinates &starts, const Coordinates &ends)
{
    using namespace arm_compute::helpers::tensor_transform;

    return compute_strided_slice_output_shape(input_shape,
                                              starts, ends, BiStrides(),
                                              0, construct_slice_end_mask(ends), 0);
}

/** Calculate the batch to space output shape of a tensor
 *
 * @param[in] input   Input tensor info
 * @param[in] block_x Block shape x value
 * @param[in] block_y Block shape y value
 *
 * @return the calculated shape
 */
inline TensorShape compute_batch_to_space_shape(const ITensorInfo *input, const int block_x, const int block_y)
{
    ARM_COMPUTE_ERROR_ON(block_x <= 0 || block_y <= 0);

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_batch   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(idx_width, input->tensor_shape()[idx_width] * block_x);
    output_shape.set(idx_height, input->tensor_shape()[idx_height] * block_y);
    output_shape.set(idx_batch, input->tensor_shape()[idx_batch] / (block_x * block_y));

    return output_shape;
}
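
// Sketch (hypothetical NCHW shape { W, H, C, N } = { 2, 2, 1, 8 }, 2x2 block):
// the spatial dimensions grow while the batch dimension shrinks by block_x * block_y.
//
//   TensorInfo info(TensorShape(2U, 2U, 1U, 8U), 1, DataType::F32); // NCHW by default
//   const TensorShape out = compute_batch_to_space_shape(&info, 2, 2);
//   // out is expected to be { 4, 4, 1, 2 }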

/** Calculate the depth to space output shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] data_layout Operation data layout
 * @param[in] block       Block shape value
 *
 * @return the calculated shape
 */
inline TensorShape compute_depth_to_space_shape(const TensorShape &input_shape, DataLayout data_layout, int block)
{
    ARM_COMPUTE_ERROR_ON(block < 2);

    const int idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape output_shape{ input_shape };
    output_shape.set(idx_width, input_shape[idx_width] * block);
    output_shape.set(idx_height, input_shape[idx_height] * block);
    output_shape.set(idx_channel, input_shape[idx_channel] / (block * block));

    return output_shape;
}
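
// Sketch (illustrative): block * block channels are redistributed over the
// two spatial dimensions.
//
//   const TensorShape out = compute_depth_to_space_shape(TensorShape(2U, 2U, 8U, 1U), DataLayout::NCHW, 2);
//   // out is expected to be { 4, 4, 2, 1 }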

/** Calculate the split output shape of a tensor
 *
 * @param[in] input      Input tensor info
 * @param[in] axis       Axis on which to split the input
 * @param[in] num_splits Number of splits
 *
 * @return the calculated shape
 */
inline TensorShape compute_split_shape(const ITensorInfo *input, unsigned int axis, unsigned int num_splits)
{
    TensorShape empty_shape;
    empty_shape.set(0, 0);

    TensorShape out_shape{ input->tensor_shape() };

    // Return empty shape if axis is invalid
    if(axis > input->tensor_shape().num_dimensions())
    {
        return empty_shape;
    }

    size_t axis_size = out_shape[axis];

    // Return empty shape if num_splits is not valid
    if(axis_size % num_splits)
    {
        return empty_shape;
    }

    out_shape[axis] = axis_size / num_splits;
    return out_shape;
}
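
// Sketch (hypothetical values): splitting a { 8, 4 } tensor into four parts
// along axis 0; invalid axis/num_splits combinations yield an empty shape.
//
//   TensorInfo info(TensorShape(8U, 4U), 1, DataType::F32);
//   const TensorShape out = compute_split_shape(&info, 0, 4);
//   // out is expected to be { 2, 4 }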

/** Calculate the space to batch output shape of a tensor
 *
 * @param[in] input         Input tensor info
 * @param[in] block_x       Block shape x value
 * @param[in] block_y       Block shape y value
 * @param[in] padding_left  Left padding values
 * @param[in] padding_right Right padding values
 *
 * @return the calculated shape
 */
inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, const int block_x, const int block_y, const Size2D &padding_left, const Size2D &padding_right)
{
    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_batch   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    ARM_COMPUTE_ERROR_ON((input->tensor_shape()[idx_width] + padding_left.x() + padding_right.x()) % block_x != 0);
    ARM_COMPUTE_ERROR_ON((input->tensor_shape()[idx_height] + padding_left.y() + padding_right.y()) % block_y != 0);

    output_shape.set(idx_width, (input->tensor_shape()[idx_width] + padding_left.x() + padding_right.x()) / block_x);
    output_shape.set(idx_height, (input->tensor_shape()[idx_height] + padding_left.y() + padding_right.y()) / block_y);
    output_shape.set(idx_batch, input->tensor_shape()[idx_batch] * block_x * block_y);

    return output_shape;
}
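
// Sketch (illustrative, unpadded case): the inverse of batch to space.
//
//   TensorInfo info(TensorShape(4U, 4U, 1U, 1U), 1, DataType::F32);
//   const TensorShape out = compute_space_to_batch_shape(&info, 2, 2, Size2D(0, 0), Size2D(0, 0));
//   // out is expected to be { 2, 2, 1, 4 }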

/** Calculate the space to depth output shape of a tensor
 *
 * @param[in] input       Input tensor info
 * @param[in] block_shape Block shape value
 *
 * @return the calculated shape
 */
inline TensorShape compute_space_to_depth_shape(const ITensorInfo *input, int32_t block_shape)
{
    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_depth   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    output_shape.set(idx_width, input->tensor_shape()[idx_width] / block_shape);
    output_shape.set(idx_height, input->tensor_shape()[idx_height] / block_shape);
    output_shape.set(idx_depth, input->tensor_shape()[idx_depth] * (block_shape * block_shape));

    return output_shape;
}
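
// Sketch (hypothetical values): the inverse of depth to space; each 2x2
// spatial block is folded into the channel dimension.
//
//   TensorInfo info(TensorShape(4U, 4U, 2U, 1U), 1, DataType::F32);
//   const TensorShape out = compute_space_to_depth_shape(&info, 2);
//   // out is expected to be { 2, 2, 8, 1 }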

/** Calculate the prior box output shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] info  PriorBoxLayer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_prior_box_shape(const ITensorInfo &input, const PriorBoxLayerInfo &info)
{
    DataLayout   data_layout = input.data_layout();
    const size_t idx_w       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int    num_priors  = info.aspect_ratios().size() * info.min_sizes().size() + info.max_sizes().size();

    TensorShape output_shape{};
    output_shape.set(0, input.dimension(idx_w) * input.dimension(idx_h) * num_priors * 4);
    output_shape.set(1, 2);

    return output_shape;
}
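
// Worked example (hypothetical configuration): with 3 aspect ratios, 2 min
// sizes and 1 max size, num_priors = 3 * 2 + 1 = 7, so an 8x8 feature map
// yields output_shape = { 8 * 8 * 7 * 4, 2 } = { 1792, 2 }.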

/** Calculate the padded shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] padding     Paddings list
 *
 * @return the calculated shape
 */
inline TensorShape compute_padded_shape(const TensorShape &input_shape, const PaddingList &padding)
{
    TensorShape padded_shape = input_shape;
    for(size_t dim = 0; dim < padding.size(); ++dim)
    {
        const auto    &padding_pair   = padding[dim];
        const uint32_t shape_on_index = (padded_shape.num_dimensions() <= dim) ? 1 : input_shape[dim];
        padded_shape.set(dim, padding_pair.first + shape_on_index + padding_pair.second);
    }
    return padded_shape;
}
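
// Sketch (illustrative): pad a { 4, 3 } shape by (1, 1) on x and (0, 2) on y.
//
//   const TensorShape out = compute_padded_shape(TensorShape(4U, 3U), PaddingList{ { 1, 1 }, { 0, 2 } });
//   // out is expected to be { 6, 5 }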

/** Calculate the tiled shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] multiples   Multiples list; each dimension is multiplied by the corresponding entry
 *
 * @return the calculated shape
 */
inline TensorShape compute_tiled_shape(const TensorShape &input_shape, const Multiples &multiples)
{
    TensorShape tiled_shape = input_shape;
    for(size_t dim = 0; dim < multiples.size(); ++dim)
    {
        tiled_shape.set(dim, input_shape[dim] * multiples[dim]);
    }
    return tiled_shape;
}
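
// Sketch (hypothetical values): tile a { 2, 3 } shape twice along x and three
// times along y.
//
//   const TensorShape out = compute_tiled_shape(TensorShape(2U, 3U), Multiples{ 2, 3 });
//   // out is expected to be { 4, 9 }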

/** Calculate the reduced shape of a tensor given an axis
 *
 * @param[in] input     Input tensor shape
 * @param[in] axis      Axis on which to perform reduction
 * @param[in] keep_dims (Optional) Whether to keep the dimension after reduction operation. Defaults to true.
 *
 * @return the calculated shape
 */
inline TensorShape compute_reduced_shape(const TensorShape &input, unsigned int axis, bool keep_dims = true)
{
    TensorShape output_shape{ input };

    if(!keep_dims)
    {
        output_shape.remove_dimension(axis);
    }
    else
    {
        output_shape.set(axis, 1);
    }

    return output_shape;
}
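
// Sketch (illustrative): reducing axis 1 of a { 4, 3, 2 } shape.
//
//   const TensorShape kept    = compute_reduced_shape(TensorShape(4U, 3U, 2U), 1);        // { 4, 1, 2 }
//   const TensorShape dropped = compute_reduced_shape(TensorShape(4U, 3U, 2U), 1, false); // { 4, 2 }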

/** Calculate the upsampled shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] info  Contains stride information (x and y)
 *
 * @return the calculated shape
 */
inline TensorShape compute_upsample_shape(const ITensorInfo &input, const Size2D &info)
{
    const DataLayout data_layout = input.data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    TensorShape        scale_out_shape(input.tensor_shape());
    const unsigned int out_x = input.dimension(idx_width) * info.x();
    const unsigned int out_y = input.dimension(idx_height) * info.y();
    scale_out_shape.set(idx_width, out_x);
    scale_out_shape.set(idx_height, out_y);

    return scale_out_shape;
}

/** Get the tensor shape
 *
 * @param[in] data Input data
 *
 * @return the extracted tensor shape
 */
template <typename T>
inline TensorShape extract_shape(T *data)
{
    return data->info()->tensor_shape();
}

inline TensorShape extract_shape(ITensorInfo *data)
{
    return data->tensor_shape();
}

inline TensorShape extract_shape(const ITensorInfo *data)
{
    return data->tensor_shape();
}

inline TensorShape extract_shape(const TensorShape *data)
{
    return *data;
}

inline TensorShape extract_shape(TensorShape *data)
{
    return *data;
}

/** Calculate the unstack shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] axis        Axis on which to perform the unstack operation
 *
 * @return the calculated shape
 */
inline TensorShape calculate_unstack_shape(TensorShape input_shape, unsigned int axis)
{
    ARM_COMPUTE_ERROR_ON(axis > input_shape.num_dimensions());
    input_shape.remove_dimension(axis);
    return input_shape;
}
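
// Sketch (illustrative): unstacking removes the given axis from the shape.
//
//   const TensorShape out = calculate_unstack_shape(TensorShape(4U, 3U, 2U), 1);
//   // out is expected to be { 4, 2 }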

/** Calculate the concatenate output shape of the concatenate operation along a single axis
 *
 * @param[in] input Vector of pointers to the inputs (tensors, tensor infos or shapes) whose shapes are concatenated
 * @param[in] axis  Axis along which to concatenate the input tensors
 *
 * @return the calculated shape
 */
template <typename T>
inline TensorShape calculate_concatenate_shape(const std::vector<T *> &input, size_t axis)
{
    TensorShape out_shape = extract_shape(input[0]);

#if defined(ARM_COMPUTE_ASSERTS_ENABLED)
    // All dimensions must match except the axis one
    for(unsigned int i = 0; i < MAX_DIMS; ++i)
    {
        if(i == axis)
        {
            continue;
        }

        for(const auto &tensor : input)
        {
            ARM_COMPUTE_ERROR_ON(tensor == nullptr);
            const TensorShape shape = extract_shape(tensor);
            ARM_COMPUTE_ERROR_ON(out_shape[i] != shape[i]);
        }
    }
#endif // defined(ARM_COMPUTE_ASSERTS_ENABLED)

    // Calculate output shape
    size_t new_size = 0;
    for(const auto &tensor : input)
    {
        const TensorShape shape = extract_shape(tensor);
        new_size += shape[axis];
    }

    out_shape.set(axis, new_size);

    return out_shape;
}
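
// Sketch (hypothetical shapes): concatenating { 4, 3 } and { 4, 5 } along
// axis 1; all other dimensions must match.
//
//   TensorShape a(4U, 3U);
//   TensorShape b(4U, 5U);
//   const TensorShape out = calculate_concatenate_shape(std::vector<TensorShape *>{ &a, &b }, 1);
//   // out is expected to be { 4, 8 }
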
/** Calculate the stack output shape of a tensor
 *
 * @param[in] a           Input tensor info
 * @param[in] axis        Axis on which to perform the stack operation
 * @param[in] num_tensors Number of tensors to stack
 *
 * @return the calculated shape
 */
inline TensorShape compute_stack_shape(const ITensorInfo &a, unsigned int axis, unsigned int num_tensors)
{
    ARM_COMPUTE_ERROR_ON(axis > a.num_dimensions());
    ARM_COMPUTE_ERROR_ON(a.num_dimensions() > 4);

    TensorShape shape_out{ a.tensor_shape() };
    shape_out.set(axis, num_tensors);

    unsigned int i_shift = 0;

    for(unsigned int i = 0; i < a.num_dimensions(); ++i)
    {
        if(i == axis)
        {
            i_shift++;
        }

        shape_out.set(i + i_shift, a.tensor_shape()[i]);
    }
    return shape_out;
}
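
// Sketch (illustrative): stacking two { 4, 3 } tensors along axis 0 inserts a
// new leading dimension of size num_tensors.
//
//   TensorInfo info(TensorShape(4U, 3U), 1, DataType::F32);
//   const TensorShape out = compute_stack_shape(info, 0, 2);
//   // out is expected to be { 2, 4, 3 }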

/** Calculate the gather output shape of a tensor
 *
 * @param[in] input_shape   Input tensor shape
 * @param[in] indices_shape Indices tensor shape (only 1D indices are supported)
 * @param[in] actual_axis   Axis on which to perform the gather operation
 *
 * @return the calculated shape
 */
inline TensorShape compute_gather_shape(const TensorShape &input_shape, const TensorShape &indices_shape, uint32_t actual_axis)
{
    ARM_COMPUTE_ERROR_ON(indices_shape.num_dimensions() > 1);
    ARM_COMPUTE_ERROR_ON(input_shape.num_dimensions() > 4);
    ARM_COMPUTE_ERROR_ON(actual_axis >= input_shape.num_dimensions());

    TensorShape output_shape  = input_shape;
    output_shape[actual_axis] = indices_shape[0];

    return output_shape;
}
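
// Sketch (hypothetical values): gathering 7 indices along axis 0 of a
// { 5, 3 } tensor replaces that dimension with the number of indices.
//
//   const TensorShape out = compute_gather_shape(TensorShape(5U, 3U), TensorShape(7U), 0);
//   // out is expected to be { 7, 3 }
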
} // namespace shape_calculator
} // namespace misc
} // namespace arm_compute
#endif /* ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H */