/*
 * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ACL_ARM_COMPUTE_CORE_UTILS_MISC_SHAPECALCULATOR
#define ACL_ARM_COMPUTE_CORE_UTILS_MISC_SHAPECALCULATOR

#include "arm_compute/core/ConvolutionInfo.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/runtime/FunctionDescriptors.h"

#include "arm_compute/core/utils/helpers/tensor_transform.h"

#include <cmath>

namespace arm_compute
{
namespace misc
{
namespace shape_calculator
{
/** Calculate the output tensor shape for the reduce mean operation
 *
 * @param[in] input          Input tensor shape
 * @param[in] reduction_axis Reduction axis
 * @param[in] keep_dims      Flag to indicate if dimensions are kept
 *
 * @return the calculated shape
 */
inline TensorShape calculate_reduce_mean_shape(ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims)
{
    const int   reduction_ops = reduction_axis.num_dimensions();
    Coordinates axis_local    = reduction_axis;
    const int   input_dims    = input->num_dimensions();
    convert_negative_axis(axis_local, input_dims);
    TensorShape out_shape = input->tensor_shape();
    // Configure reshape layer if we want to drop the dimensions
    if(!keep_dims)
    {
        // We have to sort the reduction axis vectors in order for remove_dimension
        // to work properly
        std::sort(axis_local.begin(), axis_local.begin() + reduction_ops);
        for(int i = 0; i < reduction_ops; ++i)
        {
            out_shape.remove_dimension(axis_local[i] - i);
        }
        return out_shape;
    }
    else
    {
        for(int i = 0; i < reduction_ops; ++i)
        {
            out_shape.set(axis_local[i], 1);
        }
        return out_shape;
    }
}
/** Calculate the output tensor shape of a vector input given the convolution dimensions
 *
 * @param[in] input       Input tensor shape
 * @param[in] conv_w      Convolution width
 * @param[in] conv_h      Convolution height
 * @param[in] data_layout Data layout
 *
 * @return the calculated shape
 */
inline TensorShape compute_vector_to_tensor_output_shape(const TensorShape &input, size_t conv_w, size_t conv_h, const DataLayout &data_layout)
{
    const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape output_shape(input);
    output_shape.set(idx_w, conv_w);
    output_shape.set(idx_h, conv_h);
    output_shape.set(idx_c, input.x() / (conv_w * conv_h));

    return output_shape;
}

/** Calculate the permuted shape of an input given a permutation vector
 *
 * @param[in] input Input tensor info
 * @param[in] perm  Permutation vector
 *
 * @return the calculated shape
 */
inline TensorShape compute_permutation_output_shape(const ITensorInfo &input, const PermutationVector &perm)
{
    TensorShape output_shape = input.tensor_shape();
    permute(output_shape, perm);
    return output_shape;
}

/** Calculate the output shape of the reorg layer given a stride
 *
 * @param[in] input  Input tensor info
 * @param[in] stride Stride
 *
 * @return the calculated shape
 */
inline TensorShape compute_reorg_output_shape(const ITensorInfo &input, int32_t stride)
{
    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    ARM_COMPUTE_ERROR_ON(stride <= 0);
    ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_width] % stride != 0), "The width of the input tensor must be a multiple of stride");
    ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_height] % stride != 0), "The height of the input tensor must be a multiple of stride");

    TensorShape output_shape{ input.tensor_shape() };

    output_shape.set(idx_width, output_shape[idx_width] / stride);
    output_shape.set(idx_height, output_shape[idx_height] / stride);
    output_shape.set(idx_channel, output_shape[idx_channel] * stride * stride);

    return output_shape;
}
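// Worked example (illustrative shapes, not from the library docs): for an NCHW input of shape
// [W=6, H=4, C=2] and stride 2, the result is [3, 2, 8]: width and height are divided by the
// stride while the channel count is multiplied by stride * stride.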

/** Calculate the reshaped shape of the weights
 *
 * @param[in] weights    Weights tensor info
 * @param[in] has_bias   (Optional) Set to true if there is bias
 * @param[in] num_groups (Optional) Number of groups
 *
 * @return the calculated shape of the reshaped weights
 */
inline TensorShape compute_weights_reshaped_shape(const ITensorInfo &weights, bool has_bias = false, unsigned int num_groups = 1)
{
    // A number of groups greater than one is only supported for the NCHW data layout, and the number of weights must be a multiple of it.
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(weights.data_layout() == DataLayout::NHWC && num_groups > 1);
    ARM_COMPUTE_ERROR_ON((weights.dimension(3) % num_groups) != 0);

    // Calculate output shape
    TensorShape weights_reshaped{ weights.tensor_shape() };
    weights_reshaped.set(3, weights_reshaped[3] / num_groups);

    weights_reshaped.collapse(3);
    const size_t tmp_dim = weights_reshaped[0];
    weights_reshaped.set(0, weights_reshaped[1]);
    weights_reshaped.set(1, tmp_dim + (has_bias ? 1 : 0));
    if(weights.num_dimensions() < 5)
    {
        weights_reshaped.set(2, num_groups);
    }

    return weights_reshaped;
}
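// Worked example (illustrative shapes, not from the library docs): 3x3 weights with 16 input
// and 8 output channels, i.e. [3, 3, 16, 8], with no bias and num_groups = 1 are reshaped to
// [8, 3 * 3 * 16, 1] = [8, 144, 1]; with has_bias = true the second dimension becomes 145.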

/** Calculate the Left Hand Side matrix reshaped shape
 *
 * @param[in] a                       Input tensor info
 * @param[in] lhs_info                Left Hand Side matrix information
 * @param[in] reinterpret_input_as_3d (Optional) Set to true if the input needs to be interpreted as 3d
 *
 * @return the calculated shape
 */
inline TensorShape compute_lhs_reshaped_shape(const ITensorInfo &a, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false)
{
    ARM_COMPUTE_ERROR_ON(lhs_info.m0 == 0);
    ARM_COMPUTE_ERROR_ON(lhs_info.k0 == 0);
    ARM_COMPUTE_ERROR_ON(lhs_info.v0 == 0);

    // Input width/height
    const unsigned int input_width  = a.dimension(0);
    const unsigned int input_height = reinterpret_input_as_3d ? a.dimension(1) * a.dimension(2) : a.dimension(1);

    // Number of horizontal/vertical blocks in the input tensor
    const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(lhs_info.k0));
    const unsigned int num_vert_blocks  = std::ceil(input_height / static_cast<float>(lhs_info.m0));

    // Block size
    const unsigned int block_size = lhs_info.m0 * lhs_info.k0;

    // Output width/height
    const unsigned int output_width  = block_size * num_horiz_blocks * lhs_info.v0;
    const unsigned int output_height = std::ceil(num_vert_blocks / static_cast<float>(lhs_info.v0));

    TensorShape lhs_shape{ a.tensor_shape() };
    lhs_shape.set(0, output_width);
    lhs_shape.set(1, output_height);

    if((reinterpret_input_as_3d) && (lhs_shape.num_dimensions() > 2))
    {
        // When the data format is NHWC and the shapes are Nx1x1
        // the tensor shape num_dimensions is automatically set to 1 instead of 3.
        // To avoid failures by removing a dimension that doesn't exist,
        // check if the number of dimensions is greater than 2.
        lhs_shape.remove_dimension(2);
    }

    return lhs_shape;
}
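// Worked example (illustrative shapes, not from the library docs): for a 2D input of shape
// [K=8, M=6] with m0 = 4, k0 = 4 and v0 = 2 there are 2 horizontal and 2 vertical blocks of
// size 16, so the reshaped LHS shape is [16 * 2 * 2, ceil(2 / 2)] = [64, 1].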

/** Calculate the Right Hand Side matrix reshaped shape
 *
 * @param[in] a        Input tensor info
 * @param[in] rhs_info Right Hand Side matrix information
 *
 * @return the calculated shape
 */
inline TensorShape compute_rhs_reshaped_shape(const ITensorInfo &a, const GEMMRHSMatrixInfo &rhs_info)
{
    ARM_COMPUTE_ERROR_ON(rhs_info.n0 == 0);
    ARM_COMPUTE_ERROR_ON(rhs_info.k0 == 0);
    ARM_COMPUTE_ERROR_ON(rhs_info.h0 == 0);

    // Input width/height
    const unsigned int input_width  = a.dimension(0);
    const unsigned int input_height = a.dimension(1);

    // Number of horizontal/vertical blocks in the input tensor
    const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(rhs_info.n0));
    const unsigned int num_vert_blocks  = std::ceil(input_height / static_cast<float>(rhs_info.k0));

    // Block size
    const unsigned int block_size = rhs_info.n0 * rhs_info.k0;

    // Output width/height
    const unsigned int output_width  = block_size * num_vert_blocks * rhs_info.h0;
    const unsigned int output_height = std::ceil(num_horiz_blocks / static_cast<float>(rhs_info.h0));

    TensorShape rhs_shape{ a.tensor_shape() };
    rhs_shape.set(0, output_width);
    rhs_shape.set(1, output_height);

    return rhs_shape;
}

/** Calculate the interleaved shape of an input tensor
 *
 * @param[in] a                         Input tensor info
 * @param[in] mult_interleave4x4_height (Optional) Interleave4x4 height
 * @param[in] reinterpret_input_as_3d   (Optional) Set to true if the input needs to be interpreted as 3d
 *
 * @return the calculated shape
 */
inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false)
{
    // The interleaved output matrix will have the following shape: [ a_width * W, ceil(a_height / W) ] where W = 4 * mult_interleave4x4_height
    ARM_COMPUTE_ERROR_ON(mult_interleave4x4_height < 1);
    const int   interleave_width = 4 * mult_interleave4x4_height;
    TensorShape shape_interleaved_a{ a.tensor_shape() };
    shape_interleaved_a.set(0, a.dimension(0) * interleave_width);
    if(reinterpret_input_as_3d)
    {
        const int M      = a.dimension(1) * a.dimension(2);
        const int height = std::ceil(M / static_cast<float>(interleave_width));
        shape_interleaved_a.set(1, height);

        // When the data format is NHWC and the shapes are Nx1x1
        // the tensor shape num_dimensions is automatically set to 1 instead of 3.
        // To avoid failures by removing a dimension that doesn't exist,
        // check if the number of dimensions is greater than 2.
        if(shape_interleaved_a.num_dimensions() > 2)
        {
            shape_interleaved_a.remove_dimension(2);
        }
    }
    else
    {
        shape_interleaved_a.set(1, std::ceil(a.dimension(1) / static_cast<float>(interleave_width)));
    }

    return shape_interleaved_a;
}
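// Worked example (illustrative shapes, not from the library docs): for a matrix A of shape
// [K=7, M=9] and mult_interleave4x4_height = 1 (W = 4), the interleaved shape is
// [7 * 4, ceil(9 / 4)] = [28, 3].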

/** Calculate the transposed 1xW shape
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_transpose1xW_shape(const ITensorInfo &b)
{
    // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
    TensorShape shape_transposed1xW_b{ b.tensor_shape() };
    shape_transposed1xW_b.set(0, b.dimension(1) * 16);
    shape_transposed1xW_b.set(1, std::ceil(b.dimension(0) / 16.f));

    return shape_transposed1xW_b;
}

/** Calculate the transposed 1xW width element shape
 *
 * @param[in] b                        Input tensor info
 * @param[in] mult_transpose1xW_width (Optional) Transpose1xW width
 *
 * @return the calculated shape
 */
inline TensorShape compute_transpose1xW_with_element_size_shape(const ITensorInfo &b, int mult_transpose1xW_width = 1)
{
    // Note: mult_transpose1xW_width expresses the number of chunks with size 1x(W) we want to store on the same row
    // The transpose1xW output matrix will have the following shape:
    // [ b_height * W, ceil(b_width / W) ] where W = (16 / element size of the tensor) * mult_transpose1xW_width
    ARM_COMPUTE_ERROR_ON(mult_transpose1xW_width < 1);
    TensorShape  shape_transposed1xW_b{ b.tensor_shape() };
    const size_t transpose_width = (16 / b.element_size()) * mult_transpose1xW_width;
    shape_transposed1xW_b.set(0, b.dimension(1) * transpose_width);
    shape_transposed1xW_b.set(1, static_cast<size_t>(std::ceil(b.dimension(0) / static_cast<float>(transpose_width))));

    return shape_transposed1xW_b;
}
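// Worked example (illustrative shapes, not from the library docs): for an F32 matrix B of shape
// [N=8, K=6] and mult_transpose1xW_width = 1, W = 16 / 4 = 4, so the result is
// [6 * 4, ceil(8 / 4)] = [24, 2].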

/** Calculate the reductionA shape used in GEMMLowp
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_reductionA_shape(const ITensorInfo &b)
{
    TensorShape shape_vector_sum_col{ b.tensor_shape() };
    if(shape_vector_sum_col.num_dimensions() > 1)
    {
        shape_vector_sum_col.remove_dimension(1);
    }

    return shape_vector_sum_col;
}

/** Calculate the reductionB shape used in GEMMLowp
 *
 * @param[in] a Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_reductionB_shape(const ITensorInfo &a)
{
    TensorShape shape_vector_sum_row{ a.tensor_shape() };
    shape_vector_sum_row.set(Window::DimX, a.dimension(1));
    if(shape_vector_sum_row.num_dimensions() > 1)
    {
        shape_vector_sum_row.remove_dimension(1);
    }

    return shape_vector_sum_row;
}

/** Calculate the Col2Im shape
 *
 * @param[in] input           Input tensor info
 * @param[in] convolved_dims  Convolved dimensions
 * @param[in] batch_size_on_z True if batch size is on z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape
 */
inline TensorShape compute_col2im_shape(const ITensorInfo &input, const Size2D &convolved_dims, bool batch_size_on_z, unsigned int num_groups = 1)
{
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(input.tensor_shape()[1] != (convolved_dims.area()));
    ARM_COMPUTE_ERROR_ON((num_groups > 1) && input.tensor_shape()[2] != num_groups);

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape col2im_shape{ input.tensor_shape() };
    // If batches start on the 3rd dimension, shift dimensions right by 1 to retain the upper tensor shape,
    // as the first three will be overridden by H,W,C data
    if(batch_size_on_z && num_groups == 1)
    {
        col2im_shape.shift_right(1);
    }
    col2im_shape.set(width_idx, convolved_dims.width);
    col2im_shape.set(height_idx, convolved_dims.height);
    col2im_shape.set(channel_idx, input.tensor_shape()[0] * num_groups);

    return col2im_shape;
}
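// Worked example (illustrative shapes, not from the library docs): for an NCHW GEMM output of
// shape [8, 12, 2], convolved_dims = 4x3, batch_size_on_z = true and num_groups = 1, the shape
// is first shifted right to [1, 8, 12, 2] and then becomes [4, 3, 8, 2].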

/** Calculate the transposed shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_transposed_shape(const ITensorInfo &input)
{
    TensorShape shape_transposed{ input.tensor_shape() };

    shape_transposed.set(0, input.dimension(1), false);
    shape_transposed.set(1, input.dimension(0), false);

    return shape_transposed;
}

/** Calculate the depthwise convolution output shape of a tensor
 *
 * @param[in] input   Input tensor info
 * @param[in] weights Weights tensor info
 * @param[in] info    Convolution info
 *
 * @return the calculated shape
 */
inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, const ConvolutionInfo &info)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    const DataLayout weights_data_layout = weights.data_layout();
    const int        weights_width_idx   = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::WIDTH);
    const int        weights_height_idx  = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::HEIGHT);

    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_shape[width_idx], input_shape[height_idx],
                                                              weights_shape[weights_width_idx], weights_shape[weights_height_idx],
                                                              info.pad_stride_info, info.dilation);

    TensorShape output_shape{ input_shape };
    output_shape.set(width_idx, output_width);
    output_shape.set(height_idx, output_height);
    output_shape.set(channel_idx, input_shape[channel_idx] * info.depth_multiplier);

    return output_shape;
}
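// Worked example (illustrative shapes, not from the library docs): for an NCHW input of shape
// [8, 8, 3], 3x3 weights, stride 1, no padding and depth_multiplier = 2, the output shape is
// [6, 6, 6].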

/** Calculate the upsampled output shape used for deconvolution
 *
 * @param[in] input    Input tensor info
 * @param[in] weights  Weights tensor shape
 * @param[in] sx       Stride on x axis
 * @param[in] sy       Stride on y axis
 * @param[in] out_dims Output shape dimensions
 * @param[in] padx     Padding on x axis
 * @param[in] pady     Padding on y axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_deconvolution_upsampled_shape(const ITensorInfo &input, const ITensorInfo &weights, unsigned int sx, unsigned int sy,
                                                         std::pair<unsigned int, unsigned int> &out_dims, uint32_t &padx, uint32_t &pady)
{
    const DataLayout data_layout = input.data_layout();
    const size_t     idx_w       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t     idx_h       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    // Find the upsampled dimensions
    unsigned int out_x = (input.dimension(idx_w) - 1) * sx + 1;
    unsigned int out_y = (input.dimension(idx_h) - 1) * sy + 1;

    // Find the padding needed for the convolution with stride 1 in order to match output shape
    padx = out_dims.first - (out_x - weights.dimension(idx_w) + 1);
    pady = out_dims.second - (out_y - weights.dimension(idx_h) + 1);
    out_x += padx;
    out_y += pady;

    TensorShape scale_out_shape(input.tensor_shape());
    scale_out_shape.set(idx_w, out_x);
    scale_out_shape.set(idx_h, out_y);

    return scale_out_shape;
}
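// Worked example (illustrative shapes, not from the library docs): for a 4x4 input, 3x3 weights,
// strides sx = sy = 2 and out_dims = (8, 8), the upsampled size is (4 - 1) * 2 + 1 = 7 and the
// extra padding is 8 - (7 - 3 + 1) = 3, so the shape used for the stride-1 convolution is 10x10.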

/** Calculate the output shape of the deconvolution layer
 *
 * @param[in] out_dims Output x and y shape dimensions
 * @param[in] input    Input tensor info
 * @param[in] weights  Weights tensor shape
 *
 * @return the calculated shape
 */
inline TensorShape compute_deconvolution_output_shape(const std::pair<unsigned int, unsigned int> &out_dims, const ITensorInfo &input, const ITensorInfo &weights)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        batch_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    TensorShape out_shape{ input_shape };
    out_shape.set(width_idx, out_dims.first);
    out_shape.set(height_idx, out_dims.second);
    out_shape.set(channel_idx, weights_shape[batch_idx]);
    return out_shape;
}

/** Calculate the im2col output shape of a tensor
 *
 * @param[in] input           Input tensor info
 * @param[in] kernel_dims     The kernel dimensions (width and height).
 * @param[in] conv_info       Contains padding and stride information
 * @param[in] has_bias        In case biases are provided expands the matrix with 1
 * @param[in] dilation        Dilation, in elements, across x and y
 * @param[in] batch_size_on_z True if batch size is on z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 * @param[in] input_pad_right (Optional) When fast-math is selected, per element padding for the im2col matrix may be necessary
 *
 * @return the calculated shape
 */
inline TensorShape compute_im2col_conv_shape(const ITensorInfo *input, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, bool batch_size_on_z,
                                             unsigned int num_groups = 1, unsigned int input_pad_right = 0)
{
    // The output shape will be the 3D shape [ out_channels * kernel_area, num_elems_per_out_channel, batches ] if batch_size_on_z == true
    // or the 4D shape [ out_channels * kernel_area / num_groups, num_elems_per_out_channel, num_groups, batches ] if batch_size_on_z == false

    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && input->data_layout() != DataLayout::NCHW);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && batch_size_on_z);

    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    std::pair<unsigned int, unsigned int> out_dims = scaled_dimensions(output_shape[width_idx], output_shape[height_idx], kernel_dims.width, kernel_dims.height, conv_info, dilation);
    output_shape.set(0, ((output_shape[channel_idx] + input_pad_right) / num_groups * kernel_dims.area() + (has_bias ? 1 : 0))); // NOLINT
    output_shape.set(1, (out_dims.first * out_dims.second));
    if(batch_size_on_z && output_shape.num_dimensions() >= 3)
    {
        output_shape.remove_dimension(2);
    }
    else
    {
        output_shape.set(2, num_groups);
    }

    return output_shape;
}
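// Worked example (illustrative shapes, not from the library docs): for an NCHW input of shape
// [8, 8, 3, 2], a 3x3 kernel, stride 1, no padding, no bias, batch_size_on_z = true and
// num_groups = 1, the im2col output shape is [3 * 9, 6 * 6, 2] = [27, 36, 2].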

/** Calculate the flattened output shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_flatten_shape(const ITensorInfo *input)
{
    // The output shape will be the flatten version of the input (i.e. [ width * height * channels, num_batches, ... ] ). Used for FlattenLayer and FullyConnectedLayer.

    TensorShape output_shape{ input->tensor_shape() };

    output_shape.collapse(3);

    return output_shape;
}

/** Calculate the softmax output shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] axis  (Optional) Softmax axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_softmax_shape(const ITensorInfo *input, size_t axis = 1)
{
    // The output shape will be a 2D version of the input. For instance:
    // - [x,y,z] and axis 1 will return [x, y*z]
    // - [x,y,z,w] and axis 2 will return [x*y, w*z]
    // - [x,y,z,w] and axis 3 will return [x*y*z, w]
    TensorShape shape2D = input->tensor_shape();

    if(axis < input->num_dimensions())
    {
        // Collapse from axis onward (this changes the shape)
        shape2D.collapse_from(axis);

        // Collapse the rest (collapse is inclusive)
        shape2D.collapse(shape2D.num_dimensions() - 1);
    }
    else
    {
        // Collapse everything
        shape2D.collapse(shape2D.num_dimensions());
    }

    if(axis == 0)
    {
        // If axis is zero the first dim should be one. Since
        // collapse is an inclusive operation we need to shift
        shape2D.shift_right(1);
    }

    return shape2D;
}

/** Calculate the winograd filter transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    TensorShape tensor_shape{ input.tensor_shape() };

    const Size2D kernel_size      = winograd_info.kernel_size;
    const Size2D output_tile_size = winograd_info.output_tile_size;
    const Size2D input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    tensor_shape.remove_dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH));
    tensor_shape.set(Window::DimX, input.dimension(3));
    tensor_shape.set(Window::DimY, input.dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL)));
    tensor_shape.set(Window::DimZ, input_tile_size.area());

    return tensor_shape;
}

/** Calculate the winograd input transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        output_tile_size = winograd_info.output_tile_size;
    const Size2D        input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    const size_t idx_w = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    // Compute the number of output tiles along the x and y direction of size "output_tile_size"
    const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(input.tensor_shape()[idx_w], input.tensor_shape()[idx_h]),
                                                                kernel_size,
                                                                output_tile_size,
                                                                conv_info);

    const unsigned int width  = input.tensor_shape()[idx_c];
    const unsigned int height = num_tiles.area();
    const unsigned int depth  = input_tile_size.area();

    TensorShape output_shape{ input.tensor_shape() };
    output_shape.set(0, width);
    output_shape.set(1, height);
    output_shape.set(2, depth);

    return output_shape;
}
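// Worked example (illustrative shapes, assuming no padding; not from the library docs): for an
// NCHW input of shape [8, 8, 2], a 3x3 kernel and a 2x2 output tile (4x4 input tile), there are
// 3 x 3 = 9 tiles, so the transformed shape is [C=2, num_tiles=9, tile_area=16].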

/** Calculate the winograd output transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        input_dimensions = winograd_info.input_dimensions;
    const DataLayout    data_layout      = winograd_info.output_data_layout;

    // Compute output shape
    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_dimensions.width, input_dimensions.height,
                                                              kernel_size.width, kernel_size.height, conv_info);

    TensorShape tensor_shape{ input.tensor_shape() };

    // Output dimension
    const unsigned int out_w = output_width;
    const unsigned int out_h = output_height;
    const unsigned int out_c = input.dimension(0);

    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH), out_w);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT), out_h);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL), out_c);

    return tensor_shape;
}

/** Calculate the deep convolution output shape of a tensor
 *
 * @param[in] input_shape       Input tensor shape
 * @param[in] input_data_layout Input data layout
 * @param[in] weights_shape     Weights tensor shape
 * @param[in] conv_info         Contains padding and stride information
 *
 * @return the calculated shape
 */
inline TensorShape compute_deep_convolution_shape(const TensorShape &input_shape, DataLayout input_data_layout, const TensorShape &weights_shape, const PadStrideInfo &conv_info)
{
    const size_t idx_width   = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input_data_layout, DataLayoutDimension::CHANNEL);

    const unsigned int input_width         = input_shape[idx_width];
    const unsigned int input_height        = input_shape[idx_height];
    const unsigned int weights_width       = weights_shape[idx_width];
    const unsigned int weights_height      = weights_shape[idx_height];
    const unsigned int weights_out_channel = weights_shape[3];
    unsigned int       output_width        = 0;
    unsigned int       output_height       = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, weights_width, weights_height, conv_info);

    TensorShape output_shape{ input_shape };
    output_shape.set(idx_width, output_width);
    output_shape.set(idx_height, output_height);
    output_shape.set(idx_channel, weights_out_channel);

    return output_shape;
}
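// Worked example (illustrative shapes, not from the library docs): for an NCHW input of shape
// [8, 8, 3, 1], weights of shape [3, 3, 3, 16], stride 1 and no padding, the output shape is
// [6, 6, 16, 1].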

/** Calculate the deep convolution output shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] weights   Weights tensor info
 * @param[in] conv_info Contains padding and stride information
 *
 * @return the calculated shape
 */
inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, const PadStrideInfo &conv_info)
{
    return compute_deep_convolution_shape(input.tensor_shape(), input.data_layout(), weights.tensor_shape(), conv_info);
}

/** Calculate the indirect buffer output shape used by the indirect convolution function
 *
 * @param[in] input_shape       Input tensor shape
 * @param[in] input_data_layout Input data layout
 * @param[in] weights_shape     Weights tensor shape
 * @param[in] conv_info         Contains padding and stride information
 * @param[in] desc              Contains the direct/indirect convolution compute arguments, such as the tiling dimensions
 *
 * @return the calculated shape
 */
inline TensorShape compute_indirect_buffer_shape(const TensorShape &input_shape, DataLayout input_data_layout, const TensorShape &weights_shape, const PadStrideInfo &conv_info,
                                                 const DirectConvComputeKernelInfo &desc)
{
    ARM_COMPUTE_ERROR_ON_MSG(input_data_layout != DataLayout::NHWC, "The data layout can only be NHWC");
    ARM_COMPUTE_ERROR_ON_MSG(desc.m0 <= 0 || desc.m0 > 8, "M0 can only be greater than 0 and less than or equal to 8");

    const unsigned int m0 = desc.m0;
    const unsigned int kw = weights_shape[1];
    const unsigned int kh = weights_shape[2];

    TensorShape output_conv2d_shape = compute_deep_convolution_shape(input_shape, input_data_layout, weights_shape, conv_info);

    const unsigned int output_w = m0 * kw * kh;
    const unsigned int output_h = DIV_CEIL(output_conv2d_shape[1] * output_conv2d_shape[2], m0);
    const unsigned int output_b = output_conv2d_shape[3];

    return TensorShape(output_w, output_h, output_b);
}
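// Worked example (illustrative shapes, not from the library docs): for an NHWC input of shape
// [3, 8, 8, 1], weights of shape [3, 3, 3, 16], stride 1, no padding and m0 = 4, the convolution
// output is [16, 6, 6, 1], so the indirect buffer shape is [4 * 3 * 3, ceil(36 / 4), 1] = [36, 9, 1].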

/** Calculate the min/max output shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_min_max_shape(const ITensorInfo *input)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(Window::DimX, 2);
    output_shape.remove_dimension(1);
    output_shape.remove_dimension(1);

    return output_shape;
}

/** Calculate the output pool shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
{
    int pooled_w = 0;
    int pooled_h = 0;

    TensorShape output_shape{ input.tensor_shape() };

    const bool is_global_pooling = pool_info.is_global_pooling;
    const int  idx_width         = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const int  idx_height        = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const int  input_width       = input.tensor_shape()[idx_width];
    const int  input_height      = input.tensor_shape()[idx_height];
    const int  pool_size_x       = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size.width;
    const int  pool_size_y       = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size.height;

    std::tie(pooled_w, pooled_h) = scaled_dimensions_signed(input_width, input_height,
                                                            pool_size_x, pool_size_y,
                                                            pool_info.pad_stride_info);

    ARM_COMPUTE_ERROR_ON_MSG((pooled_w < 1 || pooled_h < 1), "Calculated output dimension size is invalid");

    output_shape.set(idx_width, static_cast<size_t>(pooled_w));
    output_shape.set(idx_height, static_cast<size_t>(pooled_h));

    return output_shape;
}
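// Worked example (illustrative shapes, not from the library docs): for a 7x7 input, a 3x3 pool,
// stride 2 and no padding, the pooled size is (7 - 3) / 2 + 1 = 3, so width and height become 3.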

/** Calculate the output unpool shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_unpool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
{
    const unsigned int idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const TensorShape  input_shape = input.tensor_shape();
    ARM_COMPUTE_ERROR_ON(input_shape[idx_height] <= 1 || input_shape[idx_width] <= 1);
    const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
    const unsigned int  stride_x        = pad_stride_info.stride().first;
    const unsigned int  stride_y        = pad_stride_info.stride().second;

    const int pad_left   = pad_stride_info.pad_left();
    const int pad_top    = pad_stride_info.pad_top();
    const int pad_right  = pad_stride_info.pad_right();
    const int pad_bottom = pad_stride_info.pad_bottom();

    TensorShape        output_shape = input_shape;
    const unsigned int out_width    = (input_shape[idx_width] - 1) * stride_x - pad_left - pad_right + pool_info.pool_size.width;
    const unsigned int out_height   = (input_shape[idx_height] - 1) * stride_y - pad_top - pad_bottom + pool_info.pool_size.height;

    output_shape.set(idx_width, out_width);
    output_shape.set(idx_height, out_height);
    return output_shape;
}
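// Worked example (illustrative shapes, not from the library docs): for a 3x3 input, a 3x3 pool,
// stride 2 and no padding, the unpooled size is (3 - 1) * 2 + 3 = 7, i.e. the inverse of the
// pooling example above.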

/** Calculate the output roi align shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] rois      Rois tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_roi_align_shape(const ITensorInfo &input, const ITensorInfo &rois, ROIPoolingLayerInfo pool_info)
{
    TensorShape output_shape{ input.tensor_shape() };

    const unsigned int idx_width  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);

    output_shape.set(idx_width, pool_info.pooled_width());
    output_shape.set(idx_height, pool_info.pooled_height());
    output_shape.set(3, rois.dimension(1));

    return output_shape;
}

/** Calculate the RNN shape of a tensor
 *
 * @param[in] input      Input tensor info
 * @param[in] batch_size Batch size
 *
 * @return the calculated shape
 */
inline TensorShape compute_rnn_shape(const ITensorInfo *input, const unsigned int batch_size)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(1, batch_size);

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0                    First input tensor info
 * @param[in] input1                    Second input tensor info
 * @param[in] is_interleaved_transposed True if the input is interleaved transposed
 * @param[in] reshape_info              GEMM reshape info
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info)
{
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
    ARM_COMPUTE_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The first input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");

    const bool reinterpret_input_as_3d  = reshape_info.reinterpret_input_as_3d();
    const bool reinterpret_output_as_3d = reshape_info.depth_output_gemm3d() != 0;
    const int  depth_output_gemm3d      = reinterpret_output_as_3d ? reshape_info.depth_output_gemm3d() : 1;
    const int  m                        = reshape_info.reinterpret_input_as_3d() ? input0.dimension(1) * input0.dimension(2) : input0.dimension(1);

    // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
    // dimension of the output tensor
    const int dim0 = is_interleaved_transposed ? reshape_info.n() : input1.dimension(0);
    const int dim1 = is_interleaved_transposed ? reshape_info.m() / depth_output_gemm3d : m / depth_output_gemm3d;
    const int dim2 = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
    const int dim3 = reinterpret_input_as_3d ? 1 : input0.tensor_shape()[3];

    TensorShape output_shape{ input0.tensor_shape() };

    output_shape.set(0, dim0);
    output_shape.set(1, dim1);
    output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : dim2);
    output_shape.set(3, reinterpret_output_as_3d ? dim2 : dim3);
    output_shape.set(4, reinterpret_output_as_3d ? dim3 : 1);

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0    First input tensor info
 * @param[in] input1    Second input tensor info
 * @param[in] gemm_info GEMM reshape info
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMReshapeInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(input1);
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");

    const bool reinterpret_input_as_3d  = gemm_info.reinterpret_input_as_3d();
    const bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d() != 0;
    const int  depth_output_gemm3d      = reinterpret_output_as_3d ? gemm_info.depth_output_gemm3d() : 1;

    TensorShape output_shape{ input0.tensor_shape() };

    if(!reinterpret_input_as_3d && !reinterpret_output_as_3d)
    {
        output_shape.set(0, gemm_info.n());
        output_shape.set(1, gemm_info.m());
    }
    else
    {
        // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
        // dimension of the output tensor
        const int batch_size = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
        output_shape.set(0, gemm_info.n());
        output_shape.set(1, gemm_info.m() / depth_output_gemm3d);
        output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : batch_size);
        output_shape.set(3, reinterpret_output_as_3d ? batch_size : 1);
    }

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0    First input tensor info
 * @param[in] input1    Second input tensor info
 * @param[in] gemm_info GEMM kernel info used to retrieve the original dimensions of the input matrices
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMKernelInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(input1);
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");

    const bool         reinterpret_input_as_3d  = gemm_info.reinterpret_input_as_3d;
    const bool         reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
    const unsigned int depth_output_gemm3d      = reinterpret_output_as_3d ? gemm_info.depth_output_gemm3d : 1;

    TensorShape output_shape{ input0.tensor_shape() };

    if(!reinterpret_input_as_3d && !reinterpret_output_as_3d)
    {
        output_shape.set(0, gemm_info.n);
        output_shape.set(1, gemm_info.m);
    }
    else
    {
        // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
        // dimension of the output tensor
        const unsigned int batch_size = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
        output_shape.set(0, gemm_info.n);
        output_shape.set(1, gemm_info.m / depth_output_gemm3d);
        output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : batch_size);
        output_shape.set(3, reinterpret_output_as_3d ? batch_size : 1);
    }

    return output_shape;
}
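// Worked example (illustrative shapes, not from the library docs): for input0 of shape
// [K=8, M=64, B=2] with gemm_info.m = 64, gemm_info.n = 32 and no 3D reinterpretation, the
// output shape is [32, 64, 2].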

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0      First input tensor shape
 * @param[in] input1      Second input tensor shape
 * @param[in] matmul_info Batch MatMul Kernel info to know which matrix is transposed
 *
 * @return the calculated shape
 */
inline TensorShape compute_matmul_shape(const TensorShape &input0, const TensorShape &input1, const MatMulKernelInfo &matmul_info)
{
    TensorShape output_shape{ input0 };

    if(matmul_info.adj_lhs)
    {
        output_shape.set(1, input0[0]); // The vertical (M) dimension
    }

    if(matmul_info.adj_rhs)
    {
        output_shape.set(0, input1[1]); // The horizontal (N) dimension
    }
    else
    {
        output_shape.set(0, input1[0]); // The horizontal (N) dimension
    }

    return output_shape;
}
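// Worked example (illustrative shapes, not from the library docs): for an LHS of shape
// [K=5, M=3, B=2] and an RHS of shape [N=4, K=5] with adj_lhs = adj_rhs = false, the output
// shape is [4, 3, 2].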
/** Calculate the output stage shape of a tensor
 *
 * @param[in] input           Input tensor info
 * @param[in] gemm_3d_depth   (Optional) GEMM 3d depth
 * @param[in] batch_size_on_z (Optional) True if batch size is on z axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_output_stage_shape(const ITensorInfo &input, unsigned int gemm_3d_depth = 1, bool batch_size_on_z = false)
{
    ARM_COMPUTE_ERROR_ON(input.data_layout() != DataLayout::NHWC && gemm_3d_depth > 1);

    TensorShape output_shape = input.tensor_shape();
    if(gemm_3d_depth > 1)
    {
        if(batch_size_on_z)
        {
            output_shape.shift_right(1);
        }
        output_shape.set(0, input.tensor_shape().x());
        output_shape.set(1, input.tensor_shape().y() / gemm_3d_depth);
        output_shape.set(2, gemm_3d_depth);
    }

    return output_shape;
}
1065
Michalis Spyroud33fe342019-01-04 17:10:25 +00001066/** Calculate the strided slice output shape of a tensor
1067 *
1068 * @param[in] input Input tensor info
1069 * @param[in] starts The starts of the dimensions of the input tensor to be sliced
1070 * @param[in] ends The ends of the dimensions of the input tensor to be sliced
1071 * @param[in] strides The strides of the dimensions of the input tensor to be sliced
1072 * @param[in] begin_mask If the ith bit of begin_mask is set, starts[i] is ignored and the fullest possible range in that dimension is used instead.
1073 * @param[in] end_mask If the ith bit of end_mask is set, ends[i] is ignored and the fullest possible range in that dimension is used instead.
1074 * @param[in] shrink_axis_mask If the ith bit of shrink_axis_mask is set, it implies that the ith specification shrinks the dimensionality by 1
1075 *
1076 * @return the calculated shape
1077 */
Georgios Pinitas77589b52018-08-21 14:41:35 +01001078inline TensorShape compute_strided_slice_shape(const ITensorInfo &input,
1079 const Coordinates &starts, const Coordinates &ends, const Coordinates &strides,
1080 int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
1081{
1082 using namespace arm_compute::helpers::tensor_transform;
Georgios Pinitasb4af2c62018-12-10 18:45:35 +00001083 return compute_strided_slice_output_shape(input.tensor_shape(), starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
1084}
Georgios Pinitas77589b52018-08-21 14:41:35 +01001085
Michalis Spyroud33fe342019-01-04 17:10:25 +00001086/** Calculate the slice output shape of a tensor
1087 *
1088 * @param[in] input_shape Input tensor shape
1089 * @param[in] starts The starts of the dimensions of the input tensor to be sliced
1090 * @param[in] ends The ends of the dimensions of the input tensor to be sliced
1091 *
1092 * @return the calculated shape
1093 */
Georgios Pinitasb4af2c62018-12-10 18:45:35 +00001094inline TensorShape compute_slice_shape(const TensorShape &input_shape, const Coordinates &starts, const Coordinates &ends)
1095{
1096 using namespace arm_compute::helpers::tensor_transform;
Georgios Pinitas77589b52018-08-21 14:41:35 +01001097
Georgios Pinitasb4af2c62018-12-10 18:45:35 +00001098 return compute_strided_slice_output_shape(input_shape,
1099 starts, ends, BiStrides(),
1100 0, construct_slice_end_mask(ends), 0);
Georgios Pinitas77589b52018-08-21 14:41:35 +01001101}
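// Illustrative usage sketch (hypothetical values): slicing a (10, 20) tensor from starts (2, 5)
// to ends (8, 15) keeps ends[i] - starts[i] elements per dimension:
//   TensorShape in(10U, 20U);
//   TensorShape out = compute_slice_shape(in, Coordinates(2, 5), Coordinates(8, 15));   // out == (6, 10)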
Georgios Pinitase1a352c2018-09-03 12:42:19 +01001102
Michalis Spyroud33fe342019-01-04 17:10:25 +00001103/** Calculate the batch to space output shape of a tensor
1104 *
SiCong Li5a7d1572023-03-21 12:00:15 +00001105 * @param[in] data_layout Data layout
1106 * @param[in] input Input tensor shape
1107 * @param[in] block_x Block shape x value
1108 * @param[in] block_y Block shape y value
1109 * @param[in] crop_info Information about how the output shape is cropped after batch to space is performed
Michalis Spyroud33fe342019-01-04 17:10:25 +00001110 *
1111 * @return the calculated shape
1112 */
SiCong Li5a7d1572023-03-21 12:00:15 +00001113inline TensorShape compute_batch_to_space_shape(DataLayout data_layout, const TensorShape &input, int block_x, int block_y, const CropInfo &crop_info = CropInfo{})
Michalis Spyrou6a8d3b62018-08-31 10:07:09 +01001114{
SiCong Li5a7d1572023-03-21 12:00:15 +00001115 ARM_COMPUTE_ERROR_ON(block_x < 1 || block_y < 1);
Michalis Spyrouf1addb62018-09-11 11:16:47 +01001116
SiCong Li5a7d1572023-03-21 12:00:15 +00001117 const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
1118 const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
1119 const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
Michalis Spyrouf1addb62018-09-11 11:16:47 +01001120
SiCong Li5a7d1572023-03-21 12:00:15 +00001121 TensorShape output_shape{ input };
SiCong Li4ceb4532023-03-13 15:02:23 +00001122
SiCong Li5a7d1572023-03-21 12:00:15 +00001123 unsigned int new_width = input[idx_width] * static_cast<unsigned int>(block_x);
1124 unsigned int new_height = input[idx_height] * static_cast<unsigned int>(block_y);
1125 const unsigned int width_crop = crop_info.left + crop_info.right;
1126 const unsigned int height_crop = crop_info.top + crop_info.bottom;
SiCong Li4ceb4532023-03-13 15:02:23 +00001127 ARM_COMPUTE_ERROR_ON(new_width <= width_crop);
1128 ARM_COMPUTE_ERROR_ON(new_height <= height_crop);
1129 new_width -= width_crop;
1130 new_height -= height_crop;
1131
1132 output_shape.set(idx_width, new_width);
1133 output_shape.set(idx_height, new_height);
SiCong Li5a7d1572023-03-21 12:00:15 +00001134 output_shape.set(idx_batch, input[idx_batch] / (block_x * block_y));
Michalis Spyrou6a8d3b62018-08-31 10:07:09 +01001135
1136 return output_shape;
1137}
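// Illustrative usage sketch (hypothetical NHWC shape, no cropping): a 2x2 block moves data from
// the batch dimension back into the spatial dimensions:
//   TensorShape in(8U, 4U, 4U, 8U);   // NHWC: C = 8, W = 4, H = 4, N = 8
//   TensorShape out = compute_batch_to_space_shape(DataLayout::NHWC, in, 2, 2);
//   // out == (8, 8, 8, 2): width and height doubled, batches divided by block_x * block_y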
Georgios Pinitas77589b52018-08-21 14:41:35 +01001138
Michalis Spyrou22f917c2019-05-21 13:30:10 +01001139/** Calculate the depth to space output shape of a tensor
1140 *
Georgios Pinitas8a14b2c2020-09-04 20:20:56 +01001141 * @param[in] input_shape Input tensor shape
1142 * @param[in] data_layout Operation data layout
1143 * @param[in] block Block shape value
Michalis Spyrou22f917c2019-05-21 13:30:10 +01001144 *
1145 * @return the calculated shape
1146 */
Georgios Pinitas8a14b2c2020-09-04 20:20:56 +01001147inline TensorShape compute_depth_to_space_shape(const TensorShape &input_shape, DataLayout data_layout, int block)
Michalis Spyrou22f917c2019-05-21 13:30:10 +01001148{
1149 ARM_COMPUTE_ERROR_ON(block < 2);
1150
Georgios Pinitas8a14b2c2020-09-04 20:20:56 +01001151 const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
1152 const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
1153 const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
Michalis Spyrou22f917c2019-05-21 13:30:10 +01001154
Georgios Pinitas8a14b2c2020-09-04 20:20:56 +01001155 TensorShape output_shape{ input_shape };
1156 output_shape.set(idx_width, input_shape[idx_width] * block);
1157 output_shape.set(idx_height, input_shape[idx_height] * block);
1158 output_shape.set(idx_channel, input_shape[idx_channel] / (block * block));
Michalis Spyrou22f917c2019-05-21 13:30:10 +01001159
1160 return output_shape;
1161}
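// Illustrative usage sketch (hypothetical NHWC shape): a block size of 2 doubles width and height
// and divides the channels by block * block:
//   TensorShape in(16U, 5U, 5U, 1U);   // NHWC: C = 16, W = 5, H = 5, N = 1
//   TensorShape out = compute_depth_to_space_shape(in, DataLayout::NHWC, 2);   // out == (4, 10, 10, 1)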
1162
Michalis Spyroud33fe342019-01-04 17:10:25 +00001163/** Calculate the split output shape of a tensor
1164 *
1165 * @param[in] input Input tensor info
1166 * @param[in] axis Axis on which to split the input
1167 * @param[in] num_splits Number of splits
1168 *
1169 * @return the calculated shape
1170 */
Georgios Pinitase1a352c2018-09-03 12:42:19 +01001171inline TensorShape compute_split_shape(const ITensorInfo *input, unsigned int axis, unsigned int num_splits)
1172{
1173 TensorShape empty_shape;
1174 empty_shape.set(0, 0);
1175
1176 TensorShape out_shape{ input->tensor_shape() };
1177
1178 // Return empty shape if axis is invalid
1179 if(axis > input->tensor_shape().num_dimensions())
1180 {
1181 return empty_shape;
1182 }
1183
1184 size_t axis_size = out_shape[axis];
1185
1186 // Return empty shape if num_split is not valid
1187 if(axis_size % num_splits)
1188 {
1189 return empty_shape;
1190 }
1191
1192 out_shape[axis] = axis_size / num_splits;
1193 return out_shape;
1194}
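// Illustrative usage sketch (hypothetical tensor info): splitting a (8, 6) tensor into 3 parts
// along axis 1 yields (8, 2) per part; an invalid axis or a non-divisible split count returns an
// empty shape instead:
//   TensorInfo  info(TensorShape(8U, 6U), 1, DataType::F32);
//   TensorShape part = compute_split_shape(&info, 1U, 3U);   // part == (8, 2)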
1195
Michalis Spyroud33fe342019-01-04 17:10:25 +00001196/** Calculate the space to batch output shape of a tensor
1197 *
1198 * @param[in] input Input tensor info
1199 * @param[in] block_x Block shape x value
1200 * @param[in] block_y Block shape y value
1201 * @param[in] padding_left Left padding values
1202 * @param[in] padding_right Right padding values
1203 *
1204 * @return the calculated shape
1205 */
SiCong Li8893e452023-03-23 12:06:45 +00001206inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, int block_x, int block_y, const Size2D &padding_left, const Size2D &padding_right)
Michalis Spyrou16934a52018-08-21 18:03:58 +01001207{
1208 TensorShape output_shape{ input->tensor_shape() };
Michalis Spyrou13a51e12018-09-18 13:09:30 +01001209
1210 const DataLayout data_layout = input->data_layout();
1211 const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
1212 const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
1213 const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
1214
SiCong Li18bdfae2020-11-08 21:58:01 +00001215 ARM_COMPUTE_ERROR_ON((input->tensor_shape()[idx_width] + padding_left.x() + padding_right.x()) % block_x != 0);
1216 ARM_COMPUTE_ERROR_ON((input->tensor_shape()[idx_height] + padding_left.y() + padding_right.y()) % block_y != 0);
1217
1218 output_shape.set(idx_width, (input->tensor_shape()[idx_width] + padding_left.x() + padding_right.x()) / block_x);
1219 output_shape.set(idx_height, (input->tensor_shape()[idx_height] + padding_left.y() + padding_right.y()) / block_y);
1220 output_shape.set(idx_batch, input->tensor_shape()[idx_batch] * block_x * block_y);
Michalis Spyrou16934a52018-08-21 18:03:58 +01001221
1222 return output_shape;
1223}
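// Illustrative usage sketch (hypothetical tensor info, assuming the default NCHW layout, no padding):
// a 2x2 block moves spatial data into the batch dimension:
//   TensorInfo  info(TensorShape(4U, 6U, 3U, 1U), 1, DataType::F32);   // NCHW: W = 4, H = 6, C = 3, N = 1
//   TensorShape out = compute_space_to_batch_shape(&info, 2, 2, Size2D(0, 0), Size2D(0, 0));
//   // out == (2, 3, 3, 4): width and height divided by the block, batches multiplied by block_x * block_y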
Pablo Tello32521432018-11-15 14:43:10 +00001224
Manuel Bottini5b7d5372019-05-17 14:04:22 +01001225/** Calculate the space to depth output shape of a tensor
1226 *
1227 * @param[in] input Input tensor info
1228 * @param[in] block_shape Block shape value
1229 *
1230 * @return the calculated shape
1231 */
1232inline TensorShape compute_space_to_depth_shape(const ITensorInfo *input, int32_t block_shape)
1233{
1234 TensorShape output_shape{ input->tensor_shape() };
1235
1236 const DataLayout data_layout = input->data_layout();
1237 const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
1238 const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
1239 const int idx_depth = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
1240
Ramy Elgammalca1a52d2022-11-18 16:03:21 +00001241 output_shape.set(idx_width, input->tensor_shape()[idx_width] / block_shape);
1242 output_shape.set(idx_height, input->tensor_shape()[idx_height] / block_shape);
1243 output_shape.set(idx_depth, input->tensor_shape()[idx_depth] * (block_shape * block_shape));
Manuel Bottini5b7d5372019-05-17 14:04:22 +01001244
1245 return output_shape;
1246}
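// Illustrative usage sketch (hypothetical tensor info, assuming the default NCHW layout): a block
// size of 2 halves width and height and multiplies the channels by block_shape * block_shape:
//   TensorInfo  info(TensorShape(6U, 4U, 2U, 1U), 1, DataType::F32);   // NCHW: W = 6, H = 4, C = 2, N = 1
//   TensorShape out = compute_space_to_depth_shape(&info, 2);          // out == (3, 2, 8, 1)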
1247
Michalis Spyroud33fe342019-01-04 17:10:25 +00001248/** Calculate the prior box output shape of a tensor
1249 *
1250 * @param[in] input Input tensor info
1251 * @param[in] info PriorBoxLayer info
1252 *
1253 * @return the calculated shape
1254 */
Michalis Spyrou6c7c38e2018-08-29 16:28:11 +01001255inline TensorShape compute_prior_box_shape(const ITensorInfo &input, const PriorBoxLayerInfo &info)
1256{
1257 DataLayout data_layout = input.data_layout();
1258 const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
1259 const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
Pablo Tello32521432018-11-15 14:43:10 +00001260 const int num_priors = info.aspect_ratios().size() * info.min_sizes().size() + info.max_sizes().size();
Michalis Spyrou6c7c38e2018-08-29 16:28:11 +01001261
1262 TensorShape output_shape{};
1263 output_shape.set(0, input.dimension(idx_w) * input.dimension(idx_h) * num_priors * 4);
1264 output_shape.set(1, 2);
1265
1266 return output_shape;
1267}
Michalis Spyrou16934a52018-08-21 18:03:58 +01001268
Michalis Spyroud33fe342019-01-04 17:10:25 +00001269/** Calculate the padded shape of a tensor
1270 *
1271 * @param[in] input_shape Input tensor shape
1272 * @param[in] padding Paddings list
1273 *
1274 * @return the calculated shape
1275 */
Giuseppe Rossinid7647d42018-07-17 18:13:13 +01001276inline TensorShape compute_padded_shape(const TensorShape &input_shape, const PaddingList &padding)
1277{
1278 TensorShape padded_shape = input_shape;
1279 for(size_t dim = 0; dim < padding.size(); ++dim)
1280 {
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +00001281 const auto &padding_pair = padding[dim];
1282 const uint32_t shape_on_index = (padded_shape.num_dimensions() <= dim) ? 1 : input_shape[dim];
1283 padded_shape.set(dim, padding_pair.first + shape_on_index + padding_pair.second);
Giuseppe Rossinid7647d42018-07-17 18:13:13 +01001284 }
1285 return padded_shape;
1286}
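// Illustrative usage sketch (hypothetical values): each PaddingList entry adds (before, after)
// elements to the corresponding dimension:
//   TensorShape in(4U, 5U);
//   TensorShape out = compute_padded_shape(in, PaddingList{ { 1, 1 }, { 2, 0 } });   // out == (6, 7)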
1287
Michalis Spyroud33fe342019-01-04 17:10:25 +00001288/** Calculate the tiled shape of a tensor
1289 *
1290 * @param[in] input_shape Input tensor shape
1291 * @param[in] multiples Multiples list (tiling factor for each dimension)
1292 *
1293 * @return the calculated shape
1294 */
giuros013175fcf2018-11-21 09:59:17 +00001295inline TensorShape compute_tiled_shape(const TensorShape &input_shape, const Multiples &multiples)
1296{
1297 TensorShape tiled_shape = input_shape;
1298 for(size_t dim = 0; dim < multiples.size(); ++dim)
1299 {
1300 tiled_shape.set(dim, input_shape[dim] * multiples[dim]);
1301 }
1302 return tiled_shape;
1303}
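// Illustrative usage sketch (hypothetical values): every dimension is multiplied by its tiling factor:
//   TensorShape in(3U, 4U);
//   TensorShape out = compute_tiled_shape(in, Multiples{ 2, 3 });   // out == (6, 12)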
1304
Michalis Spyrouaea14c62019-01-03 11:10:25 +00001305/** Calculate the reduced shape of a tensor given an axis
1306 *
Sang-Hoon Park2697fd82019-10-15 16:49:24 +01001307 * @param[in] input Input tensor shape
1308 * @param[in] axis Axis on which to perform reduction
1309 * @param[in] keep_dims (Optional) Whether to keep the dimension after reduction operation. Defaults to true.
Michalis Spyrouaea14c62019-01-03 11:10:25 +00001310 *
1311 * @return the calculated shape
1312 */
Sang-Hoon Park2697fd82019-10-15 16:49:24 +01001313inline TensorShape compute_reduced_shape(const TensorShape &input, unsigned int axis, bool keep_dims = true)
Michalis Spyrouaea14c62019-01-03 11:10:25 +00001314{
1315 TensorShape output_shape{ input };
Sang-Hoon Park2697fd82019-10-15 16:49:24 +01001316
1317 if(!keep_dims)
1318 {
1319 output_shape.remove_dimension(axis);
1320 }
1321 else
1322 {
1323 output_shape.set(axis, 1);
1324 }
Michalis Spyrouaea14c62019-01-03 11:10:25 +00001325
1326 return output_shape;
1327}
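// Illustrative usage sketch (hypothetical values): reducing a (8, 4, 2) shape along axis 1 gives
// (8, 1, 2) when the reduced dimension is kept and (8, 2) when it is dropped:
//   TensorShape in(8U, 4U, 2U);
//   TensorShape kept    = compute_reduced_shape(in, 1);          // kept    == (8, 1, 2)
//   TensorShape dropped = compute_reduced_shape(in, 1, false);   // dropped == (8, 2)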
1328
Michalis Spyroud33fe342019-01-04 17:10:25 +00001329/** Calculate the upsampled shape of a tensor
1330 *
1331 * @param[in] input Input tensor info
1332 * @param[in] info Contains stride information (x and y)
1333 *
1334 * @return the calculated shape
1335 */
Michalis Spyrouceb889e2018-09-17 18:24:41 +01001336inline TensorShape compute_upsample_shape(const ITensorInfo &input, const Size2D &info)
1337{
1338 const DataLayout data_layout = input.data_layout();
1339 const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
1340 const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
1341
1342 TensorShape scale_out_shape(input.tensor_shape());
1343 const unsigned int out_x = input.dimension(idx_width) * info.x();
1344 const unsigned int out_y = input.dimension(idx_height) * info.y();
1345 scale_out_shape.set(idx_width, out_x);
1346 scale_out_shape.set(idx_height, out_y);
1347
1348 return scale_out_shape;
1349}
1350
Michalis Spyroud33fe342019-01-04 17:10:25 +00001351/** Get the tensor shape
1352 *
1353 * @param[in] data Input data
1354 *
1355 * @return the extracted tensor shape
1356 */
Michalis Spyrou55b3d122018-05-09 09:59:23 +01001357template <typename T>
Georgios Pinitase2220552018-07-20 13:23:44 +01001358inline TensorShape extract_shape(T *data)
Michalis Spyrou55b3d122018-05-09 09:59:23 +01001359{
Georgios Pinitase2220552018-07-20 13:23:44 +01001360 return data->info()->tensor_shape();
Michalis Spyrou55b3d122018-05-09 09:59:23 +01001361}
1362
John Kesapidescafec8f2019-02-19 15:53:59 +00001363inline TensorShape extract_shape(ITensorInfo *data)
John Kesapides917959c2019-02-04 12:37:29 +00001364{
1365 return data->tensor_shape();
1366}
John Kesapidescafec8f2019-02-19 15:53:59 +00001367inline TensorShape extract_shape(const ITensorInfo *data)
Michalis Spyrou55b3d122018-05-09 09:59:23 +01001368{
Georgios Pinitase2220552018-07-20 13:23:44 +01001369 return data->tensor_shape();
1370}
1371
1372inline TensorShape extract_shape(const TensorShape *data)
1373{
1374 return *data;
Michalis Spyrou55b3d122018-05-09 09:59:23 +01001375}
1376
Michalis Spyroua9c44722019-04-05 17:18:36 +01001377inline TensorShape extract_shape(TensorShape *data)
1378{
1379 return *data;
1380}
1381
Michalis Spyroud33fe342019-01-04 17:10:25 +00001382/** Calculate the unstack shape of a tensor
1383 *
1384 * @param[in] input_shape Input tensor shape
1385 * @param[in] axis Axis on which to perform the unstack operation
1386 *
1387 * @return the calculated shape
1388 */
Pablo Tello54303692018-11-22 16:14:36 +00001389inline TensorShape calculate_unstack_shape(TensorShape input_shape, unsigned int axis)
1390{
1391 ARM_COMPUTE_ERROR_ON(axis > input_shape.num_dimensions());
1392 input_shape.remove_dimension(axis);
1393 return input_shape;
1394}
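// Illustrative usage sketch (hypothetical values): unstacking simply removes the chosen axis:
//   TensorShape out = calculate_unstack_shape(TensorShape(3U, 4U, 5U), 1U);   // out == (3, 5)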
1395
Pablo Tello3dd5b682019-03-04 14:14:02 +00001396/** Calculate the output shape of the concatenate operation along a single axis
Michalis Spyroud33fe342019-01-04 17:10:25 +00001397 *
Pablo Tello3dd5b682019-03-04 14:14:02 +00001398 * @param[in] input Vector of pointers to the input tensors, tensor infos or shapes
1399 * @param[in] axis Axis along which to concatenate the input tensors
Michalis Spyroud33fe342019-01-04 17:10:25 +00001400 *
1401 * @return the calculated shape
1402 */
Georgios Pinitase29acf12018-07-16 14:40:09 +01001403template <typename T>
Pablo Tello3dd5b682019-03-04 14:14:02 +00001404inline TensorShape calculate_concatenate_shape(const std::vector<T *> &input, size_t axis)
Michalis Spyrou55b3d122018-05-09 09:59:23 +01001405{
Pablo Tello3dd5b682019-03-04 14:14:02 +00001406 TensorShape out_shape = extract_shape(input[0]);
Michalis Spyrou55b3d122018-05-09 09:59:23 +01001407
Georgios Pinitasdcd949d2019-04-17 11:04:28 +01001408#if defined(ARM_COMPUTE_ASSERTS_ENABLED)
Michalis Spyroua9c44722019-04-05 17:18:36 +01001409 // All dimensions must match except the axis one
1410 for(unsigned int i = 0; i < MAX_DIMS; ++i)
1411 {
1412 if(i == axis)
1413 {
1414 continue;
1415 }
1416
1417 for(const auto &tensor : input)
1418 {
1419 ARM_COMPUTE_ERROR_ON(tensor == nullptr);
1420 const TensorShape shape = extract_shape(tensor);
1421 ARM_COMPUTE_ERROR_ON(out_shape[i] != shape[i]);
1422 }
1423 }
Georgios Pinitasdcd949d2019-04-17 11:04:28 +01001424#endif // defined(ARM_COMPUTE_ASSERTS_ENABLED)
Michalis Spyroua9c44722019-04-05 17:18:36 +01001425
1426 // Calculate output shape
Pablo Tello3dd5b682019-03-04 14:14:02 +00001427 size_t new_size = 0;
1428 for(const auto &tensor : input)
Michalis Spyrou55b3d122018-05-09 09:59:23 +01001429 {
Georgios Pinitase2220552018-07-20 13:23:44 +01001430 const TensorShape shape = extract_shape(tensor);
Pablo Tello3dd5b682019-03-04 14:14:02 +00001431 new_size += shape[axis];
Michalis Spyrou55b3d122018-05-09 09:59:23 +01001432 }
1433
Pablo Tello3dd5b682019-03-04 14:14:02 +00001434 out_shape.set(axis, new_size);
Michalis Spyrou55b3d122018-05-09 09:59:23 +01001435
1436 return out_shape;
1437}
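// Illustrative usage sketch (hypothetical shapes): concatenating along axis 0 sums that dimension
// while all the other dimensions must match:
//   TensorShape a(4U, 6U);
//   TensorShape b(8U, 6U);
//   std::vector<TensorShape *> inputs{ &a, &b };
//   TensorShape out = calculate_concatenate_shape(inputs, 0);   // out == (12, 6)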
Michalis Spyroud33fe342019-01-04 17:10:25 +00001438/** Calculate the stack output shape of a tensor
1439 *
1440 * @param[in] a Input tensor info
1441 * @param[in] axis Axis on which to perform the stack operation
1442 * @param[in] num_tensors Number of tensors to stack
1443 *
1444 * @return the calculated shape
1445 */
Gian Marco Iodice8aa985e2018-11-27 15:58:08 +00001446inline TensorShape compute_stack_shape(const ITensorInfo &a, unsigned int axis, unsigned int num_tensors)
1447{
1448 ARM_COMPUTE_ERROR_ON(axis > a.num_dimensions());
1449 ARM_COMPUTE_ERROR_ON(a.num_dimensions() > 4);
1450
1451 TensorShape shape_out{ a.tensor_shape() };
1452 shape_out.set(axis, num_tensors);
1453
1454 unsigned int i_shift = 0;
1455
1456 for(unsigned int i = 0; i < a.num_dimensions(); ++i)
1457 {
1458 if(i == axis)
1459 {
1460 i_shift++;
1461 }
1462
1463 shape_out.set(i + i_shift, a.tensor_shape()[i]);
1464 }
1465 return shape_out;
1466}
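// Illustrative usage sketch (hypothetical tensor info): stacking 4 tensors of shape (3, 2) along
// axis 0 inserts a new dimension of size 4 in front of the original ones:
//   TensorInfo  info(TensorShape(3U, 2U), 1, DataType::F32);
//   TensorShape out = compute_stack_shape(info, 0U, 4U);   // out == (4, 3, 2)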
Manuel Bottini8529bd62018-11-21 11:53:04 +00001467
Adnan AlSinane4563a02021-09-01 15:32:03 +01001468/** Calculate the output shape of 3d Convolution
1469 *
1470 * @param[in] src Input tensor shape
1471 * @param[in] weights Weights tensor shape
1472 * @param[in] conv3d_info 3d Convolution Parameters object
1473 *
1474 * @return the calculated shape
1475 */
1476inline TensorShape compute_conv3d_shape(const TensorShape &src, const TensorShape &weights, const Conv3dInfo &conv3d_info)
1477{
1478 // Weight tensor shape indices (D H W Cin Cout)
1479 constexpr unsigned int weights_depth_dim = 4u;
1480 constexpr unsigned int weights_height_dim = 3u;
1481 constexpr unsigned int weights_width_dim = 2u;
1482 constexpr unsigned int weights_CHout_dim = 0u;
1483
1484 // Source/Destination Tensor shape indices (N D H W C)
1485 constexpr unsigned int batch_dim = 4u;
1486 constexpr unsigned int depth_dim = 3u;
1487 constexpr unsigned int height_dim = 2u;
1488 constexpr unsigned int width_dim = 1u;
1489 constexpr unsigned int channel_dim = 0u;
1490
1491 TensorShape output_shape{ src };
1492 const size_t pad_left = conv3d_info.padding.left;
1493 const size_t pad_right = conv3d_info.padding.right;
1494 const size_t pad_top = conv3d_info.padding.top;
1495 const size_t pad_bottom = conv3d_info.padding.bottom;
1496 const size_t pad_front = conv3d_info.padding.front;
1497 const size_t pad_back = conv3d_info.padding.back;
1498 const size_t dilation_x = conv3d_info.dilation.width;
1499 const size_t dilation_y = conv3d_info.dilation.height;
1500 const size_t dilation_z = conv3d_info.dilation.depth;
1501 const size_t stride_x = conv3d_info.stride.x();
1502 const size_t stride_y = conv3d_info.stride.y();
1503 const size_t stride_z = conv3d_info.stride.z();
1504
1505 int output_width_size = 0;
1506 int output_height_size = 0;
1507 int output_depth_size = 0;
1508
1509 switch(conv3d_info.round_type)
1510 {
1511 case DimensionRoundingType::FLOOR:
1512 output_width_size = static_cast<int>(std::floor((static_cast<float>(src[width_dim] + pad_left + pad_right - (dilation_x * (weights[weights_width_dim] - 1) + 1)) / stride_x) + 1));
1513 output_height_size = static_cast<int>(std::floor((static_cast<float>(src[height_dim] + pad_top + pad_bottom - (dilation_y * (weights[weights_height_dim] - 1) + 1)) / stride_y) + 1));
1514 output_depth_size = static_cast<int>(std::floor((static_cast<float>(src[depth_dim] + pad_front + pad_back - (dilation_z * (weights[weights_depth_dim] - 1) + 1)) / stride_z) + 1));
1515 break;
1516 case DimensionRoundingType::CEIL:
1517 output_width_size = static_cast<int>(std::ceil((static_cast<float>(src[width_dim] + pad_left + pad_right - (dilation_x * (weights[weights_width_dim] - 1) + 1)) / stride_x) + 1));
1518 output_height_size = static_cast<int>(std::ceil((static_cast<float>(src[height_dim] + pad_top + pad_bottom - (dilation_y * (weights[weights_height_dim] - 1) + 1)) / stride_y) + 1));
1519 output_depth_size = static_cast<int>(std::ceil((static_cast<float>(src[depth_dim] + pad_front + pad_back - (dilation_z * (weights[weights_depth_dim] - 1) + 1)) / stride_z) + 1));
1520 break;
1521 default:
1522 ARM_COMPUTE_ERROR("Unsupported rounding type");
1523 }
1524
1525 output_shape.set(batch_dim, src[batch_dim]);
1526 output_shape.set(width_dim, output_width_size);
1527 output_shape.set(height_dim, output_height_size);
1528 output_shape.set(depth_dim, output_depth_size);
1529 output_shape.set(channel_dim, weights[weights_CHout_dim]);
1530 return output_shape;
1531}
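// Illustrative usage sketch (hypothetical NDHWC shapes; assumes a default-constructed Conv3dInfo,
// i.e. unit strides, zero padding, unit dilation and FLOOR rounding):
//   TensorShape src(3U, 8U, 8U, 8U, 1U);        // C = 3, W = 8, H = 8, D = 8, N = 1
//   TensorShape weights(16U, 3U, 3U, 3U, 3U);   // Cout = 16, Cin = 3, W = 3, H = 3, D = 3
//   TensorShape dst = compute_conv3d_shape(src, weights, Conv3dInfo{});
//   // dst == (16, 6, 6, 6, 1): each spatial dimension shrinks by (kernel_size - 1)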
1532
Gunes Bayir918a9fb2022-02-15 11:40:13 +00001533/** Calculate the output pool3d shape of a tensor
1534 *
1535 * @param[in] src Input tensor shape
1536 * @param[in] pool3d_info Pooling layer info
1537 *
1538 * @return the calculated shape
1539 */
ramelg0137515692022-02-26 22:06:20 +00001540inline TensorShape compute_pool3d_shape(const TensorShape &src, Pooling3dLayerInfo pool3d_info)
Gunes Bayir918a9fb2022-02-15 11:40:13 +00001541{
1542 TensorShape output_shape{ src };
1543
ramelg0137515692022-02-26 22:06:20 +00001544 const auto data_layout = DataLayout::NDHWC;
1545 const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
1546 const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
1547 const int idx_depth = get_data_layout_dimension_index(data_layout, DataLayoutDimension::DEPTH);
1548 const int pool_size_width = pool3d_info.is_global_pooling ? src[idx_width] : pool3d_info.pool_size.width;
1549 const int pool_size_height = pool3d_info.is_global_pooling ? src[idx_height] : pool3d_info.pool_size.height;
1550 const int pool_size_depth = pool3d_info.is_global_pooling ? src[idx_depth] : pool3d_info.pool_size.depth;
1551 int output_width = 0;
1552 int output_height = 0;
1553 int output_depth = 0;
Gunes Bayir918a9fb2022-02-15 11:40:13 +00001554
ramelg0137515692022-02-26 22:06:20 +00001555 std::tie(output_width, output_height, output_depth) = scaled_3d_dimensions_signed(src[idx_width], src[idx_height], src[idx_depth], pool_size_width, pool_size_height,
1556 pool_size_depth, pool3d_info);
Gunes Bayir918a9fb2022-02-15 11:40:13 +00001557
ramelg0137515692022-02-26 22:06:20 +00001558 ARM_COMPUTE_ERROR_ON_MSG((output_width < 1 || output_height < 1 || output_depth < 1), "Calculated output dimension size is invalid");
Gunes Bayir918a9fb2022-02-15 11:40:13 +00001559
ramelg0137515692022-02-26 22:06:20 +00001560 output_shape.set(idx_width, static_cast<size_t>(output_width));
1561 output_shape.set(idx_height, static_cast<size_t>(output_height));
1562 output_shape.set(idx_depth, static_cast<size_t>(output_depth));
Gunes Bayir918a9fb2022-02-15 11:40:13 +00001563
1564 return output_shape;
1565}
1566
Pablo Marquez Tello894659a2022-05-13 12:20:16 +01001567/** Calculate the gather output shape of a tensor
1568 *
1569 * @param[in] input_shape Input tensor shape
1570 * @param[in] indices_shape Indices tensor shape. Only 2d and 3d indices are supported
1571 * @param[in] actual_axis Axis to be used in the computation
1572 *
1573 * @note Let input_shape be (X,Y,Z), the indices shape be (W,O,P) and the axis be 1.
1574 * The new shape is computed by replacing the axis in the input shape with
1575 * the indices shape, so the output shape will be (X,W,O,P,Z)
1576 *
1577 * @return the calculated shape
1578 */
Manuel Bottini8529bd62018-11-21 11:53:04 +00001579inline TensorShape compute_gather_shape(const TensorShape &input_shape, const TensorShape &indices_shape, uint32_t actual_axis)
1580{
SiCong Li4ceb4532023-03-13 15:02:23 +00001581 const auto input_num_dims = input_shape.num_dimensions();
Viet-Hoa Do37c989a2023-02-24 15:52:21 +00001582 const auto indices_num_dims = indices_shape.num_dimensions();
1583
1584 ARM_COMPUTE_ERROR_ON(actual_axis >= input_num_dims);
1585 ARM_COMPUTE_ERROR_ON(input_num_dims + indices_num_dims - 1 > Coordinates::num_max_dimensions);
1586
1587 TensorShape output_shape;
SiCong Li4ceb4532023-03-13 15:02:23 +00001588 size_t dim_no = 0;
Viet-Hoa Do37c989a2023-02-24 15:52:21 +00001589
1590 for(; dim_no < actual_axis; ++dim_no)
Pablo Marquez Tello894659a2022-05-13 12:20:16 +01001591 {
Viet-Hoa Do37c989a2023-02-24 15:52:21 +00001592 output_shape.set(dim_no, input_shape[dim_no]);
Pablo Marquez Tello894659a2022-05-13 12:20:16 +01001593 }
Viet-Hoa Do37c989a2023-02-24 15:52:21 +00001594
1595 for(; dim_no < actual_axis + indices_num_dims; ++dim_no)
Pablo Marquez Tello894659a2022-05-13 12:20:16 +01001596 {
Viet-Hoa Do37c989a2023-02-24 15:52:21 +00001597 output_shape.set(dim_no, indices_shape[dim_no - actual_axis]);
Pablo Marquez Tello894659a2022-05-13 12:20:16 +01001598 }
Viet-Hoa Do37c989a2023-02-24 15:52:21 +00001599
1600 for(; dim_no < input_num_dims + indices_num_dims - 1; ++dim_no)
1601 {
1602 output_shape.set(dim_no, input_shape[dim_no + 1 - indices_num_dims]);
1603 }
1604
1605 ARM_COMPUTE_ERROR_ON(input_shape.total_size() * indices_shape.total_size() != output_shape.total_size() * input_shape[actual_axis]);
1606
Manuel Bottini8529bd62018-11-21 11:53:04 +00001607 return output_shape;
1608}
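// Illustrative usage sketch (hypothetical values) matching the note above: gathering along axis 1
// of a (3, 4, 5) tensor with (2, 6) indices replaces that axis with the indices dimensions:
//   TensorShape out = compute_gather_shape(TensorShape(3U, 4U, 5U), TensorShape(2U, 6U), 1U);
//   // out == (3, 2, 6, 5)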
Georgios Pinitas358ca202017-12-07 16:47:52 +00001609} // namespace shape_calculator
1610} // namespace misc
1611} // namespace arm_compute
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +00001612#endif /* ACL_ARM_COMPUTE_CORE_UTILS_MISC_SHAPECALCULATOR */