/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H
#define ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/Utils.h"

#include "arm_compute/core/utils/helpers/tensor_transform.h"

#include <cmath>

namespace arm_compute
{
namespace misc
{
namespace shape_calculator
{
/** Calculate the output tensor shape of a vector input given the convolution dimensions
 *
 * @param[in] input       Input tensor shape
 * @param[in] conv_w      Convolution width
 * @param[in] conv_h      Convolution height
 * @param[in] data_layout Data layout
 *
 * @return the calculated shape
 */
inline TensorShape compute_vector_to_tensor_output_shape(const TensorShape &input, size_t conv_w, size_t conv_h, const DataLayout &data_layout)
{
    const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape output_shape(input);
    output_shape.set(idx_w, conv_w);
    output_shape.set(idx_h, conv_h);
    output_shape.set(idx_c, input.x() / (conv_w * conv_h));

    return output_shape;
}
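// Illustrative usage sketch (not part of the original header; numbers assumed for the example):
//   TensorShape in{ 18U }; // 1D vector of 18 elements
//   TensorShape out = compute_vector_to_tensor_output_shape(in, 3U, 3U, DataLayout::NCHW);
//   // out == [ 3, 3, 2 ] since the channel dimension is 18 / (3 * 3) = 2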

/** Calculate the permuted shape of an input given a permutation vector
 *
 * @param[in] input Input tensor info
 * @param[in] perm  Permutation vector
 *
 * @return the calculated shape
 */
inline TensorShape compute_permutation_output_shape(const ITensorInfo &input, const PermutationVector &perm)
{
    TensorShape output_shape = input.tensor_shape();
    permute(output_shape, perm);
    return output_shape;
}

/** Calculate the output shape of the reorg layer given a stride
 *
 * @param[in] input  Input tensor info
 * @param[in] stride Stride
 *
 * @return the calculated shape
 */
inline TensorShape compute_reorg_output_shape(const ITensorInfo &input, int32_t stride)
{
    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    ARM_COMPUTE_ERROR_ON(stride <= 0);
    ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_width] % stride != 0), "The width of the input tensor must be a multiple of stride");
    ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_height] % stride != 0), "The height of the input tensor must be a multiple of stride");

    TensorShape output_shape{ input.tensor_shape() };

    output_shape.set(idx_width, output_shape[idx_width] / stride);
    output_shape.set(idx_height, output_shape[idx_height] / stride);
    output_shape.set(idx_channel, output_shape[idx_channel] * stride * stride);

    return output_shape;
}
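// Illustrative usage sketch (not part of the original header; shapes assumed for the example):
//   TensorInfo in(TensorShape(8U, 8U, 4U, 1U), 1, DataType::F32); // NCHW [W, H, C, N]
//   TensorShape out = compute_reorg_output_shape(in, 2);
//   // out == [ 4, 4, 16, 1 ]: spatial dims divided by the stride, channels multiplied by stride^2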

/** Calculate the reshaped shape of the weights
 *
 * @param[in] weights    Weights tensor info
 * @param[in] has_bias   (Optional) Set to true if there is bias
 * @param[in] num_groups (Optional) Number of groups
 *
 * @return the calculated shape of the reshaped weights
 */
inline TensorShape compute_weights_reshaped_shape(const ITensorInfo &weights, bool has_bias = false, unsigned int num_groups = 1)
{
    // A number of groups greater than one is only supported for NCHW data layout, and the number of weights must be a multiple of it.
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(weights.data_layout() == DataLayout::NHWC && num_groups > 1);
    ARM_COMPUTE_ERROR_ON((weights.dimension(3) % num_groups) != 0);

    // Calculate output shape
    TensorShape weights_reshaped{ weights.tensor_shape() };
    weights_reshaped.set(3, weights_reshaped[3] / num_groups);

    weights_reshaped.collapse(3);
    const size_t tmp_dim = weights_reshaped[0];
    weights_reshaped.set(0, weights_reshaped[1]);
    weights_reshaped.set(1, tmp_dim + (has_bias ? 1 : 0));
    if(weights.num_dimensions() < 5)
    {
        weights_reshaped.set(2, num_groups);
    }

    return weights_reshaped;
}
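// Illustrative usage sketch (not part of the original header; shapes assumed for the example):
//   TensorInfo weights(TensorShape(3U, 3U, 8U, 16U), 1, DataType::F32); // NCHW [Kw, Kh, Ifm, Ofm]
//   TensorShape out = compute_weights_reshaped_shape(weights, false /* has_bias */, 2 /* num_groups */);
//   // out == [ 8, 72, 2 ], i.e. [ Ofm / num_groups, Kw * Kh * Ifm, num_groups ]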

/** Calculate the Left Hand Side matrix reshaped shape
 *
 * @param[in] a                       Input tensor info
 * @param[in] lhs_info                Left Hand Side matrix information
 * @param[in] reinterpret_input_as_3d (Optional) Set to true if the input needs to be interpreted as 3d
 *
 * @return the calculated shape
 */
inline TensorShape compute_lhs_reshaped_shape(const ITensorInfo &a, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false)
{
    ARM_COMPUTE_ERROR_ON(lhs_info.m0 == 0);
    ARM_COMPUTE_ERROR_ON(lhs_info.k0 == 0);
    ARM_COMPUTE_ERROR_ON(lhs_info.v0 == 0);

    // Input width/height
    const unsigned int input_width  = a.dimension(0);
    const unsigned int input_height = reinterpret_input_as_3d ? a.dimension(1) * a.dimension(2) : a.dimension(1);

    // Number of horizontal/vertical blocks in the input tensor
    const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(lhs_info.k0));
    const unsigned int num_vert_blocks  = std::ceil(input_height / static_cast<float>(lhs_info.m0));

    // Block size
    const unsigned int block_size = lhs_info.m0 * lhs_info.k0;

    // Output width/height
    const unsigned int output_width  = block_size * num_horiz_blocks * lhs_info.v0;
    const unsigned int output_height = std::ceil(num_vert_blocks / static_cast<float>(lhs_info.v0));

    TensorShape lhs_shape{ a.tensor_shape() };
    lhs_shape.set(0, output_width);
    lhs_shape.set(1, output_height);

    if((reinterpret_input_as_3d) && (lhs_shape.num_dimensions() > 2))
    {
        // When the data format is NHWC and the shapes are Nx1x1
        // the tensor shape num_dimensions is automatically set to 1 instead of 3.
        // To avoid failures by removing a dimension that doesn't exist
        // check if the number of dimensions is greater than 2.
        lhs_shape.remove_dimension(2);
    }

    return lhs_shape;
}
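// Illustrative usage sketch (not part of the original header; block sizes assumed for the example):
//   GEMMLHSMatrixInfo lhs_info{};
//   lhs_info.m0 = 4; lhs_info.k0 = 4; lhs_info.v0 = 2;
//   TensorInfo a(TensorShape(31U, 64U), 1, DataType::F32); // [K, M]
//   TensorShape out = compute_lhs_reshaped_shape(a, lhs_info);
//   // num_horiz_blocks = ceil(31/4) = 8, num_vert_blocks = ceil(64/4) = 16
//   // out == [ (4*4) * 8 * 2, ceil(16/2) ] == [ 256, 8 ]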

/** Calculate the Right Hand Side matrix reshaped shape
 *
 * @param[in] a        Input tensor info
 * @param[in] rhs_info Right Hand Side matrix information
 *
 * @return the calculated shape
 */
inline TensorShape compute_rhs_reshaped_shape(const ITensorInfo &a, const GEMMRHSMatrixInfo &rhs_info)
{
    ARM_COMPUTE_ERROR_ON(rhs_info.n0 == 0);
    ARM_COMPUTE_ERROR_ON(rhs_info.k0 == 0);
    ARM_COMPUTE_ERROR_ON(rhs_info.h0 == 0);

    // Input width/height
    const unsigned int input_width  = a.dimension(0);
    const unsigned int input_height = a.dimension(1);

    // Number of horizontal/vertical blocks in the input tensor
    const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(rhs_info.n0));
    const unsigned int num_vert_blocks  = std::ceil(input_height / static_cast<float>(rhs_info.k0));

    // Block size
    const unsigned int block_size = rhs_info.n0 * rhs_info.k0;

    // Output width/height
    const unsigned int output_width  = block_size * num_vert_blocks * rhs_info.h0;
    const unsigned int output_height = std::ceil(num_horiz_blocks / static_cast<float>(rhs_info.h0));

    TensorShape rhs_shape{ a.tensor_shape() };
    rhs_shape.set(0, output_width);
    rhs_shape.set(1, output_height);

    return rhs_shape;
}

/** Calculate the interleaved shape of an input tensor
 *
 * @param[in] a                         Input tensor info
 * @param[in] mult_interleave4x4_height (Optional) Interleave4x4 height
 * @param[in] reinterpret_input_as_3d   (Optional) Set to true if the input needs to be interpreted as 3d
 *
 * @return the calculated shape
 */
inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false)
{
    // The interleaved output matrix will have the following shape: [ a_height * W, ceil(a_width / W) ] where W = 4 * mult_interleave4x4_height
    ARM_COMPUTE_ERROR_ON(mult_interleave4x4_height < 1);
    const int   interleave_width = 4 * mult_interleave4x4_height;
    TensorShape shape_interleaved_a{ a.tensor_shape() };
    shape_interleaved_a.set(0, a.dimension(0) * interleave_width);
    if(reinterpret_input_as_3d)
    {
        const int M      = a.dimension(1) * a.dimension(2);
        const int height = std::ceil(M / static_cast<float>(interleave_width));
        shape_interleaved_a.set(1, height);

        // When the data format is NHWC and the shapes are Nx1x1
        // the tensor shape num_dimensions is automatically set to 1 instead of 3.
        // To avoid failures by removing a dimension that doesn't exist
        // check if the number of dimensions is greater than 2.
        if(shape_interleaved_a.num_dimensions() > 2)
        {
            shape_interleaved_a.remove_dimension(2);
        }
    }
    else
    {
        shape_interleaved_a.set(1, std::ceil(a.dimension(1) / static_cast<float>(interleave_width)));
    }

    return shape_interleaved_a;
}
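// Illustrative usage sketch (not part of the original header; numbers assumed for the example):
//   TensorInfo a(TensorShape(31U, 30U), 1, DataType::F32); // [K, M]
//   TensorShape out = compute_interleaved_shape(a); // W = 4 * 1
//   // out == [ 31 * 4, ceil(30 / 4) ] == [ 124, 8 ]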

/** Calculate the reshaped shape of the weights to use in depthwise convolution
 *
 * @param[in] input Input tensor info
 * @param[in] info  Depthwise convolution information to be used for reshaping.
 *
 * @return the calculated shape
 */
inline TensorShape compute_reshaped_depthwise_weights_shape(const ITensorInfo &input, const DepthwiseConvolutionReshapeInfo &info)
{
    const auto  data_layout = input.data_layout();
    TensorShape weights_shape{};

    const int    width_idx    = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int    height_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int    channel_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const size_t num_channels = input.dimension(channel_idx);
    const size_t num_rows     = input.dimension(height_idx);
    const size_t num_cols     = input.dimension(width_idx);

    weights_shape.set(0, num_rows * num_cols * info.c0);
    weights_shape.set(1, DIV_CEIL(num_channels, info.c0));
    return weights_shape;
}

/** Calculate the transposed 1xW shape
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_transpose1xW_shape(const ITensorInfo &b)
{
    // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
    TensorShape shape_transposed1xW_b{ b.tensor_shape() };
    shape_transposed1xW_b.set(0, b.dimension(1) * 16);
    shape_transposed1xW_b.set(1, std::ceil(b.dimension(0) / 16.f));

    return shape_transposed1xW_b;
}

/** Calculate the transposed 1xW width element shape
 *
 * @param[in] b                       Input tensor info
 * @param[in] mult_transpose1xW_width (Optional) Transpose1xW width
 *
 * @return the calculated shape
 */
inline TensorShape compute_transpose1xW_with_element_size_shape(const ITensorInfo &b, int mult_transpose1xW_width = 1)
{
    // Note: mult_transpose1xW_width expresses the number of chunks with size 1x(W) we want to store on the same row
    // The transpose1xW output matrix will have the following shape:
    // [ b_height * W, ceil(b_width / W) ] where W = (16 / element size of the tensor) * mult_transpose1xW_width
    ARM_COMPUTE_ERROR_ON(mult_transpose1xW_width < 1);
    TensorShape  shape_transposed1xW_b{ b.tensor_shape() };
    const size_t transpose_width = (16 / b.element_size()) * mult_transpose1xW_width;
    shape_transposed1xW_b.set(0, b.dimension(1) * transpose_width);
    shape_transposed1xW_b.set(1, static_cast<size_t>(std::ceil(b.dimension(0) / static_cast<float>(transpose_width))));

    return shape_transposed1xW_b;
}
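// Illustrative usage sketch (not part of the original header; numbers assumed for the example):
//   TensorInfo b(TensorShape(24U, 31U), 1, DataType::F32); // [N, K], element size 4 bytes
//   TensorShape out = compute_transpose1xW_with_element_size_shape(b); // W = (16 / 4) * 1 = 4
//   // out == [ 31 * 4, ceil(24 / 4) ] == [ 124, 6 ]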

/** Calculate the reductionA shape used in GEMMLowp
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_reductionA_shape(const ITensorInfo &b)
{
    TensorShape shape_vector_sum_col{ b.tensor_shape() };
    if(shape_vector_sum_col.num_dimensions() > 1)
    {
        shape_vector_sum_col.remove_dimension(1);
    }

    return shape_vector_sum_col;
}

/** Calculate the reductionB shape used in GEMMLowp
 *
 * @param[in] a Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_reductionB_shape(const ITensorInfo &a)
{
    TensorShape shape_vector_sum_row{ a.tensor_shape() };
    shape_vector_sum_row.set(Window::DimX, a.dimension(1));
    if(shape_vector_sum_row.num_dimensions() > 1)
    {
        shape_vector_sum_row.remove_dimension(1);
    }

    return shape_vector_sum_row;
}

/** Calculate the Col2Im shape
 *
 * @param[in] input           Input tensor info
 * @param[in] convolved_dims  Convolved dimensions
 * @param[in] batch_size_on_z True if batch size is on z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape
 */
inline TensorShape compute_col2im_shape(const ITensorInfo &input, const Size2D &convolved_dims, bool batch_size_on_z, unsigned int num_groups = 1)
{
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(input.tensor_shape()[1] != (convolved_dims.area()));
    ARM_COMPUTE_ERROR_ON((num_groups > 1) && input.tensor_shape()[2] != num_groups);

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape col2im_shape{ input.tensor_shape() };
    // If batches start on the 3rd dimension, shift dimensions right by 1 to retain the upper tensor shape,
    // as the first three will be overwritten by the H, W, C data
    if(batch_size_on_z && num_groups == 1)
    {
        col2im_shape.shift_right(1);
    }
    col2im_shape.set(width_idx, convolved_dims.width);
    col2im_shape.set(height_idx, convolved_dims.height);
    col2im_shape.set(channel_idx, input.tensor_shape()[0] * num_groups);

    return col2im_shape;
}

/** Calculate the transposed shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_transposed_shape(const ITensorInfo &input)
{
    TensorShape shape_transposed{ input.tensor_shape() };

    shape_transposed.set(0, input.dimension(1));
    shape_transposed.set(1, input.dimension(0));

    return shape_transposed;
}

/** Calculate the depthwise convolution output shape of a tensor
 *
 * @param[in] input            Input tensor info
 * @param[in] weights          Weights tensor info
 * @param[in] conv_info        Padding and stride information to use for the convolution.
 * @param[in] depth_multiplier Multiplier to apply to the input's depth in order to retrieve the output's depth.
 * @param[in] dilation         Dilation, in elements, across x and y. Defaults to (1, 1).
 *
 * @return the calculated shape
 */
inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info, unsigned int depth_multiplier, const Size2D &dilation = Size2D(1U, 1U))
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    const DataLayout weights_data_layout = weights.data_layout();
    const int        weights_width_idx   = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::WIDTH);
    const int        weights_height_idx  = get_data_layout_dimension_index(weights_data_layout, DataLayoutDimension::HEIGHT);

    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_shape[width_idx], input_shape[height_idx],
                                                              weights_shape[weights_width_idx], weights_shape[weights_height_idx],
                                                              conv_info, dilation);

    TensorShape output_shape{ input_shape };
    output_shape.set(width_idx, output_width);
    output_shape.set(height_idx, output_height);
    output_shape.set(channel_idx, input_shape[channel_idx] * depth_multiplier);

    return output_shape;
}
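// Illustrative usage sketch (not part of the original header; shapes assumed for the example):
//   TensorInfo in(TensorShape(16U, 16U, 8U), 1, DataType::F32);    // NCHW [W, H, C]
//   TensorInfo weights(TensorShape(3U, 3U, 8U), 1, DataType::F32); // 3x3 depthwise kernel
//   PadStrideInfo conv_info(1, 1, 1, 1);                           // stride 1, pad 1 ("same")
//   TensorShape out = compute_depthwise_convolution_shape(in, weights, conv_info, 2);
//   // out == [ 16, 16, 16 ]: spatial dims preserved, channels = 8 * depth_multiplier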

/** Calculate the upsampled output shape used for deconvolution
 *
 * @param[in]  input    Input tensor info
 * @param[in]  weights  Weights tensor shape
 * @param[in]  sx       Stride on x axis
 * @param[in]  sy       Stride on y axis
 * @param[in]  out_dims Output shape dimensions
 * @param[out] padx     Padding on x axis
 * @param[out] pady     Padding on y axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_deconvolution_upsampled_shape(const ITensorInfo &input, const ITensorInfo &weights, unsigned int sx, unsigned int sy,
                                                         std::pair<unsigned int, unsigned int> &out_dims, unsigned int &padx, unsigned int &pady)
{
    const DataLayout data_layout = input.data_layout();
    const size_t     idx_w       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t     idx_h       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    // Find the upsampled dimensions
    unsigned int out_x = (input.dimension(idx_w) - 1) * sx + 1;
    unsigned int out_y = (input.dimension(idx_h) - 1) * sy + 1;

    // Find the padding needed for the convolution with stride 1 in order to match output shape
    padx = out_dims.first - (out_x - weights.dimension(idx_w) + 1);
    pady = out_dims.second - (out_y - weights.dimension(idx_h) + 1);
    out_x += padx;
    out_y += pady;

    TensorShape scale_out_shape(input.tensor_shape());
    scale_out_shape.set(idx_w, out_x);
    scale_out_shape.set(idx_h, out_y);

    return scale_out_shape;
}
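// Illustrative usage sketch (not part of the original header; numbers assumed for the example):
//   TensorInfo in(TensorShape(4U, 4U, 1U), 1, DataType::F32);
//   TensorInfo weights(TensorShape(3U, 3U, 1U, 1U), 1, DataType::F32);
//   std::pair<unsigned int, unsigned int> out_dims{ 9U, 9U }; // target deconvolution output
//   unsigned int padx = 0, pady = 0;
//   TensorShape scale_out = compute_deconvolution_upsampled_shape(in, weights, 2U, 2U, out_dims, padx, pady);
//   // upsampled x = (4 - 1) * 2 + 1 = 7, padx = 9 - (7 - 3 + 1) = 4 -> scale_out == [ 11, 11, 1 ],
//   // which a stride-1 3x3 convolution reduces back to the target 9x9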

/** Calculate the output shape of the deconvolution layer
 *
 * @param[in] out_dims Output x and y shape dimensions
 * @param[in] input    Input tensor info
 * @param[in] weights  Weights tensor shape
 *
 * @return the calculated shape
 */
inline TensorShape compute_deconvolution_output_shape(const std::pair<unsigned int, unsigned int> &out_dims, const ITensorInfo &input, const ITensorInfo &weights)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        batch_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    TensorShape out_shape{ input_shape };
    out_shape.set(width_idx, out_dims.first);
    out_shape.set(height_idx, out_dims.second);
    out_shape.set(channel_idx, weights_shape[batch_idx]);
    return out_shape;
}

/** Calculate the im2col output shape of a tensor
 *
 * @param[in] input           Input tensor info
 * @param[in] kernel_dims     The kernel dimensions (width and height).
 * @param[in] conv_info       Contains padding and stride information
 * @param[in] has_bias        In case biases are provided expands the matrix with 1
 * @param[in] dilation        Dilation, in elements, across x and y
 * @param[in] batch_size_on_z True if batch size is on z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape
 */
inline TensorShape compute_im2col_conv_shape(const ITensorInfo *input, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, bool batch_size_on_z,
                                             unsigned int num_groups = 1)
{
    // The output shape will be the 3D shape [ out_channels * kernel_area, num_elems_per_out_channel, batches ] if batch_size_on_z == true
    // or the 4D shape [ out_channels * kernel_area / num_groups, num_elems_per_out_channel, num_groups, batches ] if batch_size_on_z == false

    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && input->data_layout() != DataLayout::NCHW);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && batch_size_on_z);

    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    std::pair<unsigned int, unsigned int> out_dims = scaled_dimensions(output_shape[width_idx], output_shape[height_idx], kernel_dims.width, kernel_dims.height, conv_info, dilation);
    output_shape.set(0, (output_shape[channel_idx] / num_groups * kernel_dims.area() + (has_bias ? 1 : 0))); // NOLINT
    output_shape.set(1, (out_dims.first * out_dims.second));
    if(batch_size_on_z && output_shape.num_dimensions() >= 3)
    {
        output_shape.remove_dimension(2);
    }
    else
    {
        output_shape.set(2, num_groups);
    }

    return output_shape;
}
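// Illustrative usage sketch (not part of the original header; shapes assumed for the example):
//   TensorInfo in(TensorShape(4U, 4U, 3U, 2U), 1, DataType::F32); // NCHW [W, H, C, N]
//   TensorShape out = compute_im2col_conv_shape(&in, Size2D(2U, 2U), PadStrideInfo(), true /* has_bias */,
//                                               Size2D(1U, 1U), true /* batch_size_on_z */);
//   // out_dims = (3, 3), so out == [ 3 * 2 * 2 + 1, 3 * 3, 2 ] == [ 13, 9, 2 ]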

/** Calculate the flattened output shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_flatten_shape(const ITensorInfo *input)
{
    // The output shape will be the flattened version of the input (i.e. [ width * height * channels, num_batches, ... ] ). Used for FlattenLayer and FullyConnectedLayer.

    TensorShape output_shape{ input->tensor_shape() };

    output_shape.collapse(3);

    return output_shape;
}

/** Calculate the softmax output shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] axis  (Optional) Softmax axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_softmax_shape(const ITensorInfo *input, size_t axis = 1)
{
    // The output shape will be a 2D version of the input. For instance:
    // - [x,y,z] and axis 1 will return [x, y*z]
    // - [x,y,z,w] and axis 2 will return [x*y, w*z]
    // - [x,y,z,w] and axis 3 will return [x*y*z, w]
    TensorShape shape2D = input->tensor_shape();

    if(axis < input->num_dimensions())
    {
        // Collapse from axis onward (this changes the shape)
        shape2D.collapse_from(axis);

        // Collapse the rest (collapse is inclusive)
        shape2D.collapse(shape2D.num_dimensions() - 1);
    }
    else
    {
        // Collapse everything
        shape2D.collapse(shape2D.num_dimensions());
    }

    if(axis == 0)
    {
        // If axis is zero the first dim should be one. Since
        // collapse is an inclusive operation we need to shift
        shape2D.shift_right(1);
    }

    return shape2D;
}
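// Illustrative usage sketch (not part of the original header; numbers assumed for the example):
//   TensorInfo in(TensorShape(10U, 5U, 3U), 1, DataType::F32);
//   TensorShape out = compute_softmax_shape(&in, 1);
//   // out == [ 10, 15 ]: everything from the axis onward collapses into one dimension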

/** Calculate the winograd filter transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    TensorShape tensor_shape{ input.tensor_shape() };

    const Size2D kernel_size      = winograd_info.kernel_size;
    const Size2D output_tile_size = winograd_info.output_tile_size;
    const Size2D input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    tensor_shape.remove_dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH));
    tensor_shape.set(Window::DimX, input.dimension(3));
    tensor_shape.set(Window::DimY, input.dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL)));
    tensor_shape.set(Window::DimZ, input_tile_size.area());

    return tensor_shape;
}

/** Calculate the winograd input transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        output_tile_size = winograd_info.output_tile_size;
    const Size2D        input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    const size_t idx_w = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    // Compute the number of output tiles along the x and y direction of size "output_tile_size"
    const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(input.tensor_shape()[idx_w], input.tensor_shape()[idx_h]),
                                                                kernel_size,
                                                                output_tile_size,
                                                                conv_info);

    const unsigned int width  = input.tensor_shape()[idx_c];
    const unsigned int height = num_tiles.area();
    const unsigned int depth  = input_tile_size.area();

    TensorShape output_shape{ input.tensor_shape() };
    output_shape.set(0, width);
    output_shape.set(1, height);
    output_shape.set(2, depth);

    return output_shape;
}
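// Illustrative sketch (not part of the original header; numbers assumed for the example):
//   an 8x8 NCHW input with 2 channels, a 3x3 kernel, 2x2 output tiles and "same" padding
//   yields ceil(8/2) * ceil(8/2) = 16 tiles, each transformed into a (2+3-1)^2 = 16-point
//   tile, so the transform output shape is [ channels, num_tiles, tile_points ] == [ 2, 16, 16 ].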

/** Calculate the winograd output transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        input_dimensions = winograd_info.input_dimensions;
    const DataLayout    data_layout      = winograd_info.output_data_layout;

    // Compute output shape
    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_dimensions.width, input_dimensions.height,
                                                              kernel_size.width, kernel_size.height, conv_info);

    TensorShape tensor_shape{ input.tensor_shape() };

    // Output dimension
    const unsigned int out_w = output_width;
    const unsigned int out_h = output_height;
    const unsigned int out_c = input.dimension(0);

    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH), out_w);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT), out_h);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL), out_c);

    return tensor_shape;
}

/** Calculate the deep convolution output shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] weights   Weights tensor info
 * @param[in] conv_info Contains padding and stride information
 *
 * @return the calculated shape
 */
inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    const unsigned int input_width         = input_shape[idx_width];
    const unsigned int input_height        = input_shape[idx_height];
    const unsigned int weights_width       = weights_shape[idx_width];
    const unsigned int weights_height      = weights_shape[idx_height];
    const unsigned int weights_out_channel = weights_shape[3];
    unsigned int       output_width        = 0;
    unsigned int       output_height       = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, weights_width, weights_height, conv_info);

    TensorShape output_shape{ input_shape };
    output_shape.set(idx_width, output_width);
    output_shape.set(idx_height, output_height);
    output_shape.set(idx_channel, weights_out_channel);

    return output_shape;
}
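// Illustrative usage sketch (not part of the original header; shapes assumed for the example):
//   TensorInfo in(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32);   // NCHW [W, H, C, N]
//   TensorInfo weights(TensorShape(7U, 7U, 3U, 64U), 1, DataType::F32); // 64 filters of 7x7x3
//   TensorShape out = compute_deep_convolution_shape(in, weights, PadStrideInfo(2, 2, 3, 3));
//   // out == [ 112, 112, 64, 1 ]: (224 + 2*3 - 7) / 2 + 1 = 112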

/** Calculate the min/max output shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_min_max_shape(const ITensorInfo *input)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(Window::DimX, 2);
    output_shape.remove_dimension(1);
    output_shape.remove_dimension(1);

    return output_shape;
}

/** Calculate the output pool shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
{
    unsigned int pooled_w = 0;
    unsigned int pooled_h = 0;

    TensorShape output_shape{ input.tensor_shape() };

    const bool         is_global_pooling = pool_info.is_global_pooling();
    const unsigned int idx_width         = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height        = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const unsigned int pool_size_x       = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size().width;
    const unsigned int pool_size_y       = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size().height;

    std::tie(pooled_w, pooled_h) = scaled_dimensions(output_shape[idx_width],
                                                     output_shape[idx_height],
                                                     pool_size_x,
                                                     pool_size_y,
                                                     pool_info.pad_stride_info());

    output_shape.set(idx_width, pooled_w);
    output_shape.set(idx_height, pooled_h);

    return output_shape;
}
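// Illustrative usage sketch (not part of the original header; numbers assumed for the example):
//   TensorInfo in(TensorShape(8U, 8U, 16U), 1, DataType::F32); // NCHW [W, H, C]
//   PoolingLayerInfo pool_info(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0));
//   TensorShape out = compute_pool_shape(in, pool_info);
//   // out == [ 4, 4, 16 ]: 2x2 pooling with stride 2 halves each spatial dimension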

/** Calculate the output roi align shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] rois      Rois tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_roi_align_shape(const ITensorInfo &input, const ITensorInfo &rois, ROIPoolingLayerInfo pool_info)
{
    TensorShape output_shape{ input.tensor_shape() };

    const unsigned int idx_width  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);

    output_shape.set(idx_width, pool_info.pooled_width());
    output_shape.set(idx_height, pool_info.pooled_height());
    output_shape.set(3, rois.dimension(1));

    return output_shape;
}

/** Calculate the RNN shape of a tensor
 *
 * @param[in] input      Input tensor info
 * @param[in] batch_size Batch size
 *
 * @return the calculated shape
 */
inline TensorShape compute_rnn_shape(const ITensorInfo *input, const unsigned int batch_size)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(1, batch_size);

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0                    First input tensor info
 * @param[in] input1                    Second input tensor info
 * @param[in] is_interleaved_transposed True if the input is interleaved transposed
 * @param[in] reshape_info              GEMM reshape info
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info)
{
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
    ARM_COMPUTE_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The first input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");

    const bool reinterpret_input_as_3d  = reshape_info.reinterpret_input_as_3d();
    const bool reinterpret_output_as_3d = reshape_info.depth_output_gemm3d() != 0;
    const int  depth_output_gemm3d      = reinterpret_output_as_3d ? reshape_info.depth_output_gemm3d() : 1;
    const int  m                        = reshape_info.reinterpret_input_as_3d() ? input0.dimension(1) * input0.dimension(2) : input0.dimension(1);

    // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
    // dimension of the output tensor
    const int dim0 = is_interleaved_transposed ? reshape_info.n() : input1.dimension(0);
    const int dim1 = is_interleaved_transposed ? reshape_info.m() / depth_output_gemm3d : m / depth_output_gemm3d;
    const int dim2 = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
    const int dim3 = reinterpret_input_as_3d ? 1 : input0.tensor_shape()[3];

    TensorShape output_shape{ input0.tensor_shape() };

    output_shape.set(0, dim0);
    output_shape.set(1, dim1);
    output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : dim2);
    output_shape.set(3, reinterpret_output_as_3d ? dim2 : dim3);
    output_shape.set(4, reinterpret_output_as_3d ? dim3 : 1);

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @note Deprecated. Remove when GEMMReshapeInfo is not used anymore by any other kernels
 *
 * @param[in] input0    First input tensor info
 * @param[in] input1    Second input tensor info
 * @param[in] gemm_info GEMM reshape info
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMReshapeInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(input1);
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");

    const bool reinterpret_input_as_3d  = gemm_info.reinterpret_input_as_3d();
    const bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d() != 0;
    const int  depth_output_gemm3d      = reinterpret_output_as_3d ? gemm_info.depth_output_gemm3d() : 1;

    TensorShape output_shape{ input0.tensor_shape() };

    if(!reinterpret_input_as_3d && !reinterpret_output_as_3d)
    {
        output_shape.set(0, gemm_info.n());
        output_shape.set(1, gemm_info.m());
    }
    else
    {
        // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
        // dimension of the output tensor
        const int batch_size = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
        output_shape.set(0, gemm_info.n());
        output_shape.set(1, gemm_info.m() / depth_output_gemm3d);
        output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : batch_size);
        output_shape.set(3, reinterpret_output_as_3d ? batch_size : 1);
    }

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0    First input tensor info
 * @param[in] input1    Second input tensor info
 * @param[in] gemm_info GEMM kernel info used to retrieve the original dimensions of the input matrices
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMKernelInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(input1);
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");

    const bool         reinterpret_input_as_3d  = gemm_info.reinterpret_input_as_3d;
    const bool         reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
    const unsigned int depth_output_gemm3d      = reinterpret_output_as_3d ? gemm_info.depth_output_gemm3d : 1;

    TensorShape output_shape{ input0.tensor_shape() };

    if(!reinterpret_input_as_3d && !reinterpret_output_as_3d)
    {
        output_shape.set(0, gemm_info.n);
        output_shape.set(1, gemm_info.m);
    }
    else
    {
        // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
        // dimension of the output tensor
        const unsigned int batch_size = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
        output_shape.set(0, gemm_info.n);
        output_shape.set(1, gemm_info.m / depth_output_gemm3d);
        output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : batch_size);
        output_shape.set(3, reinterpret_output_as_3d ? batch_size : 1);
    }

    return output_shape;
}
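// Illustrative usage sketch (not part of the original header; numbers assumed for the example):
//   GEMMKernelInfo gemm_info{};
//   gemm_info.m = 4; gemm_info.n = 16; gemm_info.depth_output_gemm3d = 0;
//   TensorInfo a(TensorShape(8U, 4U, 2U), 1, DataType::F32); // [K, M, batches]
//   TensorInfo b(TensorShape(16U, 8U), 1, DataType::F32);    // [N, K]
//   TensorShape out = compute_mm_shape(a, b, gemm_info);
//   // out == [ 16, 4, 2 ]: [ N, M ] with the batch dimension carried over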

/** Calculate the output stage shape of a tensor
 *
 * @param[in] input           Input tensor info
 * @param[in] gemm_3d_depth   (Optional) GEMM 3d depth
 * @param[in] batch_size_on_z (Optional) True if batch size is on z axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_output_stage_shape(const ITensorInfo &input, unsigned int gemm_3d_depth = 1, bool batch_size_on_z = false)
{
    ARM_COMPUTE_ERROR_ON(input.data_layout() != DataLayout::NHWC && gemm_3d_depth > 1);

    TensorShape output_shape = input.tensor_shape();
    if(gemm_3d_depth > 1)
    {
        if(batch_size_on_z)
        {
            output_shape.shift_right(1);
        }
        output_shape.set(0, input.tensor_shape().x());
        output_shape.set(1, input.tensor_shape().y() / gemm_3d_depth);
        output_shape.set(2, gemm_3d_depth);
    }

    return output_shape;
}

/** Calculate the strided slice output shape of a tensor
 *
 * @param[in] input            Input tensor info
 * @param[in] starts           The starts of the dimensions of the input tensor to be sliced
 * @param[in] ends             The ends of the dimensions of the input tensor to be sliced
 * @param[in] strides          The strides of the dimensions of the input tensor to be sliced
 * @param[in] begin_mask       If the ith bit of begin_mask is set, starts[i] is ignored and the fullest possible range in that dimension is used instead.
 * @param[in] end_mask         If the ith bit of end_mask is set, ends[i] is ignored and the fullest possible range in that dimension is used instead.
 * @param[in] shrink_axis_mask If the ith bit of shrink_axis_mask is set, it implies that the ith specification shrinks the dimensionality by 1
 *
 * @return the calculated shape
 */
inline TensorShape compute_strided_slice_shape(const ITensorInfo &input,
                                               const Coordinates &starts, const Coordinates &ends, const Coordinates &strides,
                                               int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    using namespace arm_compute::helpers::tensor_transform;
    return compute_strided_slice_output_shape(input.tensor_shape(), starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
}

/** Calculate the slice output shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] starts      The starts of the dimensions of the input tensor to be sliced
 * @param[in] ends        The ends of the dimensions of the input tensor to be sliced
 *
 * @return the calculated shape
 */
inline TensorShape compute_slice_shape(const TensorShape &input_shape, const Coordinates &starts, const Coordinates &ends)
{
    using namespace arm_compute::helpers::tensor_transform;

    return compute_strided_slice_output_shape(input_shape,
                                              starts, ends, BiStrides(),
                                              0, construct_slice_end_mask(ends), 0);
}

/** Calculate the batch to space output shape of a tensor
 *
 * @param[in] input   Input tensor info
 * @param[in] block_x Block shape x value
 * @param[in] block_y Block shape y value
 *
 * @return the calculated shape
 */
inline TensorShape compute_batch_to_space_shape(const ITensorInfo *input, const int block_x, const int block_y)
{
    ARM_COMPUTE_ERROR_ON(block_x <= 0 || block_y <= 0);

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_batch   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(idx_width, input->tensor_shape()[idx_width] * block_x);
    output_shape.set(idx_height, input->tensor_shape()[idx_height] * block_y);
    output_shape.set(idx_batch, input->tensor_shape()[idx_batch] / (block_x * block_y));

    return output_shape;
}

/** Calculate the depth to space output shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] block Block shape value
 *
 * @return the calculated shape
 */
inline TensorShape compute_depth_to_space_shape(const ITensorInfo *input, int block)
{
    ARM_COMPUTE_ERROR_ON(block < 2);

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(idx_width, input->dimension(idx_width) * block);
    output_shape.set(idx_height, input->dimension(idx_height) * block);
    output_shape.set(idx_channel, input->dimension(idx_channel) / (block * block));

    return output_shape;
}

/** Calculate the split output shape of a tensor
 *
 * @param[in] input      Input tensor info
 * @param[in] axis       Axis on which to split the input
 * @param[in] num_splits Number of splits
 *
 * @return the calculated shape
 */
inline TensorShape compute_split_shape(const ITensorInfo *input, unsigned int axis, unsigned int num_splits)
{
    TensorShape empty_shape;
    empty_shape.set(0, 0);

    TensorShape out_shape{ input->tensor_shape() };

    // Return empty shape if axis is invalid
    if(axis > input->tensor_shape().num_dimensions())
    {
        return empty_shape;
    }

    size_t axis_size = out_shape[axis];

    // Return empty shape if num_split is not valid
    if(axis_size % num_splits)
    {
        return empty_shape;
    }

    out_shape[axis] = axis_size / num_splits;
    return out_shape;
}
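
// A minimal usage sketch (hypothetical values): splitting an 8x2 tensor into four
// parts along axis 0 yields the per-split shape; an invalid axis or a non-divisible
// split count returns an empty shape instead of raising an error.
//
//   TensorInfo src(TensorShape(8U, 2U), 1, DataType::F32);
//   const TensorShape part = compute_split_shape(&src, 0, 4);
//   // part == [2, 2]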

/** Calculate the space to batch output shape of a tensor
 *
 * @param[in] input         Input tensor info
 * @param[in] block_x       Block shape x value
 * @param[in] block_y       Block shape y value
 * @param[in] padding_left  Left padding values
 * @param[in] padding_right Right padding values
 *
 * @return the calculated shape
 */
inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, const int block_x, const int block_y, const Size2D &padding_left, const Size2D &padding_right)
{
    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_batch   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    output_shape.set(idx_width, (input->tensor_shape()[idx_width] + padding_left.x() + padding_right.x()) / block_x);
    output_shape.set(idx_height, (input->tensor_shape()[idx_height] + padding_left.y() + padding_right.y()) / block_y);
    output_shape.set(idx_batch, input->tensor_shape()[idx_batch] * block_x * block_y);

    return output_shape;
}
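
// A minimal usage sketch (hypothetical values, assuming NCHW): with a 2x2 block
// and no padding, each 2x2 spatial tile moves into the batch dimension, so the
// batch grows by block_x * block_y while W and H shrink accordingly.
//
//   TensorInfo src(TensorShape(4U, 4U, 1U, 1U), 1, DataType::F32);
//   const TensorShape dst = compute_space_to_batch_shape(&src, 2, 2, Size2D(0, 0), Size2D(0, 0));
//   // dst == [W=2, H=2, C=1, N=4]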

/** Calculate the space to depth output shape of a tensor
 *
 * @param[in] input       Input tensor info
 * @param[in] block_shape Block shape value
 *
 * @return the calculated shape
 */
inline TensorShape compute_space_to_depth_shape(const ITensorInfo *input, int32_t block_shape)
{
    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_depth   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    output_shape.set(idx_width, input->tensor_shape()[idx_width] / block_shape);
    output_shape.set(idx_height, input->tensor_shape()[idx_height] / block_shape);
    output_shape.set(idx_depth, input->tensor_shape()[idx_depth] * (block_shape * block_shape));

    return output_shape;
}
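
// A minimal usage sketch (hypothetical values, assuming NCHW): the inverse of
// depth-to-space, folding each 2x2 spatial tile into the channel dimension.
//
//   TensorInfo src(TensorShape(8U, 8U, 2U), 1, DataType::F32);
//   const TensorShape dst = compute_space_to_depth_shape(&src, 2);
//   // dst == [W=4, H=4, C=8]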

/** Calculate the prior box output shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] info  PriorBoxLayer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_prior_box_shape(const ITensorInfo &input, const PriorBoxLayerInfo &info)
{
    DataLayout   data_layout = input.data_layout();
    const size_t idx_w       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int    num_priors  = info.aspect_ratios().size() * info.min_sizes().size() + info.max_sizes().size();

    TensorShape output_shape{};
    output_shape.set(0, input.dimension(idx_w) * input.dimension(idx_h) * num_priors * 4);
    output_shape.set(1, 2);

    return output_shape;
}
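
// A worked sketch (hypothetical values): a 10x10 feature map with 2 aspect ratios,
// 1 min size and 1 max size gives num_priors = 2 * 1 + 1 = 3, hence an output
// shape of [10 * 10 * 3 * 4, 2] = [1200, 2]: one row of box coordinates
// (4 per prior) and one row of variances.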

/** Calculate the padded shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] padding     Paddings list
 *
 * @return the calculated shape
 */
inline TensorShape compute_padded_shape(const TensorShape &input_shape, const PaddingList &padding)
{
    TensorShape padded_shape = input_shape;
    for(size_t dim = 0; dim < padding.size(); ++dim)
    {
        const auto    &padding_pair   = padding[dim];
        const uint32_t shape_on_index = (padded_shape.num_dimensions() <= dim) ? 1 : input_shape[dim];
        padded_shape.set(dim, padding_pair.first + shape_on_index + padding_pair.second);
    }
    return padded_shape;
}
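
// A minimal usage sketch (hypothetical values): pad one element before and two
// after dimension 0, and one element after dimension 1.
//
//   const TensorShape padded = compute_padded_shape(TensorShape(4U, 3U), PaddingList{ { 1, 2 }, { 0, 1 } });
//   // padded == [1 + 4 + 2, 0 + 3 + 1] == [7, 4]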

/** Calculate the tiled shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] multiples   Multiples list, one tiling factor per dimension
 *
 * @return the calculated shape
 */
inline TensorShape compute_tiled_shape(const TensorShape &input_shape, const Multiples &multiples)
{
    TensorShape tiled_shape = input_shape;
    for(size_t dim = 0; dim < multiples.size(); ++dim)
    {
        tiled_shape.set(dim, input_shape[dim] * multiples[dim]);
    }
    return tiled_shape;
}
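
// A minimal usage sketch (hypothetical values): tile a 2x3 tensor twice along
// dimension 0 and three times along dimension 1.
//
//   const TensorShape tiled = compute_tiled_shape(TensorShape(2U, 3U), Multiples{ 2, 3 });
//   // tiled == [4, 9]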

/** Calculate the reduced shape of a tensor given an axis
 *
 * @param[in] input     Input tensor shape
 * @param[in] axis      Axis on which to perform reduction
 * @param[in] keep_dims (Optional) Whether to keep the dimension after reduction operation. Defaults to true.
 *
 * @return the calculated shape
 */
inline TensorShape compute_reduced_shape(const TensorShape &input, unsigned int axis, bool keep_dims = true)
{
    TensorShape output_shape{ input };

    if(!keep_dims)
    {
        output_shape.remove_dimension(axis);
    }
    else
    {
        output_shape.set(axis, 1);
    }

    return output_shape;
}
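
// A minimal usage sketch (hypothetical values): reducing a 4x3 shape along axis 1
// either keeps the reduced axis as a singleton dimension or drops it entirely.
//
//   const TensorShape kept    = compute_reduced_shape(TensorShape(4U, 3U), 1);        // [4, 1]
//   const TensorShape dropped = compute_reduced_shape(TensorShape(4U, 3U), 1, false); // [4]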

/** Calculate the upsampled shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] info  Contains stride information (x and y)
 *
 * @return the calculated shape
 */
inline TensorShape compute_upsample_shape(const ITensorInfo &input, const Size2D &info)
{
    const DataLayout data_layout = input.data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    TensorShape scale_out_shape(input.tensor_shape());
    const unsigned int out_x = input.dimension(idx_width) * info.x();
    const unsigned int out_y = input.dimension(idx_height) * info.y();
    scale_out_shape.set(idx_width, out_x);
    scale_out_shape.set(idx_height, out_y);

    return scale_out_shape;
}
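
// A minimal usage sketch (hypothetical values, assuming NCHW): upsampling a 3x5
// feature map by a stride of 2 in each direction.
//
//   TensorInfo src(TensorShape(3U, 5U, 2U), 1, DataType::F32);
//   const TensorShape dst = compute_upsample_shape(src, Size2D(2, 2));
//   // dst == [W=6, H=10, C=2]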

/** Get the tensor shape
 *
 * @param[in] data Input data
 *
 * @return the extracted tensor shape
 */
template <typename T>
inline TensorShape extract_shape(T *data)
{
    return data->info()->tensor_shape();
}

inline TensorShape extract_shape(ITensorInfo *data)
{
    return data->tensor_shape();
}
inline TensorShape extract_shape(const ITensorInfo *data)
{
    return data->tensor_shape();
}

inline TensorShape extract_shape(const TensorShape *data)
{
    return *data;
}

inline TensorShape extract_shape(TensorShape *data)
{
    return *data;
}
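
// These overloads let shape helpers such as calculate_concatenate_shape below accept
// ITensor*, ITensorInfo* or TensorShape* arguments uniformly; the unconstrained
// template covers any type exposing info()->tensor_shape().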

/** Calculate the unstack shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] axis        Axis on which to perform the unstack operation
 *
 * @return the calculated shape
 */
inline TensorShape calculate_unstack_shape(TensorShape input_shape, unsigned int axis)
{
    ARM_COMPUTE_ERROR_ON(axis > input_shape.num_dimensions());
    input_shape.remove_dimension(axis);
    return input_shape;
}
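
// A minimal usage sketch (hypothetical values): unstacking along axis 1 drops
// that dimension from the shape of each extracted slice.
//
//   const TensorShape slice = calculate_unstack_shape(TensorShape(4U, 3U, 2U), 1);
//   // slice == [4, 2]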

/** Calculate the concatenate output shape of the concatenate operation along a single axis
 *
 * @param[in] input Vector of pointers to the inputs (tensors, tensor infos or shapes) to concatenate
 * @param[in] axis  Axis along which to concatenate the input tensors
 *
 * @return the calculated shape
 */
template <typename T>
inline TensorShape calculate_concatenate_shape(const std::vector<T *> &input, size_t axis)
{
    TensorShape out_shape = extract_shape(input[0]);

#if defined(ARM_COMPUTE_ASSERTS_ENABLED)
    // All dimensions must match except the axis one
    for(unsigned int i = 0; i < MAX_DIMS; ++i)
    {
        if(i == axis)
        {
            continue;
        }

        for(const auto &tensor : input)
        {
            ARM_COMPUTE_ERROR_ON(tensor == nullptr);
            const TensorShape shape = extract_shape(tensor);
            ARM_COMPUTE_ERROR_ON(out_shape[i] != shape[i]);
        }
    }
#endif // defined(ARM_COMPUTE_ASSERTS_ENABLED)

    // Calculate output shape
    size_t new_size = 0;
    for(const auto &tensor : input)
    {
        const TensorShape shape = extract_shape(tensor);
        new_size += shape[axis];
    }

    out_shape.set(axis, new_size);

    return out_shape;
}
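
// A minimal usage sketch (hypothetical values): concatenating a 4x2 and a 4x3
// shape along axis 1 sums the sizes on that axis.
//
//   TensorShape a(4U, 2U);
//   TensorShape b(4U, 3U);
//   const std::vector<TensorShape *> inputs{ &a, &b };
//   const TensorShape out = calculate_concatenate_shape(inputs, 1);
//   // out == [4, 5]
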
/** Calculate the stack output shape of a tensor
 *
 * @param[in] a           Input tensor info
 * @param[in] axis        Axis on which to perform the stack operation
 * @param[in] num_tensors Number of tensors to stack
 *
 * @return the calculated shape
 */
inline TensorShape compute_stack_shape(const ITensorInfo &a, unsigned int axis, unsigned int num_tensors)
{
    ARM_COMPUTE_ERROR_ON(axis > a.num_dimensions());
    ARM_COMPUTE_ERROR_ON(a.num_dimensions() > 4);

    TensorShape shape_out{ a.tensor_shape() };
    shape_out.set(axis, num_tensors);

    unsigned int i_shift = 0;

    // Copy the input dimensions, shifting the ones at and above the stack axis up by one
    for(unsigned int i = 0; i < a.num_dimensions(); ++i)
    {
        if(i == axis)
        {
            i_shift++;
        }

        shape_out.set(i + i_shift, a.tensor_shape()[i]);
    }
    return shape_out;
}
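
// A minimal usage sketch (hypothetical values): stacking four tensors of shape
// [2, 3] along axis 0 inserts a new leading dimension of size 4.
//
//   TensorInfo a(TensorShape(2U, 3U), 1, DataType::F32);
//   const TensorShape out = compute_stack_shape(a, 0, 4);
//   // out == [4, 2, 3]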

/** Calculate the gather output shape of a tensor
 *
 * @param[in] input_shape   Input tensor shape
 * @param[in] indices_shape Indices tensor shape
 * @param[in] actual_axis   Axis to be used in the computation
 *
 * @return the calculated shape
 */
inline TensorShape compute_gather_shape(const TensorShape &input_shape, const TensorShape &indices_shape, uint32_t actual_axis)
{
    ARM_COMPUTE_ERROR_ON(indices_shape.num_dimensions() > 1);
    ARM_COMPUTE_ERROR_ON(input_shape.num_dimensions() > 4);
    ARM_COMPUTE_ERROR_ON(actual_axis >= input_shape.num_dimensions());

    TensorShape output_shape  = input_shape;
    output_shape[actual_axis] = indices_shape[0];

    return output_shape;
}
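
// A minimal usage sketch (hypothetical values): gathering five indices along
// axis 1 of a [4, 3] tensor replaces that axis with the number of indices.
//
//   const TensorShape out = compute_gather_shape(TensorShape(4U, 3U), TensorShape(5U), 1);
//   // out == [4, 5]
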
} // namespace shape_calculator
} // namespace misc
} // namespace arm_compute
#endif /* ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H */