/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H__
#define __ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H__

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/Utils.h"

#include "arm_compute/core/utils/helpers/tensor_transform.h"

#include <cmath>

namespace arm_compute
{
namespace misc
{
namespace shape_calculator
{
/** Calculate the output tensor shape of a vector input given the convolution dimensions
 *
 * @param[in] input       Input tensor shape
 * @param[in] conv_w      Convolution width
 * @param[in] conv_h      Convolution height
 * @param[in] data_layout Data layout
 *
 * @return the calculated shape
 */
inline TensorShape compute_vector_to_tensor_output_shape(const TensorShape &input, size_t conv_w, size_t conv_h, const DataLayout &data_layout)
{
    const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape output_shape(input);
    output_shape.set(idx_w, conv_w);
    output_shape.set(idx_h, conv_h);
    output_shape.set(idx_c, input.x() / (conv_w * conv_h));

    return output_shape;
}
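// Worked example (illustrative values, not part of the API): a 1D input of 96
// elements with conv_w = 4 and conv_h = 4 in NCHW becomes
// [4, 4, 96 / (4 * 4)] = [4, 4, 6].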

/** Calculate the permuted shape of an input given a permutation vector
 *
 * @param[in] input Input tensor info
 * @param[in] perm  Permutation vector
 *
 * @return the calculated shape
 */
inline TensorShape compute_permutation_output_shape(const ITensorInfo &input, const PermutationVector &perm)
{
    TensorShape output_shape = input.tensor_shape();
    permute(output_shape, perm);
    return output_shape;
}

/** Calculate the output shape of the reorg layer given a stride
 *
 * @param[in] input  Input tensor info
 * @param[in] stride Stride
 *
 * @return the calculated shape
 */
inline TensorShape compute_reorg_output_shape(const ITensorInfo &input, int32_t stride)
{
    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    ARM_COMPUTE_ERROR_ON(stride <= 0);
    ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_width] % stride != 0), "The width of the input tensor must be a multiple of stride");
    ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_height] % stride != 0), "The height of the input tensor must be a multiple of stride");

    TensorShape output_shape{ input.tensor_shape() };

    output_shape.set(idx_width, output_shape[idx_width] / stride);
    output_shape.set(idx_height, output_shape[idx_height] / stride);
    output_shape.set(idx_channel, output_shape[idx_channel] * stride * stride);

    return output_shape;
}
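// Worked example (illustrative values): an NCHW input of shape [W=4, H=4, C=2]
// with stride 2 gives [4/2, 4/2, 2*2*2] = [2, 2, 8]; each 2x2 spatial block is
// folded into the channel dimension.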

/** Calculate the reshaped shape of the weights
 *
 * @param[in] weights    Weights tensor info
 * @param[in] has_bias   (Optional) Set to true if there is bias
 * @param[in] num_groups (Optional) Number of groups
 *
 * @return the calculated shape of the reshaped weights
 */
inline TensorShape compute_weights_reshaped_shape(const ITensorInfo &weights, bool has_bias = false, unsigned int num_groups = 1)
{
    // A number of groups greater than one is only supported for the NCHW data layout, and the number of weights must be a multiple of it.
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(weights.data_layout() == DataLayout::NHWC && num_groups > 1);
    ARM_COMPUTE_ERROR_ON((weights.dimension(3) % num_groups) != 0);

    // Calculate output shape
    TensorShape weights_reshaped{ weights.tensor_shape() };
    weights_reshaped.set(3, weights_reshaped[3] / num_groups);

    weights_reshaped.collapse(3);
    const size_t tmp_dim = weights_reshaped[0];
    weights_reshaped.set(0, weights_reshaped[1]);
    weights_reshaped.set(1, tmp_dim + (has_bias ? 1 : 0));
    if(weights.num_dimensions() < 5)
    {
        weights_reshaped.set(2, num_groups);
    }

    return weights_reshaped;
}
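// Worked example (illustrative values): 16 3x3 filters over 8 input channels,
// i.e. weights of shape [3, 3, 8, 16], with no bias and a single group collapse
// to [16, 3*3*8, 1] = [16, 72, 1]; with a bias one extra element per filter is
// appended, giving [16, 73, 1].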

/** Calculate the Left Hand Side matrix reshaped shape
 *
 * @param[in] a                       Input tensor info
 * @param[in] lhs_info                Left Hand Side matrix information
 * @param[in] reinterpret_input_as_3d (Optional) Set to true if the input needs to be reinterpreted as 3D
 *
 * @return the calculated shape
 */
inline TensorShape compute_lhs_reshaped_shape(const ITensorInfo &a, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false)
{
    ARM_COMPUTE_ERROR_ON(lhs_info.m0 == 0);
    ARM_COMPUTE_ERROR_ON(lhs_info.k0 == 0);
    ARM_COMPUTE_ERROR_ON(lhs_info.v0 == 0);

    // Input width/height
    const unsigned int input_width  = a.dimension(0);
    const unsigned int input_height = reinterpret_input_as_3d ? a.dimension(1) * a.dimension(2) : a.dimension(1);

    // Number of horizontal/vertical blocks in the input tensor
    const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(lhs_info.k0));
    const unsigned int num_vert_blocks  = std::ceil(input_height / static_cast<float>(lhs_info.m0));

    // Block size
    const unsigned int block_size = lhs_info.m0 * lhs_info.k0;

    // Output width/height
    const unsigned int output_width  = block_size * num_horiz_blocks * lhs_info.v0;
    const unsigned int output_height = std::ceil(num_vert_blocks / static_cast<float>(lhs_info.v0));

    TensorShape lhs_shape{ a.tensor_shape() };
    lhs_shape.set(0, output_width);
    lhs_shape.set(1, output_height);

    if((reinterpret_input_as_3d) && (lhs_shape.num_dimensions() > 2))
    {
        // When the data format is NHWC and the shapes are Nx1x1
        // the tensor shape num_dimensions is automatically set to 1 instead of 3.
        // To avoid failures by removing a dimension that doesn't exist
        // check if the number of dimensions is greater than 2.
        lhs_shape.remove_dimension(2);
    }

    return lhs_shape;
}
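// Worked example (illustrative values): an 8x8 matrix A with m0 = 4, k0 = 4 and
// v0 = 2 has 2x2 blocks of size 16; the reshaped LHS packs
// 16 * 2 * 2 = 64 elements per row over ceil(2 / 2) = 1 row, i.e. [64, 1].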

/** Calculate the Right Hand Side matrix reshaped shape
 *
 * @param[in] a        Input tensor info
 * @param[in] rhs_info Right Hand Side matrix information
 *
 * @return the calculated shape
 */
inline TensorShape compute_rhs_reshaped_shape(const ITensorInfo &a, const GEMMRHSMatrixInfo &rhs_info)
{
    ARM_COMPUTE_ERROR_ON(rhs_info.n0 == 0);
    ARM_COMPUTE_ERROR_ON(rhs_info.k0 == 0);
    ARM_COMPUTE_ERROR_ON(rhs_info.h0 == 0);

    // Input width/height
    const unsigned int input_width  = a.dimension(0);
    const unsigned int input_height = a.dimension(1);

    // Number of horizontal/vertical blocks in the input tensor
    const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(rhs_info.n0));
    const unsigned int num_vert_blocks  = std::ceil(input_height / static_cast<float>(rhs_info.k0));

    // Block size
    const unsigned int block_size = rhs_info.n0 * rhs_info.k0;

    // Output width/height
    const unsigned int output_width  = block_size * num_vert_blocks * rhs_info.h0;
    const unsigned int output_height = std::ceil(num_horiz_blocks / static_cast<float>(rhs_info.h0));

    TensorShape rhs_shape{ a.tensor_shape() };
    rhs_shape.set(0, output_width);
    rhs_shape.set(1, output_height);

    return rhs_shape;
}

/** Calculate the interleaved shape of an input tensor
 *
 * @param[in] a                         Input tensor info
 * @param[in] mult_interleave4x4_height (Optional) Interleave4x4 height
 * @param[in] reinterpret_input_as_3d   (Optional) Set to true if the input needs to be reinterpreted as 3D
 *
 * @return the calculated shape
 */
inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false)
{
    // The interleaved output matrix will have the following shape: [ a_height * W, ceil(a_width / W) ] where W = 4 * mult_interleave4x4_height
    ARM_COMPUTE_ERROR_ON(mult_interleave4x4_height < 1);
    const int   interleave_width = 4 * mult_interleave4x4_height;
    TensorShape shape_interleaved_a{ a.tensor_shape() };
    shape_interleaved_a.set(0, a.dimension(0) * interleave_width);
    if(reinterpret_input_as_3d)
    {
        const int M      = a.dimension(1) * a.dimension(2);
        const int height = std::ceil(M / static_cast<float>(interleave_width));
        shape_interleaved_a.set(1, height);

        // When the data format is NHWC and the shapes are Nx1x1
        // the tensor shape num_dimensions is automatically set to 1 instead of 3.
        // To avoid failures by removing a dimension that doesn't exist
        // check if the number of dimensions is greater than 2.
        if(shape_interleaved_a.num_dimensions() > 2)
        {
            shape_interleaved_a.remove_dimension(2);
        }
    }
    else
    {
        shape_interleaved_a.set(1, std::ceil(a.dimension(1) / static_cast<float>(interleave_width)));
    }

    return shape_interleaved_a;
}
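// Worked example (illustrative values): with mult_interleave4x4_height = 1 the
// interleave width W is 4, so an 8x8 matrix A becomes
// [8 * 4, ceil(8 / 4)] = [32, 2].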

/** Calculate the reshaped shape of the weights to use in depthwise convolution
 *
 * @param[in] input Input tensor info
 * @param[in] info  Depthwise convolution information to be used for reshaping.
 *
 * @return the calculated shape
 */
inline TensorShape compute_reshaped_depthwise_weights_shape(const ITensorInfo &input, const DepthwiseConvolutionReshapeInfo &info)
{
    const auto  data_layout = input.data_layout();
    TensorShape weights_shape{};

    const int    width_idx    = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int    height_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int    channel_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const size_t num_channels = input.dimension(channel_idx);
    const size_t num_rows     = input.dimension(height_idx);
    const size_t num_cols     = input.dimension(width_idx);

    weights_shape.set(0, num_rows * num_cols * info.c0);
    weights_shape.set(1, DIV_CEIL(num_channels, info.c0));
    return weights_shape;
}

/** Calculate the transposed 1xW shape
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_transpose1xW_shape(const ITensorInfo &b)
{
    // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
    TensorShape shape_transposed1xW_b{ b.tensor_shape() };
    shape_transposed1xW_b.set(0, b.dimension(1) * 16);
    shape_transposed1xW_b.set(1, std::ceil(b.dimension(0) / 16.f));

    return shape_transposed1xW_b;
}

/** Calculate the transposed 1xW width element shape
 *
 * @param[in] b                       Input tensor info
 * @param[in] mult_transpose1xW_width (Optional) Transpose1xW width
 *
 * @return the calculated shape
 */
inline TensorShape compute_transpose1xW_with_element_size_shape(const ITensorInfo &b, int mult_transpose1xW_width = 1)
{
    // Note: mult_transpose1xW_width expresses the number of chunks with size 1x(W) we want to store on the same row
    // The transpose1xW output matrix will have the following shape:
    // [ b_height * W, ceil(b_width / W) ] where W = (16 / element size of the tensor) * mult_transpose1xW_width
    ARM_COMPUTE_ERROR_ON(mult_transpose1xW_width < 1);
    TensorShape  shape_transposed1xW_b{ b.tensor_shape() };
    const size_t transpose_width = (16 / b.element_size()) * mult_transpose1xW_width;
    shape_transposed1xW_b.set(0, b.dimension(1) * transpose_width);
    shape_transposed1xW_b.set(1, static_cast<size_t>(std::ceil(b.dimension(0) / static_cast<float>(transpose_width))));

    return shape_transposed1xW_b;
}
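// Worked example (illustrative values): for an 8x8 F32 matrix B and
// mult_transpose1xW_width = 1, W = (16 / 4) * 1 = 4, so the output is
// [8 * 4, ceil(8 / 4)] = [32, 2].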

/** Calculate the reductionA shape used in GEMMLowp
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_reductionA_shape(const ITensorInfo &b)
{
    TensorShape shape_vector_sum_col{ b.tensor_shape() };
    if(shape_vector_sum_col.num_dimensions() > 1)
    {
        shape_vector_sum_col.remove_dimension(1);
    }

    return shape_vector_sum_col;
}

/** Calculate the reductionB shape used in GEMMLowp
 *
 * @param[in] a Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_reductionB_shape(const ITensorInfo &a)
{
    TensorShape shape_vector_sum_row{ a.tensor_shape() };
    shape_vector_sum_row.set(Window::DimX, a.dimension(1));
    if(shape_vector_sum_row.num_dimensions() > 1)
    {
        shape_vector_sum_row.remove_dimension(1);
    }

    return shape_vector_sum_row;
}

/** Calculate the Col2Im shape
 *
 * @param[in] input           Input tensor info
 * @param[in] convolved_dims  Convolved dimensions
 * @param[in] batch_size_on_z True if batch size is on z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape
 */
inline TensorShape compute_col2im_shape(const ITensorInfo &input, const Size2D &convolved_dims, bool batch_size_on_z, unsigned int num_groups = 1)
{
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(input.tensor_shape()[1] != (convolved_dims.area()));
    ARM_COMPUTE_ERROR_ON((num_groups > 1) && input.tensor_shape()[2] != num_groups);

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape col2im_shape{ input.tensor_shape() };
    // If batches start on 3rd dimension shift dimensions right by 1 to retain upper tensor shape,
    // as the first three will be overridden by H,W,C data
    if(batch_size_on_z && num_groups == 1)
    {
        col2im_shape.shift_right(1);
    }
    col2im_shape.set(width_idx, convolved_dims.width);
    col2im_shape.set(height_idx, convolved_dims.height);
    col2im_shape.set(channel_idx, input.tensor_shape()[0] * num_groups);

    return col2im_shape;
}
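// Worked example (illustrative values): a GEMM output of shape [16, 9, 4]
// (16 output channels, 3x3 convolved area, 4 batches) with convolved_dims 3x3,
// batch_size_on_z = true and a single group is shifted right and rewritten to
// [3, 3, 16, 4] in NCHW.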

/** Calculate the transposed shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_transposed_shape(const ITensorInfo &input)
{
    TensorShape shape_transposed{ input.tensor_shape() };

    shape_transposed.set(0, input.dimension(1));
    shape_transposed.set(1, input.dimension(0));

    return shape_transposed;
}

/** Calculate the depthwise convolution output shape of a tensor
 *
 * @param[in] input            Input tensor info
 * @param[in] weights          Weights tensor info
 * @param[in] conv_info        Padding and stride information to use for the convolution.
 * @param[in] depth_multiplier Multiplier to apply to the input's depth in order to retrieve the output's depth.
 *
 * @return the calculated shape
 */
inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info, unsigned int depth_multiplier)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_shape[width_idx], input_shape[height_idx],
                                                              weights_shape[width_idx], weights_shape[height_idx],
                                                              conv_info);

    TensorShape output_shape{ input_shape };
    output_shape.set(width_idx, output_width);
    output_shape.set(height_idx, output_height);
    output_shape.set(channel_idx, input_shape[channel_idx] * depth_multiplier);

    return output_shape;
}
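// Worked example (illustrative values): an NCHW input [W=8, H=8, C=3] with 3x3
// depthwise weights, stride 1, no padding and depth_multiplier = 2 yields
// [(8 - 3) + 1, (8 - 3) + 1, 3 * 2] = [6, 6, 6].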

/** Calculate the depthwise convolution output shape of a tensor
 *
 * @param[in] input            Input tensor info
 * @param[in] weights_width    Weights width
 * @param[in] weights_height   Weights height
 * @param[in] conv_info        Padding and stride information to use for the convolution.
 * @param[in] depth_multiplier Multiplier to apply to the input's depth in order to retrieve the output's depth.
 *
 * @return the calculated shape
 */
inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input, int weights_width, int weights_height, PadStrideInfo conv_info, unsigned int depth_multiplier)
{
    const TensorShape input_shape{ input.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_shape[width_idx], input_shape[height_idx],
                                                              weights_width, weights_height, conv_info);

    TensorShape output_shape{ input_shape };
    output_shape.set(width_idx, output_width);
    output_shape.set(height_idx, output_height);
    output_shape.set(channel_idx, input_shape[channel_idx] * depth_multiplier);

    return output_shape;
}

/** Calculate the upsampled output shape used for deconvolution
 *
 * @param[in]  input              Input tensor info
 * @param[in]  weights            Weights tensor shape
 * @param[in]  sx                 Stride on x axis
 * @param[in]  sy                 Stride on y axis
 * @param[in]  inner_border_right The number of zeros added to right edge of the input.
 * @param[in]  inner_border_top   The number of zeros added to top edge of the input.
 * @param[in]  out_dims           Output shape dimensions
 * @param[out] padx               Padding on x axis
 * @param[out] pady               Padding on y axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_deconvolution_upsampled_shape(const ITensorInfo &input, const ITensorInfo &weights, unsigned int sx, unsigned int sy, unsigned int inner_border_right,
                                                         unsigned int inner_border_top,
                                                         std::pair<unsigned int, unsigned int> &out_dims, unsigned int &padx, unsigned int &pady)
{
    const DataLayout data_layout = input.data_layout();
    const size_t     idx_w       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t     idx_h       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    // Find the upsampled dimensions
    unsigned int out_x = (input.dimension(idx_w) - 1) * sx + inner_border_right + 1;
    unsigned int out_y = (input.dimension(idx_h) - 1) * sy + inner_border_top + 1;

    // Find the padding needed for the convolution with stride 1 in order to match output shape
    padx = out_dims.first - (out_x - weights.dimension(idx_w) + 1);
    pady = out_dims.second - (out_y - weights.dimension(idx_h) + 1);
    out_x += padx;
    out_y += pady;

    TensorShape scale_out_shape(input.tensor_shape());
    scale_out_shape.set(idx_w, out_x);
    scale_out_shape.set(idx_h, out_y);

    return scale_out_shape;
}
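// Worked example (illustrative values): a 4x4 input upsampled with sx = sy = 2
// and no inner border gives out_x = out_y = (4 - 1) * 2 + 1 = 7; for 3x3
// weights and a requested output of 9x9, padx = pady = 9 - (7 - 3 + 1) = 4,
// so the upsampled shape is [7 + 4, 7 + 4] = [11, 11].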

/** Calculate the output shape of the deconvolution layer
 *
 * @param[in] out_dims Output x and y shape dimensions
 * @param[in] input    Input tensor info
 * @param[in] weights  Weights tensor shape
 *
 * @return the calculated shape
 */
inline TensorShape compute_deconvolution_output_shape(const std::pair<unsigned int, unsigned int> &out_dims, const ITensorInfo &input, const ITensorInfo &weights)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        batch_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    TensorShape out_shape{ input_shape };
    out_shape.set(width_idx, out_dims.first);
    out_shape.set(height_idx, out_dims.second);
    out_shape.set(channel_idx, weights_shape[batch_idx]);
    return out_shape;
}

/** Calculate the im2col output shape of a tensor
 *
 * @param[in] input           Input tensor info
 * @param[in] kernel_dims     The kernel dimensions (width and height).
 * @param[in] conv_info       Contains padding and stride information
 * @param[in] has_bias        In case biases are provided, expands the matrix with 1
 * @param[in] dilation        Dilation, in elements, across x and y
 * @param[in] batch_size_on_z True if batch size is on z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape
 */
inline TensorShape compute_im2col_conv_shape(const ITensorInfo *input, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, bool batch_size_on_z,
                                             unsigned int num_groups = 1)
{
    // The output shape will be the 3D shape [ out_channels * kernel_area, num_elems_per_out_channel, batches ] if batch_size_on_z == true
    // or the 4D shape [ out_channels * kernel_area / num_groups, num_elems_per_out_channel, num_groups, batches ] if batch_size_on_z == false

    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && input->data_layout() != DataLayout::NCHW);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && batch_size_on_z);

    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    std::pair<unsigned int, unsigned int> out_dims = scaled_dimensions(output_shape[width_idx], output_shape[height_idx], kernel_dims.width, kernel_dims.height, conv_info, dilation);
    output_shape.set(0, (output_shape[channel_idx] / num_groups * kernel_dims.area() + (has_bias ? 1 : 0))); // NOLINT
    output_shape.set(1, (out_dims.first * out_dims.second));
    if(batch_size_on_z && output_shape.num_dimensions() >= 3)
    {
        output_shape.remove_dimension(2);
    }
    else
    {
        output_shape.set(2, num_groups);
    }

    return output_shape;
}
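// Worked example (illustrative values): an NCHW input [W=4, H=4, C=3, N=2] with
// a 3x3 kernel, stride 1, no padding, no bias, unit dilation and
// batch_size_on_z = true produces [3 * 9, 2 * 2, 2] = [27, 4, 2]: one column
// of 27 elements per output pixel, per batch.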

/** Calculate the flattened output shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_flatten_shape(const ITensorInfo *input)
{
    // The output shape will be the flatten version of the input (i.e. [ width * height * channels, num_batches, ... ] ). Used for FlattenLayer and FullyConnectedLayer.

    TensorShape output_shape{ input->tensor_shape() };

    output_shape.collapse(3);

    return output_shape;
}

/** Calculate the softmax output shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] axis  (Optional) Softmax axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_softmax_shape(const ITensorInfo *input, size_t axis = 1)
{
    // The output shape will be a 2D version of the input. For instance:
    // - [x,y,z] and axis 1 will return [x, y*z]
    // - [x,y,z,w] and axis 2 will return [x*y, w*z]
    // - [x,y,z,w] and axis 3 will return [x*y*z, w]
    TensorShape shape2D = input->tensor_shape();

    if(axis < input->num_dimensions())
    {
        // Collapse from axis onward (this changes the shape)
        shape2D.collapse_from(axis);

        // Collapse the rest (collapse is inclusive)
        shape2D.collapse(shape2D.num_dimensions() - 1);
    }
    else
    {
        // Collapse everything
        shape2D.collapse(shape2D.num_dimensions());
    }

    if(axis == 0)
    {
        // If axis is zero the first dim should be one. Since
        // collapse is an inclusive operation we need to shift
        shape2D.shift_right(1);
    }

    return shape2D;
}

/** Calculate the winograd filter transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    TensorShape tensor_shape{ input.tensor_shape() };

    const Size2D kernel_size      = winograd_info.kernel_size;
    const Size2D output_tile_size = winograd_info.output_tile_size;
    const Size2D input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    tensor_shape.remove_dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH));
    tensor_shape.set(Window::DimX, input.dimension(3));
    tensor_shape.set(Window::DimY, input.dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL)));
    tensor_shape.set(Window::DimZ, input_tile_size.area());

    return tensor_shape;
}

/** Calculate the winograd input transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        output_tile_size = winograd_info.output_tile_size;
    const Size2D        input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    const size_t idx_w = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    // Compute the number of output tiles along the x and y direction of size "output_tile_size"
    const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(input.tensor_shape()[idx_w], input.tensor_shape()[idx_h]),
                                                                kernel_size,
                                                                output_tile_size,
                                                                conv_info);

    const unsigned int width  = input.tensor_shape()[idx_c];
    const unsigned int height = num_tiles.area();
    const unsigned int depth  = input_tile_size.area();

    TensorShape output_shape{ input.tensor_shape() };
    output_shape.set(0, width);
    output_shape.set(1, height);
    output_shape.set(2, depth);

    return output_shape;
}
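// Worked example (illustrative values, assuming stride 1 and no padding): an
// NCHW input [W=8, H=8, C=2] with a 3x3 kernel and a 2x2 output tile uses 4x4
// input tiles; the 6x6 convolution output holds 3x3 = 9 tiles, so the
// transform output is [C, num_tiles, tile_area] = [2, 9, 16].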

/** Calculate the winograd output transform shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */
inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        input_dimensions = winograd_info.input_dimensions;
    const DataLayout    data_layout      = winograd_info.output_data_layout;

    // Compute output shape
    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_dimensions.width, input_dimensions.height,
                                                              kernel_size.width, kernel_size.height, conv_info);

    TensorShape tensor_shape{ input.tensor_shape() };

    // Output dimension
    const unsigned int out_w = output_width;
    const unsigned int out_h = output_height;
    const unsigned int out_c = input.dimension(0);

    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH), out_w);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT), out_h);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL), out_c);

    return tensor_shape;
}

/** Calculate the deep convolution output shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] weights   Weights tensor info
 * @param[in] conv_info Contains padding and stride information
 *
 * @return the calculated shape
 */
inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    const unsigned int input_width         = input_shape[idx_width];
    const unsigned int input_height        = input_shape[idx_height];
    const unsigned int weights_width       = weights_shape[idx_width];
    const unsigned int weights_height      = weights_shape[idx_height];
    const unsigned int weights_out_channel = weights_shape[3];
    unsigned int       output_width        = 0;
    unsigned int       output_height       = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, weights_width, weights_height, conv_info);

    TensorShape output_shape{ input_shape };
    output_shape.set(idx_width, output_width);
    output_shape.set(idx_height, output_height);
    output_shape.set(idx_channel, weights_out_channel);

    return output_shape;
}
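// Worked example (illustrative values): an NCHW input [W=7, H=7, C=3, N=1]
// convolved with weights [3, 3, 3, 16] at stride 1 and no padding gives
// [(7 - 3) + 1, (7 - 3) + 1, 16, 1] = [5, 5, 16, 1].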

/** Calculate the min/max output shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */
inline TensorShape compute_min_max_shape(const ITensorInfo *input)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(Window::DimX, 2);
    output_shape.remove_dimension(1);
    output_shape.remove_dimension(1);

    return output_shape;
}

/** Calculate the output pool shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
{
    unsigned int pooled_w = 0;
    unsigned int pooled_h = 0;

    TensorShape output_shape{ input.tensor_shape() };

    const bool         is_global_pooling = pool_info.is_global_pooling();
    const unsigned int idx_width         = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height        = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const unsigned int pool_size_x       = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size().width;
    const unsigned int pool_size_y       = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size().height;

    std::tie(pooled_w, pooled_h) = scaled_dimensions(output_shape[idx_width],
                                                     output_shape[idx_height],
                                                     pool_size_x,
                                                     pool_size_y,
                                                     pool_info.pad_stride_info());

    output_shape.set(idx_width, pooled_w);
    output_shape.set(idx_height, pooled_h);

    return output_shape;
}
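// Worked example (illustrative values): an [8, 8, C] NCHW input pooled 2x2 with
// stride 2 and no padding gives [(8 - 2)/2 + 1, (8 - 2)/2 + 1, C] = [4, 4, C];
// with global pooling the pool size equals the input size, collapsing the
// spatial dimensions to [1, 1, C].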

/** Calculate the output roi align shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] rois      Rois tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_roi_align_shape(const ITensorInfo &input, const ITensorInfo &rois, ROIPoolingLayerInfo pool_info)
{
    TensorShape output_shape{ input.tensor_shape() };

    const unsigned int idx_width  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);

    output_shape.set(idx_width, pool_info.pooled_width());
    output_shape.set(idx_height, pool_info.pooled_height());
    output_shape.set(3, rois.dimension(1));

    return output_shape;
}

/** Calculate the RNN shape of a tensor
 *
 * @param[in] input      Input tensor info
 * @param[in] batch_size Batch size
 *
 * @return the calculated shape
 */
inline TensorShape compute_rnn_shape(const ITensorInfo *input, const unsigned int batch_size)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(1, batch_size);

    return output_shape;
}

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0                    First input tensor info
 * @param[in] input1                    Second input tensor info
 * @param[in] is_interleaved_transposed True if the input is interleaved transposed
 * @param[in] reshape_info              GEMM reshape info
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info)
{
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
    ARM_COMPUTE_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The first input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");

    const bool reinterpret_input_as_3d  = reshape_info.reinterpret_input_as_3d();
    const bool reinterpret_output_as_3d = reshape_info.depth_output_gemm3d() != 0;
    const int  depth_output_gemm3d      = reinterpret_output_as_3d ? reshape_info.depth_output_gemm3d() : 1;
    const int  m                        = reshape_info.reinterpret_input_as_3d() ? input0.dimension(1) * input0.dimension(2) : input0.dimension(1);

    // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
    // dimension of the output tensor
    const int dim0 = is_interleaved_transposed ? reshape_info.n() : input1.dimension(0);
    const int dim1 = is_interleaved_transposed ? reshape_info.m() / depth_output_gemm3d : m / depth_output_gemm3d;
    const int dim2 = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
    const int dim3 = reinterpret_input_as_3d ? 1 : input0.tensor_shape()[3];

    TensorShape output_shape{ input0.tensor_shape() };

    output_shape.set(0, dim0);
    output_shape.set(1, dim1);
    output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : dim2);
    output_shape.set(3, reinterpret_output_as_3d ? dim2 : dim3);
    output_shape.set(4, reinterpret_output_as_3d ? dim3 : 1);

    return output_shape;
}
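// Worked example (illustrative values): for a plain GEMM (no interleaving, no
// 3D reinterpretation) with A = [K=8, M=4] and B = [N=16, K=8], the output is
// [N, M] = [16, 4], with any batch dimensions carried over from A.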

/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0    First input tensor info
 * @param[in] input1    Second input tensor info
 * @param[in] gemm_info GEMM reshape info
 *
 * @return the calculated shape
 */
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, const GEMMReshapeInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");

    const bool reinterpret_input_as_3d  = gemm_info.reinterpret_input_as_3d();
    const bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d() != 0;
    const int  depth_output_gemm3d      = reinterpret_output_as_3d ? gemm_info.depth_output_gemm3d() : 1;

    // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained collapsing the second and third
    // dimension of the output tensor
    const int batch_size = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];

    TensorShape output_shape{ input0.tensor_shape() };

    output_shape.set(0, gemm_info.n());
    output_shape.set(1, gemm_info.m() / depth_output_gemm3d);
    output_shape.set(2, reinterpret_output_as_3d ? depth_output_gemm3d : batch_size);
    output_shape.set(3, reinterpret_output_as_3d ? batch_size : 1);

    return output_shape;
}

/** Calculate the output stage shape of a tensor
 *
 * @param[in] input           Input tensor info
 * @param[in] gemm_3d_depth   (Optional) GEMM 3d depth
 * @param[in] batch_size_on_z (Optional) True if batch size is on z axis
 *
 * @return the calculated shape
 */
inline TensorShape compute_output_stage_shape(const ITensorInfo &input, unsigned int gemm_3d_depth = 1, bool batch_size_on_z = false)
{
    ARM_COMPUTE_ERROR_ON(input.data_layout() != DataLayout::NHWC && gemm_3d_depth > 1);

    TensorShape output_shape = input.tensor_shape();
    if(gemm_3d_depth > 1)
    {
        if(batch_size_on_z)
        {
            output_shape.shift_right(1);
        }
        output_shape.set(0, input.tensor_shape().x());
        output_shape.set(1, input.tensor_shape().y() / gemm_3d_depth);
        output_shape.set(2, gemm_3d_depth);
    }

    return output_shape;
}

/** Calculate the strided slice output shape of a tensor
 *
 * @param[in] input            Input tensor info
 * @param[in] starts           The starts of the dimensions of the input tensor to be sliced
 * @param[in] ends             The ends of the dimensions of the input tensor to be sliced
 * @param[in] strides          The strides of the dimensions of the input tensor to be sliced
 * @param[in] begin_mask       If the ith bit of begin_mask is set, starts[i] is ignored and the fullest possible range in that dimension is used instead.
 * @param[in] end_mask         If the ith bit of end_mask is set, ends[i] is ignored and the fullest possible range in that dimension is used instead.
 * @param[in] shrink_axis_mask If the ith bit of shrink_axis_mask is set, it implies that the ith specification shrinks the dimensionality by 1
 *
 * @return the calculated shape
 */
inline TensorShape compute_strided_slice_shape(const ITensorInfo &input,
                                               const Coordinates &starts, const Coordinates &ends, const Coordinates &strides,
                                               int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    using namespace arm_compute::helpers::tensor_transform;
    return compute_strided_slice_output_shape(input.tensor_shape(), starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
}

/** Calculate the slice output shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] starts      The starts of the dimensions of the input tensor to be sliced
 * @param[in] ends        The ends of the dimensions of the input tensor to be sliced
 *
 * @return the calculated shape
 */
inline TensorShape compute_slice_shape(const TensorShape &input_shape, const Coordinates &starts, const Coordinates &ends)
{
    using namespace arm_compute::helpers::tensor_transform;

    return compute_strided_slice_output_shape(input_shape,
                                              starts, ends, BiStrides(),
                                              0, construct_slice_end_mask(ends), 0);
}

/** Calculate the batch to space output shape of a tensor
 *
 * @param[in] input   Input tensor info
 * @param[in] block_x Block shape x value
 * @param[in] block_y Block shape y value
 *
 * @return the calculated shape
 */
inline TensorShape compute_batch_to_space_shape(const ITensorInfo *input, const int block_x, const int block_y)
{
    ARM_COMPUTE_ERROR_ON(block_x <= 0 || block_y <= 0);

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_batch   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(idx_width, input->tensor_shape()[idx_width] * block_x);
    output_shape.set(idx_height, input->tensor_shape()[idx_height] * block_y);
    output_shape.set(idx_batch, input->tensor_shape()[idx_batch] / (block_x * block_y));

    return output_shape;
}
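// Worked example (illustrative values): an NCHW input [W=2, H=2, C=1, N=8]
// with block_x = block_y = 2 becomes [2*2, 2*2, 1, 8/(2*2)] = [4, 4, 1, 2].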

/** Calculate the split output shape of a tensor
 *
 * @param[in] input      Input tensor info
 * @param[in] axis       Axis on which to split the input
 * @param[in] num_splits Number of splits
 *
 * @return the calculated shape
 */
inline TensorShape compute_split_shape(const ITensorInfo *input, unsigned int axis, unsigned int num_splits)
{
    TensorShape empty_shape;
    empty_shape.set(0, 0);

    TensorShape out_shape{ input->tensor_shape() };

    // Return empty shape if axis is invalid
    if(axis > input->tensor_shape().num_dimensions())
    {
        return empty_shape;
    }

    size_t axis_size = out_shape[axis];

    // Return empty shape if num_split is not valid
    if(axis_size % num_splits)
    {
        return empty_shape;
    }

    out_shape[axis] = axis_size / num_splits;
    return out_shape;
}
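// Worked example (illustrative values): splitting an [8, 4] tensor into 4 parts
// along axis 0 yields [2, 4] per split; a num_splits that does not divide the
// axis size (e.g. 3) returns the empty shape instead.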

/** Calculate the space to batch output shape of a tensor
 *
 * @param[in] input         Input tensor info
 * @param[in] block_x       Block shape x value
 * @param[in] block_y       Block shape y value
 * @param[in] padding_left  Left padding values
 * @param[in] padding_right Right padding values
 *
 * @return the calculated shape
 */
inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, const int block_x, const int block_y, const Size2D &padding_left, const Size2D &padding_right)
{
    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_batch   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    output_shape.set(idx_width, input->tensor_shape()[idx_width] * block_x + padding_left.x() + padding_right.x());
    output_shape.set(idx_height, input->tensor_shape()[idx_height] * block_y + padding_left.y() + padding_right.y());
    output_shape.set(idx_batch, input->tensor_shape()[idx_batch] / (block_x * block_y));

    return output_shape;
}

/** Calculate the prior box output shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] info  PriorBoxLayer info
 *
 * @return the calculated shape
 */
inline TensorShape compute_prior_box_shape(const ITensorInfo &input, const PriorBoxLayerInfo &info)
{
    DataLayout   data_layout = input.data_layout();
    const size_t idx_w       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int    num_priors  = info.aspect_ratios().size() * info.min_sizes().size() + info.max_sizes().size();

    TensorShape output_shape{};
    output_shape.set(0, input.dimension(idx_w) * input.dimension(idx_h) * num_priors * 4);
    output_shape.set(1, 2);

    return output_shape;
}
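// Illustrative example (hypothetical values): for a 10x10 feature map with 2 aspect ratios,
// 1 min size and 1 max size, num_priors = 2 * 1 + 1 = 3 and the output shape is
// [10 * 10 * 3 * 4, 2] = [1200, 2] (4 box coordinates per prior, with a second row of variances).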

/** Calculate the padded shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] padding     Paddings list
 *
 * @return the calculated shape
 */
inline TensorShape compute_padded_shape(const TensorShape &input_shape, const PaddingList &padding)
{
    TensorShape padded_shape = input_shape;
    for(size_t dim = 0; dim < padding.size(); ++dim)
    {
        const auto    &padding_pair   = padding[dim];
        const uint32_t shape_on_index = (padded_shape.num_dimensions() <= dim) ? 1 : input_shape[dim];
        padded_shape.set(dim, padding_pair.first + shape_on_index + padding_pair.second);
    }
    return padded_shape;
}
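// Illustrative example (hypothetical values): an input of shape [3, 2] padded with
// { {1, 1}, {0, 2} } becomes [1 + 3 + 1, 0 + 2 + 2] = [5, 4].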

/** Calculate the tiled shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] multiples   Multiples, i.e. the number of repetitions of the input along each dimension
 *
 * @return the calculated shape
 */
inline TensorShape compute_tiled_shape(const TensorShape &input_shape, const Multiples &multiples)
{
    TensorShape tiled_shape = input_shape;
    for(size_t dim = 0; dim < multiples.size(); ++dim)
    {
        tiled_shape.set(dim, input_shape[dim] * multiples[dim]);
    }
    return tiled_shape;
}
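// Illustrative example (hypothetical values): an input of shape [2, 3] tiled with
// multiples { 2, 3 } becomes [2 * 2, 3 * 3] = [4, 9].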

/** Calculate the reduced shape of a tensor given an axis
 *
 * @param[in] input Input tensor shape
 * @param[in] axis  Axis on which to perform reduction
 *
 * @return the calculated shape
 */
inline TensorShape compute_reduced_shape(const TensorShape &input, unsigned int axis)
{
    TensorShape output_shape{ input };
    output_shape.set(axis, 1);

    return output_shape;
}
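// Illustrative example (hypothetical values): reducing an input of shape [5, 7, 3] along
// axis 1 yields [5, 1, 3]; the reduced axis is kept with size 1 rather than removed.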

/** Calculate the upsampled shape of a tensor
 *
 * @param[in] input Input tensor info
 * @param[in] info  Contains stride information (x and y)
 *
 * @return the calculated shape
 */
inline TensorShape compute_upsample_shape(const ITensorInfo &input, const Size2D &info)
{
    const DataLayout data_layout = input.data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    TensorShape        scale_out_shape(input.tensor_shape());
    const unsigned int out_x = input.dimension(idx_width) * info.x();
    const unsigned int out_y = input.dimension(idx_height) * info.y();
    scale_out_shape.set(idx_width, out_x);
    scale_out_shape.set(idx_height, out_y);

    return scale_out_shape;
}
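// Illustrative example (hypothetical values): an input with W = 4 and H = 3 upsampled with
// strides (2, 2) gives W = 8 and H = 6; the remaining dimensions are unchanged.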

/** Get the tensor shape
 *
 * @param[in] data Input data
 *
 * @return the extracted tensor shape
 */
template <typename T>
inline TensorShape extract_shape(T *data)
{
    return data->info()->tensor_shape();
}

inline TensorShape extract_shape(ITensorInfo *data)
{
    return data->tensor_shape();
}
inline TensorShape extract_shape(const ITensorInfo *data)
{
    return data->tensor_shape();
}

inline TensorShape extract_shape(const TensorShape *data)
{
    return *data;
}

inline TensorShape extract_shape(TensorShape *data)
{
    return *data;
}
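// These overloads let the shape helpers below (e.g. calculate_concatenate_shape) accept
// ITensor, ITensorInfo or TensorShape pointers through a single template interface.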

/** Calculate the unstack shape of a tensor
 *
 * @param[in] input_shape Input tensor shape
 * @param[in] axis        Axis on which to perform the unstack operation
 *
 * @return the calculated shape
 */
inline TensorShape calculate_unstack_shape(TensorShape input_shape, unsigned int axis)
{
    ARM_COMPUTE_ERROR_ON(axis > input_shape.num_dimensions());
    input_shape.remove_dimension(axis);
    return input_shape;
}
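// Illustrative example (hypothetical values): unstacking an input of shape [4, 3, 2] along
// axis 1 gives tensors of shape [4, 2].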

/** Calculate the concatenate output shape of the concatenate operation along a single axis
 *
 * @param[in] input Vector containing pointers to the inputs (tensors, tensor infos or shapes)
 * @param[in] axis  Axis along which to concatenate the input tensors
 *
 * @return the calculated shape
 */
template <typename T>
inline TensorShape calculate_concatenate_shape(const std::vector<T *> &input, size_t axis)
{
    TensorShape out_shape = extract_shape(input[0]);

#if defined(ARM_COMPUTE_ASSERTS_ENABLED)
    // All dimensions must match except the axis one
    for(unsigned int i = 0; i < MAX_DIMS; ++i)
    {
        if(i == axis)
        {
            continue;
        }

        for(const auto &tensor : input)
        {
            ARM_COMPUTE_ERROR_ON(tensor == nullptr);
            const TensorShape shape = extract_shape(tensor);
            ARM_COMPUTE_ERROR_ON(out_shape[i] != shape[i]);
        }
    }
#endif // defined(ARM_COMPUTE_ASSERTS_ENABLED)

    // Calculate output shape
    size_t new_size = 0;
    for(const auto &tensor : input)
    {
        const TensorShape shape = extract_shape(tensor);
        new_size += shape[axis];
    }

    out_shape.set(axis, new_size);

    return out_shape;
}
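// Illustrative example (hypothetical values): concatenating inputs of shape [2, 3] and
// [4, 3] along axis 0 gives an output shape of [6, 3]; all non-axis dimensions must match.
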
/** Calculate the stack output shape of a tensor
 *
 * @param[in] a           Input tensor info
 * @param[in] axis        Axis on which to perform the stack operation
 * @param[in] num_tensors Number of tensors to stack
 *
 * @return the calculated shape
 */
inline TensorShape compute_stack_shape(const ITensorInfo &a, unsigned int axis, unsigned int num_tensors)
{
    ARM_COMPUTE_ERROR_ON(axis > a.num_dimensions());
    ARM_COMPUTE_ERROR_ON(a.num_dimensions() > 4);

    TensorShape shape_out{ a.tensor_shape() };
    shape_out.set(axis, num_tensors);

    unsigned int i_shift = 0;

    for(unsigned int i = 0; i < a.num_dimensions(); ++i)
    {
        if(i == axis)
        {
            i_shift++;
        }

        shape_out.set(i + i_shift, a.tensor_shape()[i]);
    }
    return shape_out;
}
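// Illustrative example (hypothetical values): stacking 4 tensors of shape [3, 2] along
// axis 0 gives an output shape of [4, 3, 2]; a new dimension of size num_tensors is
// inserted at the stack axis and the input dimensions shift up past it.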
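/** Calculate the gather output shape of a tensor
 *
 * @param[in] input_shape   Input tensor shape
 * @param[in] indices_shape Indices tensor shape
 * @param[in] actual_axis   Axis to be used in the computation
 *
 * @return the calculated shape
 */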
inline TensorShape compute_gather_shape(const TensorShape &input_shape, const TensorShape &indices_shape, uint32_t actual_axis)
{
    ARM_COMPUTE_ERROR_ON(indices_shape.num_dimensions() > 1);
    ARM_COMPUTE_ERROR_ON(input_shape.num_dimensions() > 4);
    ARM_COMPUTE_ERROR_ON(actual_axis >= input_shape.num_dimensions());

    TensorShape output_shape  = input_shape;
    output_shape[actual_axis] = indices_shape[0];

    return output_shape;
}
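// Illustrative example (hypothetical values): gathering 4 indices along axis 0 of an input
// of shape [5, 3] gives an output shape of [4, 3].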
} // namespace shape_calculator
} // namespace misc
} // namespace arm_compute
#endif /* __ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H__ */