/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H__
#define __ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H__

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/Utils.h"

#include "arm_compute/core/utils/helpers/tensor_transform.h"

#include <cmath>

namespace arm_compute
{
namespace misc
{
namespace shape_calculator
{
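/** Calculate the output tensor shape of a vector input given the convolution dimensions
 *
 * @param[in] input       Input tensor shape
 * @param[in] conv_w      Convolution width
 * @param[in] conv_h      Convolution height
 * @param[in] data_layout Data layout
 *
 * @return the calculated shape
 */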
inline TensorShape compute_vector_to_tensor_output_shape(const TensorShape &input, size_t conv_w, size_t conv_h, const DataLayout &data_layout)
{
    const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    TensorShape output_shape(input);
    output_shape.set(idx_w, conv_w);
    output_shape.set(idx_h, conv_h);
    output_shape.set(idx_c, input.x() / (conv_w * conv_h));

    return output_shape;
}
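/** Calculate the permuted shape of an input given a permutation vector
 *
 * @param[in] input Input tensor info
 * @param[in] perm  Permutation vector
 *
 * @return the calculated shape
 */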
inline TensorShape compute_permutation_output_shape(const ITensorInfo &input, const PermutationVector &perm)
{
    TensorShape output_shape = input.tensor_shape();
    permute(output_shape, perm);
    return output_shape;
}
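/** Calculate the reshaped shape of the weights
 *
 * @param[in] weights    Weights tensor info
 * @param[in] has_bias   (Optional) Set to true if there is bias
 * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape of the reshaped weights
 */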
inline TensorShape compute_weights_reshaped_shape(const ITensorInfo &weights, bool has_bias = false, unsigned int num_groups = 1)
{
    // A number of groups greater than one is only supported for the NCHW data layout, and the number of weights must be a multiple of it.
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(weights.data_layout() == DataLayout::NHWC && num_groups > 1);
    ARM_COMPUTE_ERROR_ON((weights.dimension(3) % num_groups) != 0);

    // Calculate output shape
    TensorShape weights_reshaped{ weights.tensor_shape() };
    weights_reshaped.set(3, weights_reshaped[3] / num_groups);

    weights_reshaped.collapse(3);
    const size_t tmp_dim = weights_reshaped[0];
    weights_reshaped.set(0, weights_reshaped[1]);
    weights_reshaped.set(1, tmp_dim + (has_bias ? 1 : 0));
    if(weights.num_dimensions() < 5)
    {
        weights_reshaped.set(2, num_groups);
    }

    return weights_reshaped;
}
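/** Calculate the interleaved shape of an input tensor
 *
 * @param[in] a                         Input tensor info
 * @param[in] mult_interleave4x4_height (Optional) Interleave4x4 height multiplier
 * @param[in] reinterpret_input_as_3d   (Optional) Set to true if the input needs to be reinterpreted as a 3D tensor
 *
 * @return the calculated shape
 */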
inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false)
{
    // The interleaved output matrix will have the following shape: [ a_width * W, ceil(a_height / W) ] where W = 4 * mult_interleave4x4_height
    ARM_COMPUTE_ERROR_ON(mult_interleave4x4_height < 1);
    const int   interleave_width = 4 * mult_interleave4x4_height;
    TensorShape shape_interleaved_a{ a.tensor_shape() };
    shape_interleaved_a.set(0, a.dimension(0) * interleave_width);
    if(reinterpret_input_as_3d)
    {
        const int M      = a.dimension(1) * a.dimension(2);
        const int height = std::ceil(M / static_cast<float>(interleave_width));
        shape_interleaved_a.set(1, height);
        shape_interleaved_a.remove_dimension(2);
    }
    else
    {
        shape_interleaved_a.set(1, std::ceil(a.dimension(1) / static_cast<float>(interleave_width)));
    }

    return shape_interleaved_a;
}
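/** Calculate the transposed 1xW shape
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */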
inline TensorShape compute_transpose1xW_shape(const ITensorInfo &b)
{
    // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
    TensorShape shape_transposed1xW_b{ b.tensor_shape() };
    shape_transposed1xW_b.set(0, b.dimension(1) * 16);
    shape_transposed1xW_b.set(1, std::ceil(b.dimension(0) / 16.f));

    return shape_transposed1xW_b;
}
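/** Calculate the transposed 1xW shape, taking the element size into account
 *
 * @param[in] b                       Input tensor info
 * @param[in] mult_transpose1xW_width (Optional) Transpose1xW width multiplier
 *
 * @return the calculated shape
 */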
inline TensorShape compute_transpose1xW_with_element_size_shape(const ITensorInfo &b, int mult_transpose1xW_width = 1)
{
    // Note: mult_transpose1xW_width expresses the number of chunks with size 1x(W) we want to store on the same row
    // The transpose1xW output matrix will have the following shape:
    // [ b_height * W, ceil(b_width / W) ] where W = (16 / element size of the tensor) * mult_transpose1xW_width
    ARM_COMPUTE_ERROR_ON(mult_transpose1xW_width < 1);
    TensorShape  shape_transposed1xW_b{ b.tensor_shape() };
    const size_t transpose_width = (16 / b.element_size()) * mult_transpose1xW_width;
    shape_transposed1xW_b.set(0, b.dimension(1) * transpose_width);
    shape_transposed1xW_b.set(1, static_cast<size_t>(std::ceil(b.dimension(0) / static_cast<float>(transpose_width))));

    return shape_transposed1xW_b;
}
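/** Calculate the reductionA shape used in GEMMLowp
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */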
inline TensorShape compute_reductionA_shape(const ITensorInfo &b)
{
    TensorShape shape_vector_sum_col{ b.tensor_shape() };
    if(shape_vector_sum_col.num_dimensions() > 1)
    {
        shape_vector_sum_col.remove_dimension(1);
    }

    return shape_vector_sum_col;
}
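/** Calculate the reductionB shape used in GEMMLowp
 *
 * @param[in] a Input tensor info
 *
 * @return the calculated shape
 */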
inline TensorShape compute_reductionB_shape(const ITensorInfo &a)
{
    TensorShape shape_vector_sum_row{ a.tensor_shape() };
    shape_vector_sum_row.set(Window::DimX, a.dimension(1));
    if(a.num_dimensions() > 1)
    {
        shape_vector_sum_row.remove_dimension(1);
    }

    return shape_vector_sum_row;
}
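/** Calculate the Col2Im shape
 *
 * @param[in] input           Input tensor info
 * @param[in] convolved_dims  Convolved dimensions
 * @param[in] batch_size_on_z True if batch size is on the z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape
 */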
inline TensorShape compute_col2im_shape(const ITensorInfo &input, const Size2D &convolved_dims, bool batch_size_on_z, unsigned int num_groups = 1)
{
    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(input.tensor_shape()[1] != (convolved_dims.area()));
    ARM_COMPUTE_ERROR_ON((num_groups > 1) && input.tensor_shape()[2] != num_groups);

    TensorShape col2im_shape{ input.tensor_shape() };
    col2im_shape.set(0, convolved_dims.width);
    col2im_shape.set(1, convolved_dims.height);
    col2im_shape.set(2, input.tensor_shape()[0] * num_groups);

    const unsigned int batch_idx = (batch_size_on_z && num_groups == 1) ? 2 : 3;
    col2im_shape.set(3, input.tensor_shape()[batch_idx]);

    return col2im_shape;
}
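/** Calculate the transposed shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */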
inline TensorShape compute_transposed_shape(const ITensorInfo &input)
{
    TensorShape shape_transposed{ input.tensor_shape() };

    shape_transposed.set(0, input.dimension(1));
    shape_transposed.set(1, input.dimension(0));

    return shape_transposed;
}
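/** Calculate the depthwise convolution output shape of a tensor
 *
 * @param[in] input            Input tensor info
 * @param[in] weights          Weights tensor info
 * @param[in] conv_info        Padding and stride information
 * @param[in] depth_multiplier Multiplier applied to the input's depth to retrieve the output's depth
 *
 * @return the calculated shape
 */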
inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info, unsigned int depth_multiplier)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_shape[width_idx], input_shape[height_idx],
                                                              weights_shape[width_idx], weights_shape[height_idx],
                                                              conv_info);

    TensorShape output_shape{ input_shape };
    output_shape.set(width_idx, output_width);
    output_shape.set(height_idx, output_height);
    output_shape.set(channel_idx, input_shape[channel_idx] * depth_multiplier);

    return output_shape;
}
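/** Calculate the upsampled output shape used by the deconvolution
 *
 * @param[in] input              Input tensor info
 * @param[in] sx                 Stride on the x axis
 * @param[in] sy                 Stride on the y axis
 * @param[in] inner_border_right The number of zeros added to the right edge of the input
 * @param[in] inner_border_top   The number of zeros added to the top edge of the input
 * @param[in] info               Padding and stride information
 *
 * @return the calculated shape
 */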
inline TensorShape compute_deconvolution_shape(const ITensorInfo &input, unsigned int sx, unsigned int sy, unsigned int inner_border_right, unsigned int inner_border_top, const PadStrideInfo &info)
{
    TensorShape        scale_out_shape(input.tensor_shape());
    const unsigned int out_x = input.dimension(0) + (input.dimension(0) - 1) * (sx - 1) + inner_border_right + 2 * info.pad().first;
    const unsigned int out_y = input.dimension(1) + (input.dimension(1) - 1) * (sy - 1) + inner_border_top + 2 * info.pad().second;
    scale_out_shape.set(0, out_x);
    scale_out_shape.set(1, out_y);

    return scale_out_shape;
}
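/** Calculate the Im2Col shape used by a convolution
 *
 * @param[in] input           Input tensor info
 * @param[in] kernel_dims     Kernel dimensions
 * @param[in] conv_info       Padding and stride information
 * @param[in] has_bias        True if a bias element is appended to each output column
 * @param[in] dilation        Dilation of the convolution
 * @param[in] batch_size_on_z True if batch size is on the z axis
 * @param[in] num_groups      (Optional) Number of groups when performing a grouped convolution
 *
 * @return the calculated shape
 */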
inline TensorShape compute_im2col_conv_shape(const ITensorInfo *input, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, bool batch_size_on_z,
                                             unsigned int num_groups = 1)
{
    // The output shape will be the 3D shape [ input_channels * kernel_area, num_elems_per_out_channel, batches ] if batch_size_on_z == true
    // or the 4D shape [ input_channels * kernel_area / num_groups, num_elems_per_out_channel, num_groups, batches ] if batch_size_on_z == false

    ARM_COMPUTE_ERROR_ON(num_groups == 0);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && input->data_layout() != DataLayout::NCHW);
    ARM_COMPUTE_ERROR_ON(num_groups > 1 && batch_size_on_z);

    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    std::pair<unsigned int, unsigned int> out_dims = scaled_dimensions(output_shape[width_idx], output_shape[height_idx], kernel_dims.width, kernel_dims.height, conv_info, dilation);
    output_shape.set(0, (output_shape[channel_idx] / num_groups * kernel_dims.area() + (has_bias ? 1 : 0))); // NOLINT
    output_shape.set(1, (out_dims.first * out_dims.second));
    if(batch_size_on_z && output_shape.num_dimensions() >= 3)
    {
        output_shape.remove_dimension(2);
    }
    else
    {
        output_shape.set(2, num_groups);
    }

    return output_shape;
}
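/** Calculate the flattened output shape of a tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */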
inline TensorShape compute_flatten_shape(const ITensorInfo *input)
{
    // The output shape will be the flattened version of the input (i.e. [ width * height * channels, num_batches, ... ] ). Used for FlattenLayer and FullyConnectedLayer.

    TensorShape output_shape{ input->tensor_shape() };

    output_shape.collapse(3);

    return output_shape;
}
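/** Calculate the interleaved shape of an input tensor with custom interleave factors
 *
 * @param[in] input        Input tensor shape
 * @param[in] x_interleave Interleave factor on the x axis
 * @param[in] y_interleave Interleave factor on the y axis
 *
 * @return the calculated shape
 */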
inline TensorShape compute_interleave_custom_shape(const TensorShape &input, const int x_interleave, const int y_interleave)
{
    TensorShape output_shape{ input };

    output_shape.set(0, output_shape.x() * x_interleave);
    output_shape.set(1, std::ceil(output_shape.y() / static_cast<float>(y_interleave)));

    return output_shape;
}
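/** Calculate the reshaped weights shape used by a fully connected layer
 *
 * @param[in] input               Weights tensor info
 * @param[in] transpose_weights   True if the weights still need to be transposed
 * @param[in] is_batched_fc_layer True if the fully connected layer runs on multiple batches
 * @param[in] interleave          Interleave factor used for both axes of the custom interleave
 *
 * @return the calculated shape
 */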
inline TensorShape compute_fully_connected_reshaped_weights_shape(const ITensorInfo *input, bool transpose_weights, bool is_batched_fc_layer, const int interleave)
{
    TensorShape output_shape{ input->tensor_shape() };

    // Transpose weights if the user hasn't done it
    if(transpose_weights)
    {
        output_shape = compute_transposed_shape(*input);
    }

    // If we run multiple batches we need the 1xW transpose, too.
    if(is_batched_fc_layer)
    {
        output_shape = compute_transposed_shape(input->clone()->set_tensor_shape(output_shape));
        output_shape = compute_interleave_custom_shape(output_shape, interleave, interleave);
    }

    return output_shape;
}

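/** Calculate the Winograd filter transform output shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */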
inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    TensorShape tensor_shape{ input.tensor_shape() };

    const Size2D kernel_size      = winograd_info.kernel_size;
    const Size2D output_tile_size = winograd_info.output_tile_size;
    const Size2D input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    tensor_shape.remove_dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH));
    tensor_shape.set(Window::DimX, input.dimension(3));
    tensor_shape.set(Window::DimY, input.dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL)));
    tensor_shape.set(Window::DimZ, input_tile_size.area());

    return tensor_shape;
}
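/** Calculate the Winograd input transform output shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */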
inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        output_tile_size = winograd_info.output_tile_size;
    const Size2D        input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    const size_t idx_w = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    // Compute the number of output tiles along the x and y direction of size "output_tile_size"
    const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(input.tensor_shape()[idx_w], input.tensor_shape()[idx_h]),
                                                                kernel_size,
                                                                output_tile_size,
                                                                conv_info);

    const unsigned int width  = input.tensor_shape()[idx_c];
    const unsigned int height = num_tiles.area();
    const unsigned int depth  = input_tile_size.area();

    TensorShape output_shape{ input.tensor_shape() };
    output_shape.set(0, width);
    output_shape.set(1, height);
    output_shape.set(2, depth);

    return output_shape;
}
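/** Calculate the Winograd output transform output shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */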
inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        input_dimensions = winograd_info.input_dimensions;
    const DataLayout    data_layout      = winograd_info.output_data_layout;

    // Compute output shape
    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_dimensions.width, input_dimensions.height,
                                                              kernel_size.width, kernel_size.height, conv_info);

    TensorShape tensor_shape{ input.tensor_shape() };

    // Output dimension
    const unsigned int out_w = output_width;
    const unsigned int out_h = output_height;
    const unsigned int out_c = input.dimension(0);

    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH), out_w);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT), out_h);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL), out_c);

    return tensor_shape;
}
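/** Calculate the output shape of a deep convolution
 *
 * @param[in] input     Input tensor info
 * @param[in] weights   Weights tensor info
 * @param[in] conv_info Padding and stride information
 *
 * @return the calculated shape
 */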
inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    const unsigned int input_width         = input_shape[idx_width];
    const unsigned int input_height        = input_shape[idx_height];
    const unsigned int weights_width       = weights_shape[idx_width];
    const unsigned int weights_height      = weights_shape[idx_height];
    const unsigned int weights_out_channel = weights_shape[3];
    unsigned int       output_width        = 0;
    unsigned int       output_height       = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, weights_width, weights_height, conv_info);

    TensorShape output_shape{ input_shape };
    output_shape.set(idx_width, output_width);
    output_shape.set(idx_height, output_height);
    output_shape.set(idx_channel, weights_out_channel);

    return output_shape;
}
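/** Calculate the min/max output shape of a tensor (two elements on the x axis)
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */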
inline TensorShape compute_min_max_shape(const ITensorInfo *input)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(Window::DimX, 2);
    output_shape.remove_dimension(1);
    output_shape.remove_dimension(1);

    return output_shape;
}

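/** Calculate the output pool shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */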
inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
{
    unsigned int pooled_w = 0;
    unsigned int pooled_h = 0;

    TensorShape output_shape{ input.tensor_shape() };

    const bool         is_global_pooling = pool_info.is_global_pooling();
    const unsigned int idx_width         = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height        = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const unsigned int pool_size_x       = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size().width;
    const unsigned int pool_size_y       = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size().height;

    std::tie(pooled_w, pooled_h) = scaled_dimensions(output_shape[idx_width],
                                                     output_shape[idx_height],
                                                     pool_size_x,
                                                     pool_size_y,
                                                     pool_info.pad_stride_info());

    output_shape.set(idx_width, pooled_w);
    output_shape.set(idx_height, pooled_h);

    return output_shape;
}

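/** Calculate the RNN output shape of a tensor
 *
 * @param[in] input      Input tensor info
 * @param[in] batch_size Batch size
 *
 * @return the calculated shape
 */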
inline TensorShape compute_rnn_shape(const ITensorInfo *input, const unsigned int batch_size)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(1, batch_size);

    return output_shape;
}
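/** Calculate the matrix multiplication output shape of two tensors
 *
 * @param[in] input0                    First input tensor info
 * @param[in] input1                    Second input tensor info
 * @param[in] is_interleaved_transposed True if the input tensors have been reshaped (interleaved/transposed)
 * @param[in] reshape_info              GEMM reshape information
 *
 * @return the calculated shape
 */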
inline TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info)
{
    ARM_COMPUTE_ERROR_ON_MSG(input0.num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
    ARM_COMPUTE_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The first input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");

    const bool reinterpret_input_as_3d  = reshape_info.reinterpret_input_as_3d();
    const bool reinterpret_output_as_3d = reshape_info.depth_output_gemm3d() != 1;
    const int  m                        = reshape_info.reinterpret_input_as_3d() ? input0.dimension(1) * input0.dimension(2) : input0.dimension(1);

    // If the output of GEMM has to be reinterpreted as 3D, the number of input0 rows (M) is obtained by collapsing the second and third
    // dimension of the output tensor
    const int dim0 = is_interleaved_transposed ? reshape_info.n() : input1.dimension(0);
    const int dim1 = is_interleaved_transposed ? reshape_info.m() / reshape_info.depth_output_gemm3d() : m / reshape_info.depth_output_gemm3d();
    const int dim2 = reinterpret_input_as_3d ? input0.tensor_shape()[3] : input0.tensor_shape()[2];
    const int dim3 = reinterpret_input_as_3d ? 1 : input0.tensor_shape()[3];

    TensorShape output_shape{ input0.tensor_shape() };

    output_shape.set(0, dim0);
    output_shape.set(1, dim1);
    output_shape.set(2, reinterpret_output_as_3d ? reshape_info.depth_output_gemm3d() : dim2);
    output_shape.set(3, reinterpret_output_as_3d ? dim2 : dim3);
    output_shape.set(4, reinterpret_output_as_3d ? dim3 : 1);

    return output_shape;
}

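/** Calculate the strided slice output shape of a tensor
 *
 * @param[in] input            Input tensor info
 * @param[in] starts           Start coordinates of the slice
 * @param[in] ends             End coordinates of the slice
 * @param[in] strides          Strides of the slice
 * @param[in] begin_mask       If the i-th bit is set, starts[i] is ignored and the fullest possible range is used instead
 * @param[in] end_mask         If the i-th bit is set, ends[i] is ignored and the fullest possible range is used instead
 * @param[in] shrink_axis_mask If the i-th bit is set, the i-th dimension is shrunk
 *
 * @return the calculated shape
 */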
inline TensorShape compute_strided_slice_shape(const ITensorInfo &input,
                                               const Coordinates &starts, const Coordinates &ends, const Coordinates &strides,
                                               int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    using namespace arm_compute::helpers::tensor_transform;

    const TensorShape &input_shape = input.tensor_shape();

    // Get actual start, end coordinates and strides
    const Coordinates final_strides = strided_slice_strides(input_shape, strides);
    const Coordinates starts_abs    = strided_slice_absolute_start_coords(input_shape, starts, final_strides, begin_mask);
    const Coordinates ends_abs      = strided_slice_absolute_end_coords(input_shape, starts_abs, ends, final_strides, end_mask, shrink_axis_mask);

    return compute_strided_slice_output_shape(input_shape, starts_abs, ends_abs, final_strides);
}

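/** Get the tensor shape from a tensor-like object: a tensor pointer (via its info()), an ITensorInfo pointer, or a TensorShape pointer
 *
 * @param[in] data Object to extract the shape from
 *
 * @return the extracted tensor shape
 */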
template <typename T>
inline TensorShape extract_shape(T *data)
{
    return data->info()->tensor_shape();
}

inline TensorShape extract_shape(ITensorInfo *data)
{
    return data->tensor_shape();
}

inline TensorShape extract_shape(const TensorShape *data)
{
    return *data;
}

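/** Calculate the depth concatenate output shape of a vector of tensors
 *
 * @param[in] inputs_vector Vector containing the tensors to concatenate along the z axis
 *
 * @return the calculated shape
 */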
template <typename T>
inline TensorShape calculate_depth_concatenate_shape(const std::vector<T *> &inputs_vector)
{
    TensorShape out_shape = extract_shape(inputs_vector[0]);

    size_t max_x = 0;
    size_t max_y = 0;
    size_t depth = 0;

    for(const auto &tensor : inputs_vector)
    {
        ARM_COMPUTE_ERROR_ON(tensor == nullptr);
        const TensorShape shape = extract_shape(tensor);
        max_x                   = std::max(shape.x(), max_x);
        max_y                   = std::max(shape.y(), max_y);
        depth += shape.z();
    }

    out_shape.set(0, max_x);
    out_shape.set(1, max_y);
    out_shape.set(2, depth);

    return out_shape;
}

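/** Calculate the width concatenate output shape of a vector of tensors
 *
 * @param[in] inputs_vector Vector containing the tensors to concatenate along the x axis
 *
 * @return the calculated shape
 */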
template <typename T>
inline TensorShape calculate_width_concatenate_shape(const std::vector<T *> &inputs_vector)
{
    TensorShape out_shape = extract_shape(inputs_vector[0]);

    size_t width = 0;
    for(const auto &tensor : inputs_vector)
    {
        ARM_COMPUTE_ERROR_ON(tensor == nullptr);
        const TensorShape shape = extract_shape(tensor);
        width += shape.x();
    }

    out_shape.set(0, width);

    return out_shape;
}
} // namespace shape_calculator
} // namespace misc
} // namespace arm_compute
#endif /* __ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H__ */