/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H__
#define __ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H__

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/Utils.h"

#include <cmath>

namespace arm_compute
{
namespace misc
{
namespace shape_calculator
{
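/** Calculate the permuted shape of an input given a permutation vector
 *
 * @param[in] input Input tensor info
 * @param[in] perm  Permutation vector
 *
 * @return the calculated shape
 */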
inline TensorShape compute_permutation_output_shape(const ITensorInfo &input, const PermutationVector &perm)
{
    TensorShape output_shape = input.tensor_shape();
    permute(output_shape, perm);
    return output_shape;
}
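/** Calculate the reshaped shape of the weights used by a convolution layer.
 * Assuming a [kernel_w, kernel_h, IFM, OFM] weights layout, the result is
 * [ OFM, kernel_w * kernel_h * IFM (+1 if biased) ].
 *
 * @param[in] weights  Weights tensor info
 * @param[in] has_bias (Optional) Set to true if an extra element is appended for the bias
 *
 * @return the calculated shape of the reshaped weights
 */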
inline TensorShape compute_weights_reshaped_shape(const ITensorInfo &weights, bool has_bias = false)
{
    // Calculate output shape
    TensorShape weights_reshaped{ weights.tensor_shape() };
    weights_reshaped.collapse(3);
    const size_t tmp_dim = weights_reshaped[0];
    weights_reshaped.set(0, weights_reshaped[1]);
    weights_reshaped.set(1, tmp_dim + (has_bias ? 1 : 0));

    return weights_reshaped;
}
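/** Calculate the interleaved shape of an input tensor
 *
 * @param[in] a                         Input tensor info
 * @param[in] mult_interleave4x4_height (Optional) Interleave4x4 height multiplier
 *
 * @return the calculated shape
 */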
inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_interleave4x4_height = 1)
{
    // The interleaved output matrix will have the following shape: [ a_width * W, ceil(a_height / W) ] where W = 4 * mult_interleave4x4_height
    ARM_COMPUTE_ERROR_ON(mult_interleave4x4_height < 1);
    const int   interleave_width = 4 * mult_interleave4x4_height;
    TensorShape shape_interleaved_a{ a.tensor_shape() };
    shape_interleaved_a.set(0, a.dimension(0) * interleave_width);
    shape_interleaved_a.set(1, std::ceil(a.dimension(1) / static_cast<float>(interleave_width)));

    return shape_interleaved_a;
}
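/** Calculate the transposed 1xW shape of an input tensor
 *
 * @param[in] b Input tensor info
 *
 * @return the calculated shape
 */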
inline TensorShape compute_transpose1xW_shape(const ITensorInfo &b)
{
    // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
    TensorShape shape_transposed1xW_b{ b.tensor_shape() };
    shape_transposed1xW_b.set(0, b.dimension(1) * 16);
    shape_transposed1xW_b.set(1, std::ceil(b.dimension(0) / 16.f));

    return shape_transposed1xW_b;
}
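/** Calculate the transposed 1xW shape of an input tensor, taking the element size into account
 *
 * @param[in] b                       Input tensor info
 * @param[in] mult_transpose1xW_width (Optional) Transpose1xW width multiplier
 *
 * @return the calculated shape
 */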
inline TensorShape compute_transpose1xW_with_element_size_shape(const ITensorInfo &b, int mult_transpose1xW_width = 1)
{
    // Note: mult_transpose1xW_width expresses the number of 1xW chunks we want to store on the same row
    // The transpose1xW output matrix will have the following shape:
    // [ b_height * W, ceil(b_width / W) ] where W = (16 / element size of the tensor) * mult_transpose1xW_width
    ARM_COMPUTE_ERROR_ON(mult_transpose1xW_width < 1);
    TensorShape  shape_transposed1xW_b{ b.tensor_shape() };
    const size_t transpose_width = (16 / b.element_size()) * mult_transpose1xW_width;
    shape_transposed1xW_b.set(0, b.dimension(1) * transpose_width);
    shape_transposed1xW_b.set(1, static_cast<size_t>(std::ceil(b.dimension(0) / static_cast<float>(transpose_width))));

    return shape_transposed1xW_b;
}
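/** Calculate the shape of the column-sum vector (reduction A) used by GEMMLowp
 *
 * @param[in] b Input (matrix B) tensor info
 *
 * @return the calculated shape
 */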
inline TensorShape compute_reductionA_shape(const ITensorInfo &b)
{
    TensorShape shape_vector_sum_col{ b.tensor_shape() };
    if(shape_vector_sum_col.num_dimensions() > 1)
    {
        shape_vector_sum_col.remove_dimension(1);
    }

    return shape_vector_sum_col;
}
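/** Calculate the shape of the row-sum vector (reduction B) used by GEMMLowp
 *
 * @param[in] a Input (matrix A) tensor info
 *
 * @return the calculated shape
 */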
inline TensorShape compute_reductionB_shape(const ITensorInfo &a)
{
    TensorShape shape_vector_sum_row{ a.tensor_shape() };
    shape_vector_sum_row.set(Window::DimX, a.dimension(1));
    if(a.num_dimensions() > 1)
    {
        shape_vector_sum_row.remove_dimension(1);
    }

    return shape_vector_sum_row;
}
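/** Calculate the col2im output shape of a tensor
 *
 * @param[in] input          Input tensor info
 * @param[in] convolved_dims Convolved spatial dimensions (width, height)
 *
 * @return the calculated shape
 */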
inline TensorShape compute_col2im_shape(const ITensorInfo &input, std::pair<unsigned int, unsigned int> convolved_dims)
{
    TensorShape col2im_shape{ input.tensor_shape() };
    col2im_shape.set(0, convolved_dims.first);
    col2im_shape.set(1, convolved_dims.second);
    col2im_shape.set(2, input.tensor_shape()[0]);

    return col2im_shape;
}
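/** Calculate the transposed shape of an input tensor
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */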
inline TensorShape compute_transposed_shape(const ITensorInfo &input)
{
    TensorShape shape_transposed{ input.tensor_shape() };

    shape_transposed.set(0, input.dimension(1));
    shape_transposed.set(1, input.dimension(0));

    return shape_transposed;
}
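/** Calculate the output shape of a depthwise convolution
 *
 * @param[in] input            Input tensor info
 * @param[in] weights          Weights tensor info
 * @param[in] conv_info        Padding and stride information
 * @param[in] depth_multiplier Multiplier applied to the number of input channels
 *
 * @return the calculated shape
 */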
inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info, unsigned int depth_multiplier)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const DataLayout data_layout = input.data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_shape[width_idx], input_shape[height_idx],
                                                              weights_shape[width_idx], weights_shape[height_idx],
                                                              conv_info);

    TensorShape output_shape{ input_shape };
    output_shape.set(width_idx, output_width);
    output_shape.set(height_idx, output_height);
    output_shape.set(channel_idx, input_shape[channel_idx] * depth_multiplier);

    return output_shape;
}
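/** Calculate the upsampled output shape used by a deconvolution
 *
 * @param[in] input              Input tensor info
 * @param[in] sx                 Stride on the x axis
 * @param[in] sy                 Stride on the y axis
 * @param[in] inner_border_right Inner border added to the right of the upsampled output
 * @param[in] inner_border_top   Inner border added to the top of the upsampled output
 * @param[in] info               Padding and stride information
 *
 * @return the calculated shape
 */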
inline TensorShape compute_deconvolution_shape(const ITensorInfo &input, unsigned int sx, unsigned int sy, unsigned int inner_border_right, unsigned int inner_border_top, const PadStrideInfo &info)
{
    TensorShape        scale_out_shape(input.tensor_shape());
    const unsigned int out_x = input.dimension(0) + (input.dimension(0) - 1) * (sx - 1) + inner_border_right + 2 * info.pad().first;
    const unsigned int out_y = input.dimension(1) + (input.dimension(1) - 1) * (sy - 1) + inner_border_top + 2 * info.pad().second;
    scale_out_shape.set(0, out_x);
    scale_out_shape.set(1, out_y);

    return scale_out_shape;
}
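/** Calculate the im2col output shape used as GEMM input by a convolution layer
 *
 * @param[in] input       Input tensor info
 * @param[in] kernel_dims Kernel dimensions
 * @param[in] conv_info   Padding and stride information
 * @param[in] has_bias    Set to true if an extra element is appended for the bias
 * @param[in] dilation    Kernel dilation
 *
 * @return the calculated shape
 */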
inline TensorShape compute_im2col_conv_shape(const ITensorInfo *input, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation)
{
    // The output shape will be the 2D shape used as input for GEMM [ input_channels * kernel_area, num_elems_per_out_channel ]

    TensorShape output_shape{ input->tensor_shape() };

    const DataLayout data_layout = input->data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    std::pair<unsigned int, unsigned int> out_dims = scaled_dimensions(output_shape[width_idx], output_shape[height_idx], kernel_dims.width, kernel_dims.height, conv_info, dilation);
    output_shape.set(0, (output_shape[channel_idx] * kernel_dims.area() + (has_bias ? 1 : 0)));
    output_shape.set(1, (out_dims.first * out_dims.second));
    output_shape.set(2, 1);

    return output_shape;
}
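/** Calculate the im2col output shape used by a fully connected layer
 *
 * @param[in] input                Input tensor info
 * @param[in] num_input_dimensions (Optional) Number of leading dimensions to collapse
 *
 * @return the calculated shape
 */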
inline TensorShape compute_im2col_fc_shape(const ITensorInfo *input, const int num_input_dimensions = 3)
{
    TensorShape output_shape{ input->tensor_shape() };

    output_shape.collapse(num_input_dimensions);

    return output_shape;
}
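/** Calculate the im2col output shape used by a flatten layer
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */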
inline TensorShape compute_im2col_flatten_shape(const ITensorInfo *input)
{
    // The output shape will be the flattened version of the input (i.e. [ width * height * channels, 1, 1, ... ] ). Used for FlattenLayer.

    ARM_COMPUTE_ERROR_ON(input->num_dimensions() < 3);

    TensorShape output_shape{ input->tensor_shape() };

    const size_t flatten_shape = input->dimension(0) * input->dimension(1) * input->dimension(2);
    output_shape.set(0, flatten_shape);
    output_shape.remove_dimension(1);
    output_shape.remove_dimension(1);

    return output_shape;
}
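/** Calculate the interleaved shape of a tensor given custom x and y interleaving factors
 *
 * @param[in] input        Input tensor shape
 * @param[in] x_interleave Interleave factor along the x axis
 * @param[in] y_interleave Interleave factor along the y axis
 *
 * @return the calculated shape
 */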
inline TensorShape compute_interleave_custom_shape(const TensorShape &input, const int x_interleave, const int y_interleave)
{
    TensorShape output_shape{ input };

    output_shape.set(0, output_shape.x() * x_interleave);
    output_shape.set(1, std::ceil(output_shape.y() / static_cast<float>(y_interleave)));

    return output_shape;
}
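/** Calculate the reshaped weights shape used by a fully connected layer
 *
 * @param[in] input               Weights tensor info
 * @param[in] transpose_weights   Set to true if the weights still need to be transposed
 * @param[in] is_batched_fc_layer Set to true if the layer runs on multiple batches
 * @param[in] interleave          Interleave factor used for the batched 1xW transposition
 *
 * @return the calculated shape
 */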
inline TensorShape compute_fully_connected_reshaped_weights_shape(const ITensorInfo *input, bool transpose_weights, bool is_batched_fc_layer, const int interleave)
{
    TensorShape output_shape{ input->tensor_shape() };

    // Transpose weights if the user hasn't done it
    if(transpose_weights)
    {
        output_shape = compute_transposed_shape(*input);
    }

    // If we run multiple batches we need 1xW transpose, too.
    if(is_batched_fc_layer)
    {
        output_shape = compute_transposed_shape(input->clone()->set_tensor_shape(output_shape));
        output_shape = compute_interleave_custom_shape(output_shape, interleave, interleave);
    }

    return output_shape;
}

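/** Calculate the winograd filter transform output shape
 *
 * @param[in] input         Input (weights) tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */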
inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    TensorShape tensor_shape{ input.tensor_shape() };

    const Size2D kernel_size      = winograd_info.kernel_size;
    const Size2D output_tile_size = winograd_info.output_tile_size;
    const Size2D input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    tensor_shape.remove_dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH));
    tensor_shape.set(Window::DimX, input.dimension(3));
    tensor_shape.set(Window::DimY, input.dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL)));
    tensor_shape.set(Window::DimZ, input_tile_size.area());

    return tensor_shape;
}
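/** Calculate the winograd input transform output shape.
 * The input tile size per dimension is output_tile + kernel - 1;
 * e.g. a 3x3 kernel with a 2x2 output tile uses a 4x4 input tile.
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */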
inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        output_tile_size = winograd_info.output_tile_size;
    const Size2D        input_tile_size  = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);

    // Compute the number of output tiles along the x and y direction of the input
    const unsigned int num_tiles_x = std::ceil((input.tensor_shape().x() - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right()) / static_cast<float>(output_tile_size.width));
    const unsigned int num_tiles_y = std::ceil((input.tensor_shape().y() - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom()) / static_cast<float>(output_tile_size.height));

    const unsigned int width  = input.tensor_shape()[get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL)];
    const unsigned int height = num_tiles_x * num_tiles_y;
    const unsigned int depth  = input_tile_size.area();

    TensorShape output_shape{ input.tensor_shape() };
    output_shape.set(0, width);
    output_shape.set(1, height);
    output_shape.set(2, depth);

    return output_shape;
}
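/** Calculate the winograd output transform output shape
 *
 * @param[in] input         Input tensor info
 * @param[in] winograd_info Winograd information
 *
 * @return the calculated shape
 */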
inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    const Size2D        input_dimensions = winograd_info.input_dimensions;
    const DataLayout    data_layout      = winograd_info.output_data_layout;

    // Compute output shape
    unsigned int output_width  = 0;
    unsigned int output_height = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_dimensions.width, input_dimensions.height,
                                                              kernel_size.width, kernel_size.height, conv_info);

    TensorShape tensor_shape{ input.tensor_shape() };

    // Output dimension
    const unsigned int out_w = output_width;
    const unsigned int out_h = output_height;
    const unsigned int out_c = input.dimension(0);

    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH), out_w);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT), out_h);
    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL), out_c);

    return tensor_shape;
}
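/** Calculate the output shape of a deep (non-depthwise) convolution
 *
 * @param[in] input     Input tensor info
 * @param[in] weights   Weights tensor info
 * @param[in] conv_info Padding and stride information
 *
 * @return the calculated shape
 */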
inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info)
{
    const TensorShape input_shape{ input.tensor_shape() };
    const TensorShape weights_shape{ weights.tensor_shape() };

    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);

    const unsigned int input_width         = input_shape[idx_width];
    const unsigned int input_height        = input_shape[idx_height];
    const unsigned int weights_width       = weights_shape[idx_width];
    const unsigned int weights_height      = weights_shape[idx_height];
    const unsigned int weights_out_channel = weights_shape[3];
    unsigned int       output_width        = 0;
    unsigned int       output_height       = 0;
    std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, weights_width, weights_height, conv_info);

    TensorShape output_shape{ input_shape };
    output_shape.set(idx_width, output_width);
    output_shape.set(idx_height, output_height);
    output_shape.set(idx_channel, weights_out_channel);

    return output_shape;
}
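/** Calculate the output shape of a min/max computation: two values (min and max) along the x axis
 *
 * @param[in] input Input tensor info
 *
 * @return the calculated shape
 */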
inline TensorShape compute_min_max_shape(const ITensorInfo *input)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(Window::DimX, 2);
    output_shape.remove_dimension(1);
    output_shape.remove_dimension(1);

    return output_shape;
}

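/** Calculate the output pooled shape of a tensor
 *
 * @param[in] input     Input tensor info
 * @param[in] pool_info Pooling layer info
 *
 * @return the calculated shape
 */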
inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
{
    unsigned int pooled_w = 0;
    unsigned int pooled_h = 0;

    TensorShape output_shape{ input.tensor_shape() };

    const bool         is_global_pooling = pool_info.is_global_pooling();
    const unsigned int idx_width         = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
    const unsigned int idx_height        = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
    const unsigned int pool_size_x       = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size().width;
    const unsigned int pool_size_y       = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size().height;

    std::tie(pooled_w, pooled_h) = scaled_dimensions(output_shape[idx_width],
                                                     output_shape[idx_height],
                                                     pool_size_x,
                                                     pool_size_y,
                                                     pool_info.pad_stride_info());

    output_shape.set(idx_width, pooled_w);
    output_shape.set(idx_height, pooled_h);

    return output_shape;
}

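/** Calculate the RNN output shape of a tensor
 *
 * @param[in] input      Input tensor info
 * @param[in] batch_size Batch size
 *
 * @return the calculated shape
 */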
inline TensorShape compute_rnn_shape(const ITensorInfo *input, const unsigned int batch_size)
{
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(1, batch_size);

    return output_shape;
}
} // namespace shape_calculator
} // namespace misc
} // namespace arm_compute
#endif /* __ARM_COMPUTE_MISC_SHAPE_CALCULATOR_H__ */