/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_TENSOR_OPERATIONS_H__
#define __ARM_COMPUTE_TEST_TENSOR_OPERATIONS_H__

#include "FixedPoint.h"
#include "Tensor.h"
#include "Types.h"
#include "Utils.h"
#include "support/ToolchainSupport.h"

#include "arm_compute/core/FixedPoint.h"
#include "arm_compute/core/Types.h"
#include "tests/validation/FixedPoint.h"
#include "tests/validation/ValidationUserConfiguration.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <random>
#include <vector>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace tensor_operations
{
namespace
{
template <class T>
struct is_floating_point
    : std::integral_constant < bool,
      std::is_same<float, typename std::remove_cv<T>::type>::value ||
#if ARM_COMPUTE_ENABLE_FP16
      std::is_same<float16_t, typename std::remove_cv<T>::type>::value ||
#endif
      std::is_same<double, typename std::remove_cv<T>::type>::value || std::is_same<long double, typename std::remove_cv<T>::type>::value >
{
};

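// Minimal sanity checks for the trait above (a sketch): it mirrors
// std::is_floating_point but additionally accepts float16_t when
// ARM_COMPUTE_ENABLE_FP16 is defined.
static_assert(is_floating_point<float>::value, "float must be classified as floating point");
static_assert(!is_floating_point<int8_t>::value, "int8_t must not be classified as floating point");
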
bool is_valid_pixel(int i, int min, int max)
{
    return (i >= min && i < max);
}

// 3D convolution for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int8_t fixed_point_position)
{
    const int half_width_weights  = width_weights / 2;
    const int half_height_weights = height_weights / 2;

    // Reset accumulator
    T acc = static_cast<T>(0);

    // Compute a 2D convolution for each IFM and accumulate the result
    for(int ifm = 0; ifm < depth_in; ++ifm)
    {
        // Compute the offset for the input slice
        const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;

        // Compute 2D convolution
        for(int yk = -half_height_weights; yk <= half_height_weights; ++yk)
        {
            for(int xk = -half_width_weights; xk <= half_width_weights; ++xk)
            {
                // Check if the pixel is out-of-bound
                if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
                {
                    const int idx = xk + half_width_weights;
                    const int idy = yk + half_height_weights;

                    const T i_value = in[offset_slice_in + xk + yk * width_in];
                    const T w_value = weights[idx + idy * width_weights + ifm * width_weights * height_weights];

                    acc += i_value * w_value;
                }
            }
        }
    }

    // Accumulate the bias and store the result
    *out = acc + (*bias);
}

// 3D convolution for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights,
                   int8_t fixed_point_position)
{
    const int half_width_weights  = width_weights / 2;
    const int half_height_weights = height_weights / 2;

    using namespace fixed_point_arithmetic;
    using promoted_type = typename fixed_point_arithmetic::traits::promote<T>::type;

    // Reset accumulator
    fixed_point<promoted_type> acc(0, fixed_point_position);

    // Compute a 2D convolution for each IFM and accumulate the result
    for(int ifm = 0; ifm < depth_in; ++ifm)
    {
        // Compute the offset for the input slice
        const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;

        // Compute 2D convolution
        for(int yk = -half_height_weights; yk <= half_height_weights; ++yk)
        {
            for(int xk = -half_width_weights; xk <= half_width_weights; ++xk)
            {
                // Check if the pixel is out-of-bound
                if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
                {
                    const int idx = xk + half_width_weights;
                    const int idy = yk + half_height_weights;

                    const fixed_point<promoted_type> i_value(in[offset_slice_in + xk + yk * width_in], fixed_point_position, true);
                    const fixed_point<promoted_type> w_value(weights[idx + idy * width_weights + ifm * width_weights * height_weights], fixed_point_position, true);
                    const fixed_point<promoted_type> iw = i_value * w_value;
                    acc                                 = iw + acc;
                }
            }
        }
    }

    // Get the bias
    const fixed_point<promoted_type> b(*bias, fixed_point_position, true);

    // Accumulate the bias and convert back
    acc = acc + b;
    fixed_point<T> res(acc);
    *out = res.raw();
}

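// Worked example of the fixed-point format assumed above (illustrative):
// with fixed_point_position = 3 the raw integer r encodes the value r / 2^3,
// so a raw 12 means 1.5. Constructors taking a trailing 'true' flag treat
// their argument as such a raw value, while multiplication re-normalises the
// product back into the same Q format.
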
template <typename T>
void vector_matrix_multiply(const T *in, const T *weights, const T *bias, T *out, int cols_weights, int rows_weights, uint8_t fixed_point_position)
{
    for(int x = 0; x < cols_weights; ++x)
    {
        T acc = 0.0f;
        for(int y = 0; y < rows_weights; ++y)
        {
            acc += in[y] * weights[x + y * cols_weights];
        }
        out[x] = acc + bias[x];
    }
}

template <>
void vector_matrix_multiply(const int8_t *in, const int8_t *weights, const int8_t *bias, int8_t *out, int cols_weights, int rows_weights, uint8_t fixed_point_position)
{
    using namespace fixed_point_arithmetic;
    using promoted_type = typename fixed_point_arithmetic::traits::promote<int8_t>::type;

    for(int x = 0; x < cols_weights; ++x)
    {
        // Reset accumulator
        fixed_point<promoted_type> acc(0, fixed_point_position);

        for(int y = 0; y < rows_weights; ++y)
        {
            const fixed_point<promoted_type> i_value(in[y], fixed_point_position, true);
            const fixed_point<promoted_type> w_value(weights[x + y * cols_weights], fixed_point_position, true);
            const fixed_point<promoted_type> iw = i_value * w_value;
            acc                                 = iw + acc;
        }

        // Get the bias
        const fixed_point<int8_t> b(bias[x], fixed_point_position, true);

        // Convert back and accumulate the bias
        fixed_point<int8_t> res(acc);
        res = res + b;

        // Store the result
        out[x] = res.raw();
    }
}

// Return a tensor element at a specified coordinate with different border modes
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
T tensor_elem_at(const Tensor<T> &in, Coordinates &coord, BorderMode border_mode, T constant_border_value)
{
    const int x      = coord.x();
    const int y      = coord.y();
    const int width  = static_cast<int>(in.shape().x());
    const int height = static_cast<int>(in.shape().y());

    // If the coordinates are beyond the range of the tensor's width or height
    if(x < 0 || y < 0 || x >= width || y >= height)
    {
        if(border_mode == BorderMode::REPLICATE)
        {
            coord.set(0, std::max(0, std::min(x, width - 1)));
            coord.set(1, std::max(0, std::min(y, height - 1)));
            return in[coord2index(in.shape(), coord)];
        }
        else
        {
            return constant_border_value;
        }
    }
    else
    {
        return in[coord2index(in.shape(), coord)];
    }
}

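// Border handling sketch (illustrative): for a 4x4 tensor, reading coordinate
// (-1, 2) under BorderMode::REPLICATE clamps to (0, 2) and returns that
// element, whereas under any other mode the call returns
// constant_border_value without touching the tensor.
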
/** Apply 2D spatial filter on a single element of @p in at coordinates @p coord
 *
 * - Filter sizes have to be odd numbers
 * - Row major order of filter assumed
 * - TO_ZERO rounding policy assumed
 * - SATURATE convert policy assumed
 *
 */
template <typename T1, typename T2, typename T3>
void apply_2d_spatial_filter(Coordinates coord, const Tensor<T1> &in, Tensor<T3> &out, const TensorShape &filter_shape, const T2 *filter_itr, float scale, BorderMode border_mode,
                             T1 constant_border_value = 0)
{
    double    val = 0;
    const int x   = coord.x();
    const int y   = coord.y();
    for(int j = y - static_cast<int>(filter_shape[1] / 2); j <= y + static_cast<int>(filter_shape[1] / 2); ++j)
    {
        for(int i = x - static_cast<int>(filter_shape[0] / 2); i <= x + static_cast<int>(filter_shape[0] / 2); ++i)
        {
            coord.set(0, i);
            coord.set(1, j);
            val += static_cast<double>(*filter_itr) * tensor_elem_at(in, coord, border_mode, constant_border_value);
            ++filter_itr;
        }
    }
    coord.set(0, x);
    coord.set(1, y);
    const double rounded_val = support::cpp11::trunc(val * static_cast<double>(scale));
    out[coord2index(in.shape(), coord)] = saturate_cast<T3>(rounded_val);
}
} // namespace

// Sobel 3x3
template <typename T1, typename T2>
void sobel_3x3(Tensor<T1> &in, Tensor<T2> &out_x, Tensor<T2> &out_y, BorderMode border_mode, uint8_t constant_border_value)
{
    const std::array<int8_t, 9> sobel_x{ { -1, 0, 1, -2, 0, 2, -1, 0, 1 } };
    const std::array<int8_t, 9> sobel_y{ { -1, -2, -1, 0, 0, 0, 1, 2, 1 } };

    for(int element_idx = 0; element_idx < in.num_elements(); ++element_idx)
    {
        const Coordinates id = index2coord(in.shape(), element_idx);

        apply_2d_spatial_filter(id, in, out_x, TensorShape(3U, 3U), sobel_x.data(), 1.f, border_mode, constant_border_value);
        apply_2d_spatial_filter(id, in, out_y, TensorShape(3U, 3U), sobel_y.data(), 1.f, border_mode, constant_border_value);
    }
}

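// Minimal usage sketch (illustrative; the tensor construction details are an
// assumption, not part of this header):
//
//     Tensor<uint8_t> src = /* 2D input image */;
//     Tensor<int16_t> grad_x = /* same shape as src */;
//     Tensor<int16_t> grad_y = /* same shape as src */;
//     sobel_3x3(src, grad_x, grad_y, BorderMode::CONSTANT, 0);
//
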
// Sobel 5x5
template <typename T1, typename T2>
void sobel_5x5(Tensor<T1> &in, Tensor<T2> &out_x, Tensor<T2> &out_y, BorderMode border_mode, uint8_t constant_border_value)
{
    const std::array<int8_t, 25> sobel_x{ {
            -1, -2, 0, 2, 1,
            -4, -8, 0, 8, 4,
            -6, -12, 0, 12, 6,
            -4, -8, 0, 8, 4,
            -1, -2, 0, 2, 1
        } };

    const std::array<int8_t, 25> sobel_y{ {
            -1, -4, -6, -4, -1,
            -2, -8, -12, -8, -2,
            0, 0, 0, 0, 0,
            2, 8, 12, 8, 2,
            1, 4, 6, 4, 1
        } };

    for(int element_idx = 0; element_idx < in.num_elements(); ++element_idx)
    {
        const Coordinates id = index2coord(in.shape(), element_idx);

        apply_2d_spatial_filter(id, in, out_x, TensorShape(5U, 5U), sobel_x.data(), 1.f, border_mode, constant_border_value);
        apply_2d_spatial_filter(id, in, out_y, TensorShape(5U, 5U), sobel_y.data(), 1.f, border_mode, constant_border_value);
    }
}

// Mean and standard deviation
template <typename T1>
void mean_and_standard_deviation(const Tensor<T1> &in, float &mean, float &std_dev)
{
    int num_elements = in.num_elements();

    // Calculate mean
    mean = 0.f;
    for(int i = 0; i < num_elements; ++i)
    {
        mean += in[i];
    }
    mean /= num_elements;

    // Calculate standard deviation
    std_dev = 0.f;
    for(int i = 0; i < num_elements; ++i)
    {
        std_dev += (mean - in[i]) * (mean - in[i]);
    }
    std_dev = sqrt(std_dev / num_elements);
}

// Integral Image
void integral_image(const Tensor<uint8_t> &in, Tensor<uint32_t> &out)
{
    // Length of dimensions
    const size_t width  = in.shape().x();
    const size_t height = in.shape().y();
    const size_t depth  = in.shape().z() * in.shape()[3] * in.shape()[4] * in.shape()[5];

    const size_t image_size = width * height;

    for(size_t z = 0; z < depth; ++z)
    {
        size_t current_image = z * image_size;

        // First element of each image
        out[current_image] = in[current_image];

        // First row of each image (add only pixel on the left)
        for(size_t x = 1; x < width; ++x)
        {
            out[current_image + x] = static_cast<uint32_t>(in[current_image + x]) + out[current_image + x - 1];
        }

        // Subsequent rows
        for(size_t y = 1; y < height; ++y)
        {
            size_t current_row = current_image + (width * y);

            // First element of each row (add only pixel up)
            out[current_row] = static_cast<uint32_t>(in[current_row]) + out[current_row - width];

            // Following row elements
            for(size_t x = 1; x < width; ++x)
            {
                size_t current_pixel = current_row + x;

                // out = in + up(out) + left(out) - up_left(out)
                out[current_pixel] = static_cast<uint32_t>(in[current_pixel]) + out[current_pixel - 1]
                                     + out[current_pixel - width] - out[current_pixel - width - 1];
            }
        }
    }
}

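// Worked example of the recurrence above on a 2x2 image (illustrative):
// in = [1 2; 3 4] produces out = [1 3; 4 10], where the bottom-right entry is
// computed as in + left + up - up_left = 4 + 4 + 3 - 1 = 10, i.e. the sum of
// every pixel in the rectangle from the origin.
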
// Absolute difference
template <typename T1, typename T2, typename T3>
void absolute_difference(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        intermediate_type val = std::abs(static_cast<intermediate_type>(in1[i]) - static_cast<intermediate_type>(in2[i]));
        out[i]                = saturate_cast<T3>(val);
    }
}

// Accumulate
template <typename T1, typename T2>
void accumulate(const Tensor<T1> &in, Tensor<T2> &out)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2>::intermediate_type;

    for(int i = 0; i < in.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(out[i]) + static_cast<intermediate_type>(in[i]);
        out[i]                = saturate_cast<T2>(val);
    }
}

// Accumulate squared
template <typename T1, typename T2>
void accumulate_squared(const Tensor<T1> &in, Tensor<T2> &out, uint32_t shift)
{
    if(shift > 15)
    {
        ARM_COMPUTE_ERROR("Shift in accumulate_squared must be within the range [0, 15]");
    }
    using intermediate_type = typename common_promoted_signed_type<T1, T2>::intermediate_type;
    intermediate_type denom = 1 << shift;

    for(int i = 0; i < in.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(out[i]) + (static_cast<intermediate_type>(in[i]) * static_cast<intermediate_type>(in[i]) / denom);
        out[i]                = saturate_cast<T2>(val);
    }
}

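// Worked example of the shift above (illustrative): with in[i] = 6 and
// shift = 2 the update adds 6 * 6 / 2^2 = 9 to the accumulator, so the shift
// acts as a power-of-two down-scale of the squared input before accumulation.
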
// Accumulate weighted
template <typename T>
void accumulate_weighted(const Tensor<T> &in, Tensor<T> &out, float alpha)
{
    if(alpha < 0.f || alpha > 1.f)
    {
        ARM_COMPUTE_ERROR("Weight (alpha) specified in accumulate_weighted must be within the range [0, 1]");
    }
    using intermediate_type = typename common_promoted_signed_type<T>::intermediate_type;

    for(int i = 0; i < in.num_elements(); ++i)
    {
        double val = (1. - static_cast<double>(alpha)) * static_cast<intermediate_type>(out[i]) + static_cast<double>(alpha) * static_cast<intermediate_type>(in[i]);
        out[i]     = static_cast<T>(val);
    }
}

// Arithmetic addition
template <typename T1, typename T2, typename T3>
void arithmetic_addition(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out, ConvertPolicy convert_policy)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(in1[i]) + static_cast<intermediate_type>(in2[i]);
        out[i]                = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T3>(val) : static_cast<T3>(val);
    }
}

// Arithmetic subtraction
template <typename T1, typename T2, typename T3>
void arithmetic_subtraction(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out, ConvertPolicy convert_policy)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(in1[i]) - static_cast<intermediate_type>(in2[i]);
        out[i]                = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T3>(val) : static_cast<T3>(val);
    }
}

// Bitwise and
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_and(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out)
{
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        out[i] = in1[i] & in2[i];
    }
}

// Bitwise or
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_or(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out)
{
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        out[i] = in1[i] | in2[i];
    }
}

// Bitwise xor
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_xor(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out)
{
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        out[i] = in1[i] ^ in2[i];
    }
}

// Bitwise not
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_not(const Tensor<T> &in, Tensor<T> &out)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = ~in[i];
    }
}

// Box3x3 filter
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void box3x3(const Tensor<T> &in, Tensor<T> &out, BorderMode border_mode, T constant_border_value)
{
    const std::array<T, 9> filter{ { 1, 1, 1, 1, 1, 1, 1, 1, 1 } };
    float scale = 1.f / static_cast<float>(filter.size());
    for(int element_idx = 0; element_idx < in.num_elements(); ++element_idx)
    {
        const Coordinates id = index2coord(in.shape(), element_idx);
        apply_2d_spatial_filter(id, in, out, TensorShape(3U, 3U), filter.data(), scale, border_mode, constant_border_value);
    }
}

// Depth conversion
template <typename T1, typename T2>
void depth_convert(const Tensor<T1> &in, Tensor<T2> &out, ConvertPolicy policy, uint32_t shift)
{
    ARM_COMPUTE_ERROR("The conversion is not supported");
}

template <>
void depth_convert<int8_t, float>(const Tensor<int8_t> &in, Tensor<float> &out, ConvertPolicy policy, uint32_t shift)
{
    const int8_t fixed_point_position = static_cast<int8_t>(in.fixed_point_position());
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<float>(in[i]) * (1.0f / (1 << fixed_point_position));
    }
}

template <>
void depth_convert<float, int8_t>(const Tensor<float> &in, Tensor<int8_t> &out, ConvertPolicy policy, uint32_t shift)
{
    const int8_t fixed_point_position = static_cast<int8_t>(in.fixed_point_position());
    for(int i = 0; i < in.num_elements(); ++i)
    {
        float val = in[i] * (1 << fixed_point_position) + 0.5f;
        out[i]    = ((policy == ConvertPolicy::SATURATE) ? saturate_cast<int8_t>(val) : static_cast<int8_t>(val));
    }
}

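// Worked example of the float -> QS8 conversion above (illustrative): with
// fixed_point_position = 3, the value 1.5f maps to 1.5 * 2^3 + 0.5 = 12.5 and
// is stored as the raw integer 12; the inverse specialisation divides by 2^3
// to recover 1.5f.
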
template <>
void depth_convert<uint8_t, uint16_t>(const Tensor<uint8_t> &in, Tensor<uint16_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<uint16_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<uint8_t, int16_t>(const Tensor<uint8_t> &in, Tensor<int16_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<int16_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<uint8_t, int32_t>(const Tensor<uint8_t> &in, Tensor<int32_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<int32_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<uint16_t, uint8_t>(const Tensor<uint16_t> &in, Tensor<uint8_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        uint16_t val = in[i] >> shift;
        out[i]       = ((policy == ConvertPolicy::SATURATE) ? saturate_cast<uint8_t>(val) : static_cast<uint8_t>(val));
    }
}

template <>
void depth_convert<uint16_t, uint32_t>(const Tensor<uint16_t> &in, Tensor<uint32_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<uint32_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<int16_t, uint8_t>(const Tensor<int16_t> &in, Tensor<uint8_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        int16_t val = in[i] >> shift;
        out[i]      = ((policy == ConvertPolicy::SATURATE) ? saturate_cast<uint8_t>(val) : static_cast<uint8_t>(val));
    }
}

template <>
void depth_convert<int16_t, int32_t>(const Tensor<int16_t> &in, Tensor<int32_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<int32_t>(in[i]) << shift;
    }
}

// Gaussian3x3 filter
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void gaussian3x3(const Tensor<T> &in, Tensor<T> &out, BorderMode border_mode, T constant_border_value)
{
    const std::array<T, 9> filter{ { 1, 2, 1, 2, 4, 2, 1, 2, 1 } };
    const float scale = 1.f / 16.f;
    for(int element_idx = 0; element_idx < in.num_elements(); ++element_idx)
    {
        const Coordinates id = index2coord(in.shape(), element_idx);
        apply_2d_spatial_filter(id, in, out, TensorShape(3U, 3U), filter.data(), scale, border_mode, constant_border_value);
    }
}

// Gaussian5x5 filter
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void gaussian5x5(const Tensor<T> &in, Tensor<T> &out, BorderMode border_mode, T constant_border_value)
{
    const std::array<T, 25> filter{ {
            1, 4, 6, 4, 1,
            4, 16, 24, 16, 4,
            6, 24, 36, 24, 6,
            4, 16, 24, 16, 4,
            1, 4, 6, 4, 1
        } };
    const float scale = 1.f / 256.f;
    for(int element_idx = 0; element_idx < in.num_elements(); ++element_idx)
    {
        const Coordinates id = index2coord(in.shape(), element_idx);
        apply_2d_spatial_filter(id, in, out, TensorShape(5U, 5U), filter.data(), scale, border_mode, constant_border_value);
    }
}

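// Note on the scale factors above: each Gaussian kernel is normalised by the
// sum of its coefficients, which is 16 for the 3x3 kernel and 256 for the 5x5
// kernel, so the filtered output stays in the input's value range.
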
// Matrix multiplication for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void gemm(const Tensor<T> &in1, const Tensor<T> &in2, const Tensor<T> &in3, Tensor<T> &out, float alpha, float beta)
{
    const int M = out.shape().y();
    const int N = out.shape().x();
    const int K = in1.shape().x();

    for(int r = 0; r < M; ++r)
    {
        for(int c = 0; c < N; ++c)
        {
            T acc = 0.0f;

            for(int k = 0; k < K; ++k)
            {
                const T a0 = in1[r * K + k];
                const T b0 = in2[k * N + c];

                acc += a0 * b0;
            }

            // Finalize the result: A * B * alpha + C * beta
            const T c0     = in3[c + r * N];
            out[c + r * N] = alpha * acc + beta * c0;
        }
    }
}

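// Layout sketch for the two gemm overloads (illustrative): all matrices are
// row-major, with in1 of size MxK, in2 of size KxN and in3/out of size MxN,
// so the call computes out = alpha * in1 * in2 + beta * in3 over the MxN
// output.
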
// Matrix multiplication for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void gemm(const Tensor<T> &in1, const Tensor<T> &in2, const Tensor<T> &in3, Tensor<T> &out, float alpha, float beta)
{
    using namespace fixed_point_arithmetic;

    using promoted_type = typename fixed_point_arithmetic::traits::promote<T>::type;

    const int    M                    = out.shape().y();
    const int    N                    = out.shape().x();
    const int    K                    = in1.shape().x();
    const int8_t fixed_point_position = static_cast<int8_t>(in1.fixed_point_position());

    const fixed_point<T> alpha_q(alpha, fixed_point_position);
    const fixed_point<T> beta_q(beta, fixed_point_position);

    for(int r = 0; r < M; ++r)
    {
        for(int c = 0; c < N; ++c)
        {
            fixed_point<promoted_type> acc_q(0, fixed_point_position);

            for(int k = 0; k < K; ++k)
            {
                const fixed_point<promoted_type> a0_q(in1[r * K + k], fixed_point_position, true);
                const fixed_point<promoted_type> b0_q(in2[k * N + c], fixed_point_position, true);
                const fixed_point<promoted_type> axb_q = a0_q * b0_q;

                acc_q = axb_q + acc_q;
            }

            // Finalize the result: A * B * alpha + C * beta
            const fixed_point<T> c0_q(in3[c + r * N], fixed_point_position, true);

            fixed_point<T> res_q(acc_q);
            res_q = alpha_q * res_q;
            res_q = (c0_q * beta_q) + res_q;

            // Store the result
            out[c + r * N] = res_q.raw();
        }
    }
}

// Non linear filter
template <typename T>
void non_linear_filter(const Tensor<T> &in, Tensor<T> &out, NonLinearFilterFunction function, unsigned int mask_size,
                       MatrixPattern pattern, const uint8_t *mask, BorderMode border_mode, uint8_t constant_border_value)
{
    ARM_COMPUTE_ERROR_ON(pattern == MatrixPattern::OTHER && mask == nullptr);

    using intermediate_type = typename common_promoted_signed_type<T>::intermediate_type;

    const int                      sq_mask_size   = mask_size * mask_size;
    const int                      half_mask_size = mask_size / 2;
    std::vector<intermediate_type> vals(sq_mask_size);
    intermediate_type              current_value = 0;

    const ValidRegion valid_region = shape_to_valid_region(in.shape(), border_mode == BorderMode::UNDEFINED, BorderSize(half_mask_size));

    for(int element_idx = 0, count = 0, index = 0; element_idx < in.num_elements(); ++element_idx, count = 0, index = 0)
    {
        Coordinates id = index2coord(in.shape(), element_idx);
        if(is_in_valid_region(valid_region, id))
        {
            int idx = id.x();
            int idy = id.y();
            for(int y = idy - half_mask_size; y <= idy + half_mask_size; ++y)
            {
                for(int x = idx - half_mask_size; x <= idx + half_mask_size; ++x, ++index)
                {
                    id.set(0, x);
                    id.set(1, y);
                    current_value = tensor_elem_at(in, id, border_mode, constant_border_value);

                    if(mask[index] == 255)
                    {
                        vals[count] = static_cast<intermediate_type>(current_value);
                        ++count;
                    }
                }
            }
            std::sort(vals.begin(), vals.begin() + count);
            switch(function)
            {
                case NonLinearFilterFunction::MIN:
                    out[element_idx] = saturate_cast<T>(vals[0]);
                    break;
                case NonLinearFilterFunction::MAX:
                    out[element_idx] = saturate_cast<T>(vals[count - 1]);
                    break;
                case NonLinearFilterFunction::MEDIAN:
                    out[element_idx] = saturate_cast<T>(vals[count / 2]);
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported NonLinearFilter function.");
            }
        }
    }
}

// Pixel-wise multiplication
template <typename T1, typename T2, typename T3>
void pixel_wise_multiplication(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
{
    if(scale < 0)
    {
        ARM_COMPUTE_ERROR("Scale of pixel-wise multiplication must be non-negative");
    }
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        double val = static_cast<intermediate_type>(in1[i]) * static_cast<intermediate_type>(in2[i]) * static_cast<double>(scale);
        if(is_floating_point<T3>::value)
        {
            out[i] = val;
        }
        else
        {
            double rounded_val = 0;
            switch(rounding_policy)
            {
                case(RoundingPolicy::TO_ZERO):
                    rounded_val = support::cpp11::trunc(val);
                    break;
                case(RoundingPolicy::TO_NEAREST_UP):
                    rounded_val = round_half_up(val);
                    break;
                case(RoundingPolicy::TO_NEAREST_EVEN):
                    rounded_val = round_half_even(val);
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported rounding policy");
            }
            out[i] = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T3>(rounded_val) : static_cast<T3>(rounded_val);
        }
    }
}

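// Worked example of the rounding policies above (illustrative): for
// val = 2.5, TO_ZERO truncates to 2, TO_NEAREST_UP rounds to 3 and
// TO_NEAREST_EVEN rounds to 2 (the nearest even integer); for val = 3.5 the
// last two both give 4.
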
// Fixed-point pixel-wise multiplication
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void fixed_point_pixel_wise_multiplication(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out, int scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
{
    using namespace fixed_point_arithmetic;

    const int fixed_point_position = in1.fixed_point_position();

    ARM_COMPUTE_ERROR_ON_MSG(in1.data_type() != in2.data_type() || in1.data_type() != out.data_type(),
                             "Tensors must all have the same DataType");
    ARM_COMPUTE_ERROR_ON_MSG(fixed_point_position != in2.fixed_point_position() || fixed_point_position != out.fixed_point_position(),
                             "Fixed-point position must be the same for both inputs and outputs");

    // Validate fixed_point_position
    ARM_COMPUTE_ERROR_ON((in1.data_type() == DataType::QS8) && (fixed_point_position == 0 || fixed_point_position > 7));
    ARM_COMPUTE_ERROR_ON((in1.data_type() == DataType::QS16) && (fixed_point_position == 0 || fixed_point_position > 15));

    fixed_point<T> fp_scale(scale, fixed_point_position);
    const bool     is_sat     = convert_policy == ConvertPolicy::SATURATE;
    const bool     do_scaling = scale != 1;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        fixed_point<T> val1(in1[i], fixed_point_position, true);
        fixed_point<T> val2(in2[i], fixed_point_position, true);
        fixed_point<T> res = (is_sat) ? val1 * val2 : mul<OverflowPolicy::WRAP>(val1, val2);
        if(do_scaling)
        {
            res = (is_sat) ? res * fp_scale : mul<OverflowPolicy::WRAP>(res, fp_scale);
        }
        out[i] = res.raw();
    }
}

// Threshold
template <typename T>
void threshold(const Tensor<T> &in, Tensor<T> &out, uint8_t threshold, uint8_t false_value, uint8_t true_value, ThresholdType type, uint8_t upper)
{
    switch(type)
    {
        case ThresholdType::BINARY:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = ((in[i] > threshold) ? true_value : false_value);
            }
            break;
        case ThresholdType::RANGE:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                // Values outside the [threshold, upper] band map to false_value
                out[i] = (in[i] > upper || in[i] < threshold) ? false_value : true_value;
            }
            break;
        default:
            ARM_COMPUTE_ERROR("Thresholding type not recognised");
            break;
    }
}

// Activation Layer for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
{
    const T a = static_cast<T>(act_info.a());
    const T b = static_cast<T>(act_info.b());

    for(int i = 0; i < in.num_elements(); ++i)
    {
        T x = in[i];
        switch(act_info.activation())
        {
            case ActivationLayerInfo::ActivationFunction::ABS:
                out[i] = std::abs(x);
                break;
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                out[i] = std::min<T>(a, std::max<T>(0, x));
                break;
            case ActivationLayerInfo::ActivationFunction::LINEAR:
                out[i] = a * x + b;
                break;
            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                out[i] = static_cast<T>(1) / (static_cast<T>(1) + std::exp(-x));
                break;
            case ActivationLayerInfo::ActivationFunction::RELU:
                out[i] = std::max<T>(0, x);
                break;
            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                out[i] = std::log(static_cast<T>(1) + std::exp(x));
                break;
            case ActivationLayerInfo::ActivationFunction::SQRT:
                out[i] = std::sqrt(x);
                break;
            case ActivationLayerInfo::ActivationFunction::SQUARE:
                out[i] = x * x;
                break;
            case ActivationLayerInfo::ActivationFunction::TANH:
                out[i] = a * std::tanh(b * x);
                break;
            default:
                ARM_COMPUTE_ERROR("Activation function not recognised");
                break;
        }
    }
}

// Activation Layer for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
{
    using namespace fixed_point_arithmetic;
    int                                     fixed_point_position = in.fixed_point_position();
    ActivationLayerInfo::ActivationFunction act_func             = act_info.activation();
    const fixed_point<T>                    a(act_info.a(), fixed_point_position);
    const fixed_point<T>                    b(act_info.b(), fixed_point_position);
    const fixed_point<T>                    const_0(0, fixed_point_position);
    const fixed_point<T>                    const_1(1, fixed_point_position);

    for(int i = 0; i < in.num_elements(); ++i)
    {
        fixed_point<T> x(in[i], fixed_point_position, true);
        switch(act_func)
        {
            case ActivationLayerInfo::ActivationFunction::ABS:
                out[i] = abs(x).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                out[i] = min(a, max(const_0, x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::LINEAR:
                out[i] = add(b, mul(a, x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                out[i] = (const_1 / (const_1 + exp(-x))).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::RELU:
                out[i] = max(const_0, x).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                out[i] = log(const_1 + exp(x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::SQRT:
                out[i] = (const_1 / inv_sqrt(x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::SQUARE:
                out[i] = mul(x, x).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::TANH:
                out[i] = tanh(x).raw();
                break;
            default:
                ARM_COMPUTE_ERROR("Activation function not recognised");
                break;
        }
    }
}

// Batch Normalization Layer for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
{
    const int cols       = static_cast<int>(in.shape()[0]);
    const int rows       = static_cast<int>(in.shape()[1]);
    const int depth      = static_cast<int>(in.shape()[2]);
    int       upper_dims = in.shape().total_size() / (cols * rows * depth);

    for(int r = 0; r < upper_dims; ++r)
    {
        for(int i = 0; i < depth; ++i)
        {
            for(int k = 0; k < rows; ++k)
            {
                for(int l = 0; l < cols; ++l)
                {
                    const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
                    fixed_point_arithmetic::fixed_point<T> in_qs8(in[pos], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> var_qs8(var[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> mean_qs8(mean[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> beta_qs8(beta[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> gamma_qs8(gamma[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> epsilon_qs8(epsilon, fixed_point_position);

                    auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs8 + epsilon_qs8);
                    auto numerator   = in_qs8 - mean_qs8;
                    auto x_bar       = numerator * denominator;
                    x_bar            = beta_qs8 + x_bar * gamma_qs8;
                    out[pos]         = x_bar.raw();
                }
            }
        }
    }
}

// Batch Normalization Layer for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
{
    const int cols       = static_cast<int>(in.shape()[0]);
    const int rows       = static_cast<int>(in.shape()[1]);
    const int depth      = static_cast<int>(in.shape()[2]);
    int       upper_dims = in.shape().total_size() / (cols * rows * depth);

    for(int r = 0; r < upper_dims; ++r)
    {
        for(int i = 0; i < depth; ++i)
        {
            for(int k = 0; k < rows; ++k)
            {
                for(int l = 0; l < cols; ++l)
                {
                    const int   pos         = l + k * cols + i * rows * cols + r * cols * rows * depth;
                    const float denominator = sqrt(var[i] + epsilon);
                    const float numerator   = in[pos] - mean[i];
                    const float x_bar       = numerator / denominator;
                    out[pos]                = beta[i] + x_bar * gamma[i];
                }
            }
        }
    }
}

// Convolution layer
template <typename T>
void convolution_layer(const Tensor<T> &in, const Tensor<T> &weights, const Tensor<T> &bias, Tensor<T> &out, const PadStrideInfo &conv_info)
{
    const int width_in       = in.shape().x();
    const int height_in      = in.shape().y();
    const int depth_in       = in.shape().z();
    const int width_out      = out.shape().x();
    const int height_out     = out.shape().y();
    const int depth_out      = out.shape().z();
    const int width_weights  = weights.shape().x();
    const int height_weights = weights.shape().y();
    const int depth_weights  = weights.shape().z();
    const int pad_xi         = std::min(static_cast<int>(conv_info.pad().first), width_weights / 2);
    const int pad_yi         = std::min(static_cast<int>(conv_info.pad().second), height_weights / 2);
    const int start_xi       = width_weights / 2 - pad_xi;
    const int start_yi       = height_weights / 2 - pad_yi;
    const int end_xi         = width_in - start_xi;
    const int end_yi         = height_in - start_yi;
    const int stride_xi      = conv_info.stride().first;
    const int stride_yi      = conv_info.stride().second;
    const int num_batches    = in.shape().total_size() / (width_in * height_in * depth_in);

    for(int r = 0; r < num_batches; ++r)
    {
        for(int yi = start_yi; yi < end_yi; yi += stride_yi)
        {
            for(int xi = start_xi; xi < end_xi; xi += stride_xi)
            {
                for(int ofm = 0; ofm < depth_out; ++ofm)
                {
                    // Compute input and output offsets
                    const int offset_in  = r * width_in * height_in * depth_in;
                    const int xo         = (xi - start_xi) / stride_xi;
                    const int yo         = (yi - start_yi) / stride_yi;
                    const int offset_out = xo + yo * width_out + ofm * width_out * height_out + r * width_out * height_out * depth_out;

                    // Compute 3D convolution
                    convolution3d(in.data() + offset_in,
                                  weights.data() + ofm * width_weights * height_weights * depth_weights,
                                  bias.data() + ofm,
                                  out.data() + offset_out,
                                  xi, yi,
                                  width_in, height_in, depth_in,
                                  width_weights, height_weights,
                                  static_cast<int8_t>(in.fixed_point_position()));
                }
            }
        }
    }
}

// Fully connected layer
template <typename T>
void fully_connected_layer(const Tensor<T> &in, const Tensor<T> &weights, const Tensor<T> &bias, Tensor<T> &out)
{
    ARM_COMPUTE_ERROR_ON(weights.shape().x() != out.shape().x());
    ARM_COMPUTE_ERROR_ON(weights.shape().y() != in.shape().x() * in.shape().y() * in.shape().z());
    const int cols_weights = weights.shape().x();
    const int rows_weights = weights.shape().y();
    const int num_batches  = in.shape().total_size() / rows_weights;

    for(int k = 0; k < num_batches; ++k)
    {
        vector_matrix_multiply<T>(in.data() + k * rows_weights,
                                  weights.data(),
                                  bias.data(),
                                  out.data() + k * cols_weights,
                                  cols_weights,
                                  rows_weights,
                                  in.fixed_point_position());
    }
}

// Normalization Layer for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
{
    const uint32_t norm_size = norm_info.norm_size();
    NormType       type      = norm_info.type();
    float          beta      = norm_info.beta();
    uint32_t       kappa     = norm_info.kappa();

    const int cols       = static_cast<int>(in.shape()[0]);
    const int rows       = static_cast<int>(in.shape()[1]);
    const int depth      = static_cast<int>(in.shape()[2]);
    int       upper_dims = in.shape().total_size() / (cols * rows);

    float coeff       = norm_info.scale_coeff();
    int   radius_cols = norm_size / 2;
    // IN_MAP_1D and CROSS_MAP normalize over a single axis only
    int radius_rows = (NormType::IN_MAP_2D == type) ? norm_size / 2 : 0;

    if(type == NormType::CROSS_MAP)
    {
        // Remove also depth from upper dimensions since it is the axis we want
        // to use for normalization
        upper_dims /= depth;
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    for(int l = 0; l < depth; ++l)
                    {
                        float accumulated_scale = 0.f;
                        for(int j = -radius_cols; j <= radius_cols; ++j)
                        {
                            const int z = l + j;
                            if(z >= 0 && z < depth)
                            {
                                const T value = in[k + i * cols + z * rows * cols + r * cols * rows * depth];
                                accumulated_scale += value * value;
                            }
                        }
                        out[k + i * cols + l * rows * cols + r * cols * rows * depth] = kappa + accumulated_scale * coeff;
                    }
                }
            }
        }
    }
    else
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    float accumulated_scale = 0.f;
                    for(int j = -radius_rows; j <= radius_rows; ++j)
                    {
                        const int y = i + j;
                        for(int l = -radius_cols; l <= radius_cols; ++l)
                        {
                            const int x = k + l;
                            if((x >= 0 && y >= 0) && (x < cols && y < rows))
                            {
                                const T value = in[x + y * cols + r * cols * rows];
                                accumulated_scale += value * value;
                            }
                        }
                    }
                    out[k + i * cols + r * cols * rows] = kappa + accumulated_scale * coeff;
                }
            }
        }
    }

    if(beta == 1.f)
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            out[i] = in[i] / out[i];
        }
    }
    else if(beta == 0.5f)
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            out[i] = in[i] / std::sqrt(out[i]);
        }
    }
    else
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            out[i] = in[i] * std::exp(std::log(out[i]) * -beta);
        }
    }
}

// Normalization Layer for fixed-point types
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
{
    using namespace fixed_point_arithmetic;

    const int fixed_point_position = in.fixed_point_position();

    const uint32_t norm_size = norm_info.norm_size();
    NormType       type      = norm_info.type();
    fixed_point<T> beta(norm_info.beta(), fixed_point_position);
    fixed_point<T> kappa(norm_info.kappa(), fixed_point_position);

    const int cols       = static_cast<int>(in.shape()[0]);
    const int rows       = static_cast<int>(in.shape()[1]);
    const int depth      = static_cast<int>(in.shape()[2]);
    int       upper_dims = in.shape().total_size() / (cols * rows);

    fixed_point<T> coeff(norm_info.scale_coeff(), fixed_point_position);
    int            radius_cols = norm_size / 2;
    // IN_MAP_1D and CROSS_MAP normalize over a single axis only
    int radius_rows = (NormType::IN_MAP_2D == type) ? norm_size / 2 : 0;

    if(type == NormType::CROSS_MAP)
    {
        // Remove also depth from upper dimensions since it is the axis we want
        // to use for normalization
        upper_dims /= depth;
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    for(int l = 0; l < depth; ++l)
                    {
                        fixed_point<T> accumulated_scale(0.f, fixed_point_position);
                        for(int j = -radius_cols; j <= radius_cols; ++j)
                        {
                            const int z = l + j;
                            if(z >= 0 && z < depth)
                            {
                                const T              value = in[k + i * cols + z * rows * cols + r * cols * rows * depth];
                                const fixed_point<T> fp_value(value, fixed_point_position, true);
                                accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value));
                            }
                        }
                        accumulated_scale = add(kappa, mul(accumulated_scale, coeff));
                        out[k + i * cols + l * rows * cols + r * cols * rows * depth] = accumulated_scale.raw();
                    }
                }
            }
        }
    }
    else
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    fixed_point<T> accumulated_scale(0.f, fixed_point_position);
                    for(int j = -radius_rows; j <= radius_rows; ++j)
                    {
                        const int y = i + j;
                        for(int l = -radius_cols; l <= radius_cols; ++l)
                        {
                            const int x = k + l;
                            if((x >= 0 && y >= 0) && (x < cols && y < rows))
                            {
                                const T              value = in[x + y * cols + r * cols * rows];
                                const fixed_point<T> fp_value(value, fixed_point_position, true);
                                accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value));
                            }
                        }
                    }
                    accumulated_scale                   = add(kappa, mul(accumulated_scale, coeff));
                    out[k + i * cols + r * cols * rows] = accumulated_scale.raw();
                }
            }
        }
    }

    if(norm_info.beta() == 1.f)
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            fixed_point<T> res = div(fixed_point<T>(in[i], fixed_point_position, true), fixed_point<T>(out[i], fixed_point_position, true));
            out[i]             = res.raw();
        }
    }
    else
    {
        const fixed_point<T> beta(norm_info.beta(), fixed_point_position);
        for(int i = 0; i < out.num_elements(); ++i)
        {
            fixed_point<T> res = pow(fixed_point<T>(out[i], fixed_point_position, true), beta);
            res                = div(fixed_point<T>(in[i], fixed_point_position, true), res);
            out[i]             = res.raw();
        }
    }
}

// Pooling layer
template <typename T>
void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info, int fixed_point_position)
{
    const int   pool_size     = pool_info.pool_size();
    PoolingType type          = pool_info.pool_type();
    int         pool_stride_x = 0;
    int         pool_stride_y = 0;
    int         pad_x         = 0;
    int         pad_y         = 0;
    std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info().stride();
    std::tie(pad_x, pad_y)                 = pool_info.pad_stride_info().pad();

    const int w_in = static_cast<int>(in.shape()[0]);
    const int h_in = static_cast<int>(in.shape()[1]);

    const int w_out = static_cast<int>(out.shape()[0]);
    const int h_out = static_cast<int>(out.shape()[1]);

    int upper_dims = in.shape().total_size() / (w_in * h_in);

    int pooled_w = 0;
    int pooled_h = 0;
    if(pool_info.pad_stride_info().round() == DimensionRoundingType::CEIL)
    {
        pooled_w = static_cast<int>(ceil(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
        pooled_h = static_cast<int>(ceil(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
    }
    else
    {
        pooled_w = static_cast<int>(floor(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
        pooled_h = static_cast<int>(floor(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
    }

    if((pooled_w - 1) * pool_stride_x >= w_in + pad_x)
    {
        --pooled_w;
    }
    if((pooled_h - 1) * pool_stride_y >= h_in + pad_y)
    {
        --pooled_h;
    }

    if(type == PoolingType::MAX)
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int h = 0; h < pooled_h; ++h)
            {
                for(int w = 0; w < pooled_w; ++w)
                {
                    int wstart = w * pool_stride_x - pad_x;
                    int hstart = h * pool_stride_y - pad_y;
                    int wend   = std::min(wstart + pool_size, w_in);
                    int hend   = std::min(hstart + pool_size, h_in);
                    wstart     = std::max(wstart, 0);
                    hstart     = std::max(hstart, 0);

                    T max_val = std::numeric_limits<T>::lowest();
                    for(int y = hstart; y < hend; ++y)
                    {
                        for(int x = wstart; x < wend; ++x)
                        {
                            T val = in[r * h_in * w_in + y * w_in + x];
                            if(val > max_val)
                            {
                                max_val = val;
                            }
                        }
                    }

                    out[r * h_out * w_out + h * pooled_w + w] = max_val;
                }
            }
        }
    }
    else // Average pooling
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int h = 0; h < pooled_h; ++h)
            {
                for(int w = 0; w < pooled_w; ++w)
                {
                    T   avg_val = 0;
                    int wstart  = w * pool_stride_x - pad_x;
                    int hstart  = h * pool_stride_y - pad_y;
                    int wend    = std::min(wstart + pool_size, w_in + pad_x);
                    int hend    = std::min(hstart + pool_size, h_in + pad_y);
                    int pool    = (hend - hstart) * (wend - wstart);
                    wstart      = std::max(wstart, 0);
                    hstart      = std::max(hstart, 0);
                    wend        = std::min(wend, w_in);
                    hend        = std::min(hend, h_in);
                    if(is_floating_point<T>::value)
                    {
                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                avg_val += in[r * h_in * w_in + y * w_in + x];
                            }
                        }
                        out[r * h_out * w_out + h * pooled_w + w] = avg_val / pool;
                    }
                    else
                    {
                        static std::array<qint8_t, 10> scale_values_q8 =
                        { { 0x0, 0x0, 0x40, 0x2A, 0x20, 0x19, 0x15, 0x12, 0x10, 0xE } };

                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                avg_val = sqadd_qs8(avg_val, in[r * h_in * w_in + y * w_in + x]);
                            }
                        }
                        out[r * h_out * w_out + h * pooled_w + w] = sqmul_qs8(avg_val, (scale_values_q8[pool] >> (7 - fixed_point_position)), fixed_point_position);
                    }
                }
            }
        }
    }
}

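// Note on scale_values_q8 above (illustrative): entry p stores 128 / p in Q7
// format, e.g. scale_values_q8[4] = 0x20 = 32 = 128 / 4, so multiplying by it
// (after the right shift that adapts Q7 to the tensor's fixed-point position)
// divides the accumulated sum by the pooling region size.
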
// ROI pooling layer
template <typename T>
void roi_pooling_layer(const Tensor<T> &in, Tensor<T> &out, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info)
{
    const int   num_rois   = rois.size();
    const int   width_in   = in.shape().x();
    const int   height_in  = in.shape().y();
    const int   fms        = in.shape().z();
    const int   volume_in  = width_in * height_in * fms;
    const int   pool_w     = pool_info.pooled_width();
    const int   pool_h     = pool_info.pooled_height();
    const int   volume_out = pool_w * pool_h * fms;
    const float roi_scale  = pool_info.spatial_scale();

    // Iterate through all rois
    for(int roi_idx = 0; roi_idx < num_rois; ++roi_idx)
    {
        // Get dimensions of current ROI
        const ROI &roi = rois[roi_idx];

        int batch_id    = roi.batch_idx;
        int roi_start_x = support::cpp11::round(roi.rect.x * roi_scale);
        int roi_start_y = support::cpp11::round(roi.rect.y * roi_scale);
        int roi_width   = std::max(support::cpp11::round(roi.rect.width * roi_scale), 1.f);
        int roi_height  = std::max(support::cpp11::round(roi.rect.height * roi_scale), 1.f);

        // Determine pooling regions
        float pool_region_size_x = static_cast<float>(roi_width) / pool_w;
        float pool_region_size_y = static_cast<float>(roi_height) / pool_h;

        // Iterate through all channels
        for(int fm = 0; fm < fms; ++fm)
        {
            // Calculate each output pixel
            for(int py = 0; py < pool_h; ++py)
            {
                for(int px = 0; px < pool_w; ++px)
                {
                    int region_start_x = static_cast<int>(std::floor(px * pool_region_size_x));
                    int region_end_x   = static_cast<int>(std::ceil((px + 1) * pool_region_size_x));
                    int region_start_y = static_cast<int>(std::floor(py * pool_region_size_y));
                    int region_end_y   = static_cast<int>(std::ceil((py + 1) * pool_region_size_y));

                    region_start_x = std::min(std::max(region_start_x + roi_start_x, 0), width_in);
                    region_end_x   = std::min(std::max(region_end_x + roi_start_x, 0), width_in);
                    region_start_y = std::min(std::max(region_start_y + roi_start_y, 0), height_in);
                    region_end_y   = std::min(std::max(region_end_y + roi_start_y, 0), height_in);

                    // Iterate through each pixel in the pooling region
                    if((region_end_x <= region_start_x) || (region_end_y <= region_start_y))
                    {
                        out[roi_idx * volume_out + fm * pool_w * pool_h + py * pool_w + px] = 0;
                    }
                    else
                    {
                        T curr_max = std::numeric_limits<T>::lowest();
                        for(int j = region_start_y; j < region_end_y; ++j)
                        {
                            for(int i = region_start_x; i < region_end_x; ++i)
                            {
                                const auto val = in[batch_id * volume_in + fm * width_in * height_in + j * width_in + i];
                                curr_max       = std::max(val, curr_max);
                            }
                        }
                        out[roi_idx * volume_out + fm * pool_w * pool_h + py * pool_w + px] = curr_max;
                    }
                }
            }
        }
    }
}

// Softmax Layer for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
{
    const int cols       = static_cast<int>(in.shape()[0]);
    const int upper_dims = in.shape().total_size() / cols;
    for(int r = 0; r < upper_dims; ++r)
    {
        // Find max
        T max = std::numeric_limits<T>::lowest();
        for(int c = 0; c < cols; ++c)
        {
            const T x = in[r * cols + c];
            if(x > max)
            {
                max = x;
            }
        }

        // Regularize: subtracting the row maximum keeps exp() within range
        T sum = 0;
        for(int c = 0; c < cols; ++c)
        {
            const T res       = exp(in[r * cols + c] - max);
            out[r * cols + c] = res;
            sum += res;
        }

        // Normalize
        const T norm_val = 1 / sum;
        for(int c = 0; c < cols; ++c)
        {
            out[r * cols + c] *= norm_val;
        }
    }
}

// Softmax Layer for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
{
    using namespace fixed_point_arithmetic;
    using promoted_T = typename test::traits::promote<T>::type;

    const int fixed_point_position = in.fixed_point_position();
    const int cols                 = static_cast<int>(in.shape()[0]);
    const int upper_dims           = in.shape().total_size() / cols;

    for(int r = 0; r < upper_dims; ++r)
    {
        // Find max
        fixed_point<T> max(std::numeric_limits<T>::lowest(), fixed_point_position, true);
        for(int c = 0; c < cols; ++c)
        {
            const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
            if(x > max)
            {
                max = x;
            }
        }

        // Regularize: subtracting the row maximum keeps exp() within range
        fixed_point<promoted_T> sum(0, fixed_point_position);
        for(int c = 0; c < cols; ++c)
        {
            const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
            fixed_point<T>       res = exp(x - max);
            out[r * cols + c]        = res.raw();
            sum = add(sum, static_cast<fixed_point<promoted_T>>(res));
        }

        // Normalize
        fixed_point<T> sat_sum(sum);
        for(int c = 0; c < cols; ++c)
        {
            const fixed_point<T> x(out[r * cols + c], fixed_point_position, true);
            out[r * cols + c] = div(x, sat_sum).raw();
        }
    }
}

// Fixed point operations
template <typename T>
void fixed_point_operation(const Tensor<T> &in, Tensor<T> &out, FixedPointOp op)
{
    int p = in.fixed_point_position();
    switch(op)
    {
        case FixedPointOp::EXP:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::exp(fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        case FixedPointOp::LOG:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::log(fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        case FixedPointOp::INV_SQRT:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::inv_sqrt(fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        case FixedPointOp::RECIPROCAL:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::div(fixed_point_arithmetic::fixed_point<T>(1, p), fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        default:
            ARM_COMPUTE_ERROR("Fixed point operation not supported");
            break;
    }
}

// Tensor print
template <typename T>
void print(const Tensor<T> &in, std::ostream &out)
{
    out << "\n";
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out << in[i] << " ";
    }
    out << "\n";
}
} // namespace tensor_operations
} // namespace validation
} // namespace test
} // namespace arm_compute

#endif /* __ARM_COMPUTE_TEST_TENSOR_OPERATIONS_H__ */