/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_TENSOR_OPERATIONS_H__
#define __ARM_COMPUTE_TEST_TENSOR_OPERATIONS_H__

#include "FixedPoint.h"
#include "Tensor.h"
#include "Types.h"
#include "Utils.h"

#include "arm_compute/core/FixedPoint.h"
#include "arm_compute/core/Types.h"
#include "tests/validation/FixedPoint.h"
#include "tests/validation/ValidationUserConfiguration.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace tensor_operations
{
namespace
{
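// Type trait that evaluates to true for floating point types (including float16_t when FP16 support is enabled)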
template <class T>
struct is_floating_point
    : std::integral_constant < bool,
      std::is_same<float, typename std::remove_cv<T>::type>::value ||
#if ARM_COMPUTE_ENABLE_FP16
      std::is_same<float16_t, typename std::remove_cv<T>::type>::value ||
#endif
      std::is_same<double, typename std::remove_cv<T>::type>::value || std::is_same<long double, typename std::remove_cv<T>::type>::value >
{
};

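// Return true if the index is within the half-open range [min, max)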
bool is_valid_pixel(int i, int min, int max)
{
    return (i >= min && i < max);
}

// 3D convolution for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int8_t fixed_point_position)
{
    const int half_width_weights = width_weights / 2;
    const int half_height_weights = height_weights / 2;

    // Reset accumulator
    T acc = static_cast<T>(0);

    // Compute a 2D convolution for each IFM and accumulate the result
    for(int ifm = 0; ifm < depth_in; ++ifm)
    {
        // Compute the offset for the input slice
        const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;

        // Compute 2D convolution
        for(int yk = -half_height_weights; yk <= half_height_weights; ++yk)
        {
            for(int xk = -half_width_weights; xk <= half_width_weights; ++xk)
            {
                // Check if the pixel is out-of-bound
                if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
                {
                    const int idx = xk + half_width_weights;
                    const int idy = yk + half_height_weights;

                    const T i_value = in[offset_slice_in + xk + yk * width_in];
                    const T w_value = weights[idx + idy * width_weights + ifm * width_weights * height_weights];

                    acc += i_value * w_value;
                }
            }
        }
    }

    // Accumulate the bias and store the result
    *out = acc + (*bias);
}

// 3D convolution for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights,
                   int8_t fixed_point_position)
{
    const int half_width_weights = width_weights / 2;
    const int half_height_weights = height_weights / 2;

    using namespace fixed_point_arithmetic;
    using promoted_type = typename fixed_point_arithmetic::traits::promote<T>::type;

    // Reset accumulator
    fixed_point<promoted_type> acc(0, fixed_point_position);

    // Compute a 2D convolution for each IFM and accumulate the result
    for(int ifm = 0; ifm < depth_in; ++ifm)
    {
        // Compute the offset for the input slice
        const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;

        // Compute 2D convolution
        for(int yk = -half_height_weights; yk <= half_height_weights; ++yk)
        {
            for(int xk = -half_width_weights; xk <= half_width_weights; ++xk)
            {
                // Check if the pixel is out-of-bound
                if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
                {
                    const int idx = xk + half_width_weights;
                    const int idy = yk + half_height_weights;

                    const fixed_point<promoted_type> i_value(in[offset_slice_in + xk + yk * width_in], fixed_point_position, true);
                    const fixed_point<promoted_type> w_value(weights[idx + idy * width_weights + ifm * width_weights * height_weights], fixed_point_position, true);
                    const fixed_point<promoted_type> iw = i_value * w_value;
                    acc = iw + acc;
                }
            }
        }
    }

    // Get the bias
    const fixed_point<promoted_type> b(*bias, fixed_point_position, true);

    // Accumulate the bias and convert back
    acc = acc + b;
    fixed_point<T> res(acc);
    *out = res.raw();
}

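// Vector by matrix multiplication with bias: out[x] = sum_y(in[y] * weights[y][x]) + bias[x], with weights stored row-major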
template <typename T>
void vector_matrix_multiply(const T *in, const T *weights, const T *bias, T *out, int cols_weights, int rows_weights, uint8_t fixed_point_position)
{
    for(int x = 0; x < cols_weights; ++x)
    {
        T acc = 0.0f;
        for(int y = 0; y < rows_weights; ++y)
        {
            acc += in[y] * weights[x + y * cols_weights];
        }
        out[x] = acc + bias[x];
    }
}

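// Specialization for 8-bit fixed point: products are accumulated in a wider promoted type before converting back to 8 bits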
template <>
void vector_matrix_multiply(const int8_t *in, const int8_t *weights, const int8_t *bias, int8_t *out, int cols_weights, int rows_weights, uint8_t fixed_point_position)
{
    using namespace fixed_point_arithmetic;
    using promoted_type = typename fixed_point_arithmetic::traits::promote<int8_t>::type;

    for(int x = 0; x < cols_weights; ++x)
    {
        // Reset accumulator
        fixed_point<promoted_type> acc(0, fixed_point_position);

        for(int y = 0; y < rows_weights; ++y)
        {
            const fixed_point<promoted_type> i_value(in[y], fixed_point_position, true);
            const fixed_point<promoted_type> w_value(weights[x + y * cols_weights], fixed_point_position, true);
            const fixed_point<promoted_type> iw = i_value * w_value;
            acc = iw + acc;
        }

        // Get the bias
        const fixed_point<int8_t> b(bias[x], fixed_point_position, true);

        // Convert back and accumulate the bias
        fixed_point<int8_t> res(acc);
        res = res + b;

        // Store the result
        out[x] = res.raw();
    }
}

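// Return the tensor element at coord; out-of-bounds accesses are resolved according to border_mode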
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
T tensor_elem_at(const Tensor<T> &in, Coordinates &coord, BorderMode border_mode, T constant_border_value)
{
    const int x = coord.x();
    const int y = coord.y();
    const int width = static_cast<int>(in.shape().x());
    const int height = static_cast<int>(in.shape().y());

    // If on border
    if(x < 0 || y < 0 || x >= width || y >= height)
    {
        if(border_mode == BorderMode::CONSTANT)
        {
            return constant_border_value;
        }
        else if(border_mode == BorderMode::REPLICATE)
        {
            coord.set(0, std::max(0, std::min(x, width - 1)));
            coord.set(1, std::max(0, std::min(y, height - 1)));
            return in[coord2index(in.shape(), coord)];
        }
        else
        {
            // Return a random value if on border and border_mode == UNDEFINED
            std::mt19937 gen(user_config.seed.get());
            std::uniform_int_distribution<T> distribution(0, 255);
            return distribution(gen);
        }
    }
    else
    {
        return in[coord2index(in.shape(), coord)];
    }
}

/** Apply a 2D spatial filter to a single element of @p in at coordinates @p coord
 *
 * - Filter sizes have to be odd numbers
 * - Row-major order of the filter is assumed
 * - TO_ZERO rounding policy is assumed
 * - SATURATE convert policy is assumed
 */
template <typename T1, typename T2, typename T3>
void apply_2d_spatial_filter(Coordinates coord, const Tensor<T1> &in, Tensor<T3> &out, const TensorShape &filter_shape, const T2 *filter_itr, float scale, BorderMode border_mode,
                             T1 constant_border_value = 0)
{
    double val = 0;
    const int x = coord.x();
    const int y = coord.y();
    for(int j = y - static_cast<int>(filter_shape[1] / 2); j <= y + static_cast<int>(filter_shape[1] / 2); ++j)
    {
        for(int i = x - static_cast<int>(filter_shape[0] / 2); i <= x + static_cast<int>(filter_shape[0] / 2); ++i)
        {
            coord.set(0, i);
            coord.set(1, j);
            double pixel_to_multiply = tensor_elem_at(in, coord, border_mode, constant_border_value);
            val += static_cast<double>(*filter_itr) * pixel_to_multiply;
            ++filter_itr;
        }
    }
    coord.set(0, x);
    coord.set(1, y);
    const double rounded_val = cpp11::trunc(val * static_cast<double>(scale));
    out[coord2index(in.shape(), coord)] = saturate_cast<T3>(rounded_val);
}
} // namespace

// Sobel 3x3
template <typename T1, typename T2>
void sobel_3x3(Tensor<T1> &in, Tensor<T2> &out_x, Tensor<T2> &out_y, BorderMode border_mode, uint8_t constant_border_value)
{
    const std::array<int8_t, 9> sobel_x{ { -1, 0, 1, -2, 0, 2, -1, 0, 1 } };
    const std::array<int8_t, 9> sobel_y{ { -1, -2, -1, 0, 0, 0, 1, 2, 1 } };

    for(int element_idx = 0; element_idx < in.num_elements(); ++element_idx)
    {
        const Coordinates id = index2coord(in.shape(), element_idx);

        apply_2d_spatial_filter(id, in, out_x, TensorShape(3U, 3U), sobel_x.data(), 1.f, border_mode, constant_border_value);
        apply_2d_spatial_filter(id, in, out_y, TensorShape(3U, 3U), sobel_y.data(), 1.f, border_mode, constant_border_value);
    }
}

// Sobel 5x5
template <typename T1, typename T2>
void sobel_5x5(Tensor<T1> &in, Tensor<T2> &out_x, Tensor<T2> &out_y, BorderMode border_mode, uint8_t constant_border_value)
{
    const std::array<int8_t, 25> sobel_x{ {
            -1, -2, 0, 2, 1,
            -4, -8, 0, 8, 4,
            -6, -12, 0, 12, 6,
            -4, -8, 0, 8, 4,
            -1, -2, 0, 2, 1
        } };

    const std::array<int8_t, 25> sobel_y{ {
            -1, -4, -6, -4, -1,
            -2, -8, -12, -8, -2,
            0, 0, 0, 0, 0,
            2, 8, 12, 8, 2,
            1, 4, 6, 4, 1
        } };

    for(int element_idx = 0; element_idx < in.num_elements(); ++element_idx)
    {
        const Coordinates id = index2coord(in.shape(), element_idx);

        apply_2d_spatial_filter(id, in, out_x, TensorShape(5U, 5U), sobel_x.data(), 1.f, border_mode, constant_border_value);
        apply_2d_spatial_filter(id, in, out_y, TensorShape(5U, 5U), sobel_y.data(), 1.f, border_mode, constant_border_value);
    }
}

// Mean and standard deviation
template <typename T1>
void mean_and_standard_deviation(const Tensor<T1> &in, float &mean, float &std_dev)
{
    int num_elements = in.num_elements();

    // Calculate mean
    mean = 0.f;
    for(int i = 0; i < num_elements; ++i)
    {
        mean += in[i];
    }
    mean /= num_elements;

    // Calculate standard deviation
    std_dev = 0.f;
    for(int i = 0; i < num_elements; ++i)
    {
        std_dev += (mean - in[i]) * (mean - in[i]);
    }
    std_dev = sqrt(std_dev / num_elements);
}

// Integral Image
void integral_image(const Tensor<uint8_t> &in, Tensor<uint32_t> &out)
{
    // Length of dimensions
    const size_t width = in.shape().x();
    const size_t height = in.shape().y();
    const size_t depth = in.shape().z() * in.shape()[3] * in.shape()[4] * in.shape()[5];

    const size_t image_size = width * height;

    for(size_t z = 0; z < depth; ++z)
    {
        size_t current_image = z * image_size;

        // First element of each image
        out[current_image] = in[current_image];

        // First row of each image (add only pixel on the left)
        for(size_t x = 1; x < width; ++x)
        {
            out[current_image + x] = static_cast<uint32_t>(in[current_image + x]) + out[current_image + x - 1];
        }

        // Subsequent rows
        for(size_t y = 1; y < height; ++y)
        {
            size_t current_row = current_image + (width * y);

            // First element of each row (add only pixel up)
            out[current_row] = static_cast<uint32_t>(in[current_row]) + out[current_row - width];

            // Following row elements
            for(size_t x = 1; x < width; ++x)
            {
                size_t current_pixel = current_row + x;

                // out = in + up(out) + left(out) - up_left(out)
                out[current_pixel] = static_cast<uint32_t>(in[current_pixel]) + out[current_pixel - 1]
                                     + out[current_pixel - width] - out[current_pixel - width - 1];
            }
        }
    }
}

// Absolute difference
template <typename T1, typename T2, typename T3>
void absolute_difference(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        intermediate_type val = std::abs(static_cast<intermediate_type>(in1[i]) - static_cast<intermediate_type>(in2[i]));
        out[i] = saturate_cast<T3>(val);
    }
}

// Accumulate
template <typename T1, typename T2>
void accumulate(const Tensor<T1> &in, Tensor<T2> &out)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2>::intermediate_type;

    for(int i = 0; i < in.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(out[i]) + static_cast<intermediate_type>(in[i]);
        out[i] = saturate_cast<T2>(val);
    }
}

// Accumulate squared
template <typename T1, typename T2>
void accumulate_squared(const Tensor<T1> &in, Tensor<T2> &out, uint32_t shift)
{
    if(shift > 15)
    {
        ARM_COMPUTE_ERROR("Shift in accumulate_squared must be within the range [0, 15]");
    }
    using intermediate_type = typename common_promoted_signed_type<T1, T2>::intermediate_type;
    intermediate_type denom = 1 << shift;

    for(int i = 0; i < in.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(out[i]) + (static_cast<intermediate_type>(in[i]) * static_cast<intermediate_type>(in[i]) / denom);
        out[i] = saturate_cast<T2>(val);
    }
}

// Accumulate weighted
template <typename T>
void accumulate_weighted(const Tensor<T> &in, Tensor<T> &out, float alpha)
{
    if(alpha < 0.f || alpha > 1.f)
    {
        ARM_COMPUTE_ERROR("Weight (alpha) specified in accumulate_weighted must be within the range [0, 1]");
    }
    using intermediate_type = typename common_promoted_signed_type<T>::intermediate_type;

    for(int i = 0; i < in.num_elements(); ++i)
    {
        double val = (1. - static_cast<double>(alpha)) * static_cast<intermediate_type>(out[i]) + static_cast<double>(alpha) * static_cast<intermediate_type>(in[i]);
        out[i] = static_cast<T>(val);
    }
}

// Arithmetic addition
template <typename T1, typename T2, typename T3>
void arithmetic_addition(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out, ConvertPolicy convert_policy)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(in1[i]) + static_cast<intermediate_type>(in2[i]);
        out[i] = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T3>(val) : static_cast<T3>(val);
    }
}

// Arithmetic Subtraction
template <typename T1, typename T2, typename T3>
void arithmetic_subtraction(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out, ConvertPolicy convert_policy)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(in1[i]) - static_cast<intermediate_type>(in2[i]);
        out[i] = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T3>(val) : static_cast<T3>(val);
    }
}

// Bitwise and
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_and(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out)
{
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        out[i] = in1[i] & in2[i];
    }
}

// Bitwise or
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_or(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out)
{
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        out[i] = in1[i] | in2[i];
    }
}

// Bitwise xor
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_xor(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out)
{
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        out[i] = in1[i] ^ in2[i];
    }
}

// Bitwise not
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_not(const Tensor<T> &in, Tensor<T> &out)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = ~in[i];
    }
}

// 3-by-3 box filter
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void box3x3(const Tensor<T> &in, Tensor<T> &out)
{
    const std::array<T, 9> filter{ { 1, 1, 1, 1, 1, 1, 1, 1, 1 } };
    float scale = 1.f / static_cast<float>(filter.size());
    const ValidRegion valid_region = shape_to_valid_region_undefined_border(in.shape(), BorderSize(1));
    for(int element_idx = 0; element_idx < in.num_elements(); ++element_idx)
    {
        const Coordinates id = index2coord(in.shape(), element_idx);
        if(is_in_valid_region(valid_region, id))
        {
            apply_2d_spatial_filter(id, in, out, TensorShape(3U, 3U), filter.data(), scale, BorderMode::UNDEFINED);
        }
    }
}

// Depth conversion
template <typename T1, typename T2>
void depth_convert(const Tensor<T1> &in, Tensor<T2> &out, ConvertPolicy policy, uint32_t shift)
{
    ARM_COMPUTE_ERROR("The conversion is not supported");
}

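// Supported conversions are implemented as explicit specializations below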
template <>
void depth_convert<int8_t, float>(const Tensor<int8_t> &in, Tensor<float> &out, ConvertPolicy policy, uint32_t shift)
{
    const int8_t fixed_point_position = static_cast<int8_t>(in.fixed_point_position());
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<float>(in[i]) * (1.0f / (1 << fixed_point_position));
    }
}

template <>
void depth_convert<float, int8_t>(const Tensor<float> &in, Tensor<int8_t> &out, ConvertPolicy policy, uint32_t shift)
{
    const int8_t fixed_point_position = static_cast<int8_t>(in.fixed_point_position());
    for(int i = 0; i < in.num_elements(); ++i)
    {
        float val = in[i] * (1 << fixed_point_position) + 0.5f;
        out[i] = ((policy == ConvertPolicy::SATURATE) ? saturate_cast<int8_t>(val) : static_cast<int8_t>(val));
    }
}

template <>
void depth_convert<uint8_t, uint16_t>(const Tensor<uint8_t> &in, Tensor<uint16_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<uint16_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<uint8_t, int16_t>(const Tensor<uint8_t> &in, Tensor<int16_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<int16_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<uint8_t, int32_t>(const Tensor<uint8_t> &in, Tensor<int32_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<int32_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<uint16_t, uint8_t>(const Tensor<uint16_t> &in, Tensor<uint8_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        uint16_t val = in[i] >> shift;
        out[i] = ((policy == ConvertPolicy::SATURATE) ? saturate_cast<uint8_t>(val) : static_cast<uint8_t>(val));
    }
}

template <>
void depth_convert<uint16_t, uint32_t>(const Tensor<uint16_t> &in, Tensor<uint32_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<uint32_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<int16_t, uint8_t>(const Tensor<int16_t> &in, Tensor<uint8_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        int16_t val = in[i] >> shift;
        out[i] = ((policy == ConvertPolicy::SATURATE) ? saturate_cast<uint8_t>(val) : static_cast<uint8_t>(val));
    }
}

template <>
void depth_convert<int16_t, int32_t>(const Tensor<int16_t> &in, Tensor<int32_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<int32_t>(in[i]) << shift;
    }
}

// Matrix multiplication for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void gemm(const Tensor<T> &in1, const Tensor<T> &in2, const Tensor<T> &in3, Tensor<T> &out, float alpha, float beta)
{
    const int M = out.shape().y();
    const int N = out.shape().x();
    const int K = in1.shape().x();

    for(int r = 0; r < M; ++r)
    {
        for(int c = 0; c < N; ++c)
        {
            T acc = 0.0f;

            for(int k = 0; k < K; ++k)
            {
                const T a0 = in1[r * K + k];
                const T b0 = in2[k * N + c];

                acc += a0 * b0;
            }

            // Finalize the result: A * B * alpha + C * beta
            const T c0 = in3[c + r * N];
            out[c + r * N] = alpha * acc + beta * c0;
        }
    }
}

// Matrix multiplication for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void gemm(const Tensor<T> &in1, const Tensor<T> &in2, const Tensor<T> &in3, Tensor<T> &out, float alpha, float beta)
{
    using namespace fixed_point_arithmetic;

    using promoted_type = typename fixed_point_arithmetic::traits::promote<T>::type;

    const int M = out.shape().y();
    const int N = out.shape().x();
    const int K = in1.shape().x();
    const int8_t fixed_point_position = static_cast<int8_t>(in1.fixed_point_position());

    const fixed_point<T> alpha_q(alpha, fixed_point_position);
    const fixed_point<T> beta_q(beta, fixed_point_position);

    for(int r = 0; r < M; ++r)
    {
        for(int c = 0; c < N; ++c)
        {
            fixed_point<promoted_type> acc_q(0, fixed_point_position);

            for(int k = 0; k < K; ++k)
            {
                const fixed_point<promoted_type> a0_q(in1[r * K + k], fixed_point_position, true);
                const fixed_point<promoted_type> b0_q(in2[k * N + c], fixed_point_position, true);
                const fixed_point<promoted_type> axb_q = a0_q * b0_q;

                acc_q = axb_q + acc_q;
            }

            // Finalize the result: A * B * alpha + C * beta
            const fixed_point<T> c0_q(in3[c + r * N], fixed_point_position, true);

            fixed_point<T> res_q(acc_q);
            res_q = alpha_q * res_q;
            res_q = (c0_q * beta_q) + res_q;

            // Store the result
            out[c + r * N] = res_q.raw();
        }
    }
}

// Pixel-wise multiplication
template <typename T1, typename T2, typename T3>
void pixel_wise_multiplication(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
{
    if(scale < 0)
    {
        ARM_COMPUTE_ERROR("Scale of pixel-wise multiplication must be non-negative");
    }
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        double val = static_cast<intermediate_type>(in1[i]) * static_cast<intermediate_type>(in2[i]) * static_cast<double>(scale);
        if(is_floating_point<T3>::value)
        {
            out[i] = val;
        }
        else
        {
            double rounded_val = 0;
            switch(rounding_policy)
            {
                case(RoundingPolicy::TO_ZERO):
                    rounded_val = cpp11::trunc(val);
                    break;
                case(RoundingPolicy::TO_NEAREST_UP):
                    rounded_val = cpp11::round_half_up(val);
                    break;
                case(RoundingPolicy::TO_NEAREST_EVEN):
                    rounded_val = cpp11::round_half_even(val);
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported rounding policy");
            }
            out[i] = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T3>(rounded_val) : static_cast<T3>(rounded_val);
        }
    }
}

// Fixed-point Pixel-wise Multiplication
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void fixed_point_pixel_wise_multiplication(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out, int scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
{
    using namespace fixed_point_arithmetic;

    const int fixed_point_position = in1.fixed_point_position();

    ARM_COMPUTE_ERROR_ON_MSG(in1.data_type() != in2.data_type() || in1.data_type() != out.data_type(),
                             "Tensors must all have the same DataType");
    ARM_COMPUTE_ERROR_ON_MSG(fixed_point_position != in2.fixed_point_position() || fixed_point_position != out.fixed_point_position(),
                             "Fixed-point position must be the same for both inputs and outputs");

    // Validate fixed_point_position
    ARM_COMPUTE_ERROR_ON((in1.data_type() == DataType::QS8) && (fixed_point_position == 0 || fixed_point_position > 7));
    ARM_COMPUTE_ERROR_ON((in1.data_type() == DataType::QS16) && (fixed_point_position == 0 || fixed_point_position > 15));

    fixed_point<T> fp_scale(scale, fixed_point_position);
    const bool is_sat = convert_policy == ConvertPolicy::SATURATE;
    const bool do_scaling = scale != 1;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        fixed_point<T> val1(in1[i], fixed_point_position, true);
        fixed_point<T> val2(in2[i], fixed_point_position, true);
        fixed_point<T> res = (is_sat) ? val1 * val2 : mul<OverflowPolicy::WRAP>(val1, val2);
        if(do_scaling)
        {
            res = (is_sat) ? res * fp_scale : mul<OverflowPolicy::WRAP>(res, fp_scale);
        }
        out[i] = res.raw();
    }
}

// Threshold
template <typename T>
void threshold(const Tensor<T> &in, Tensor<T> &out, uint8_t threshold, uint8_t false_value, uint8_t true_value, ThresholdType type, uint8_t upper)
{
    switch(type)
    {
        case ThresholdType::BINARY:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = ((in[i] > threshold) ? true_value : false_value);
            }
            break;
        case ThresholdType::RANGE:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                if(in[i] > upper)
                {
                    out[i] = false_value;
                }
                else if(in[i] < threshold)
                {
                    out[i] = false_value;
                }
                else
                {
                    out[i] = true_value;
                }
            }
            break;
        default:
            ARM_COMPUTE_ERROR("Thresholding type not recognised");
            break;
    }
}

// Activation Layer for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
{
    const T a = static_cast<T>(act_info.a());
    const T b = static_cast<T>(act_info.b());

    for(int i = 0; i < in.num_elements(); ++i)
    {
        T x = in[i];
        switch(act_info.activation())
        {
            case ActivationLayerInfo::ActivationFunction::ABS:
                out[i] = std::abs(x);
                break;
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                out[i] = std::min<T>(a, std::max<T>(0, x));
                break;
            case ActivationLayerInfo::ActivationFunction::LINEAR:
                out[i] = a * x + b;
                break;
            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                out[i] = static_cast<T>(1) / (static_cast<T>(1) + std::exp(-x));
                break;
            case ActivationLayerInfo::ActivationFunction::RELU:
                out[i] = std::max<T>(0, x);
                break;
            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                out[i] = std::log(static_cast<T>(1) + std::exp(x));
                break;
            case ActivationLayerInfo::ActivationFunction::SQRT:
                out[i] = std::sqrt(x);
                break;
            case ActivationLayerInfo::ActivationFunction::SQUARE:
                out[i] = x * x;
                break;
            case ActivationLayerInfo::ActivationFunction::TANH:
                out[i] = a * std::tanh(b * x);
                break;
            default:
                ARM_COMPUTE_ERROR("Activation function not recognised");
                break;
        }
    }
}

// Activation Layer for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
{
    using namespace fixed_point_arithmetic;
    int fixed_point_position = in.fixed_point_position();
    ActivationLayerInfo::ActivationFunction act_func = act_info.activation();
    const fixed_point<T> a(act_info.a(), fixed_point_position);
    const fixed_point<T> b(act_info.b(), fixed_point_position);
    const fixed_point<T> const_0(0, fixed_point_position);
    const fixed_point<T> const_1(1, fixed_point_position);

    for(int i = 0; i < in.num_elements(); ++i)
    {
        fixed_point<T> x(in[i], fixed_point_position, true);
        switch(act_func)
        {
            case ActivationLayerInfo::ActivationFunction::ABS:
                out[i] = abs(x).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                out[i] = min(a, max(const_0, x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::LINEAR:
                out[i] = add(b, mul(a, x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                out[i] = (const_1 / (const_1 + exp(-x))).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::RELU:
                out[i] = max(const_0, x).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                out[i] = log(const_1 + exp(x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::SQRT:
                out[i] = (const_1 / inv_sqrt(x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::SQUARE:
                out[i] = mul(x, x).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::TANH:
                out[i] = tanh(x).raw();
                break;
            default:
                ARM_COMPUTE_ERROR("Activation function not recognised");
                break;
        }
    }
}

// Batch Normalization Layer for fixed point type
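// out = beta + gamma * (in - mean) / sqrt(var + epsilon), computed per channel in fixed point arithmetic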
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
{
    const int cols = static_cast<int>(in.shape()[0]);
    const int rows = static_cast<int>(in.shape()[1]);
    const int depth = static_cast<int>(in.shape()[2]);
    int upper_dims = in.shape().total_size() / (cols * rows * depth);

    for(int r = 0; r < upper_dims; ++r)
    {
        for(int i = 0; i < depth; ++i)
        {
            for(int k = 0; k < rows; ++k)
            {
                for(int l = 0; l < cols; ++l)
                {
                    const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
                    fixed_point_arithmetic::fixed_point<T> in_qs8(in[pos], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> var_qs8(var[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> mean_qs8(mean[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> beta_qs8(beta[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> gamma_qs8(gamma[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> epsilon_qs8(epsilon, fixed_point_position);

                    auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs8 + epsilon_qs8);
                    auto numerator = in_qs8 - mean_qs8;
                    auto x_bar = numerator * denominator;
                    x_bar = beta_qs8 + x_bar * gamma_qs8;
                    out[pos] = x_bar.raw();
                }
            }
        }
    }
}

// Batch Normalization Layer for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
{
    const int cols = static_cast<int>(in.shape()[0]);
    const int rows = static_cast<int>(in.shape()[1]);
    const int depth = static_cast<int>(in.shape()[2]);
    int upper_dims = in.shape().total_size() / (cols * rows * depth);

    for(int r = 0; r < upper_dims; ++r)
    {
        for(int i = 0; i < depth; ++i)
        {
            for(int k = 0; k < rows; ++k)
            {
                for(int l = 0; l < cols; ++l)
                {
                    const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
                    const float denominator = sqrt(var[i] + epsilon);
                    const float numerator = in[pos] - mean[i];
                    const float x_bar = numerator / denominator;
                    out[pos] = beta[i] + x_bar * gamma[i];
                }
            }
        }
    }
}

// Convolution layer
template <typename T>
void convolution_layer(const Tensor<T> &in, const Tensor<T> &weights, const Tensor<T> &bias, Tensor<T> &out, const PadStrideInfo &conv_info)
{
    const int width_in = in.shape().x();
    const int height_in = in.shape().y();
    const int depth_in = in.shape().z();
    const int width_out = out.shape().x();
    const int height_out = out.shape().y();
    const int depth_out = out.shape().z();
    const int width_weights = weights.shape().x();
    const int height_weights = weights.shape().y();
    const int depth_weights = weights.shape().z();
    const int pad_xi = std::min(static_cast<int>(conv_info.pad().first), width_weights / 2);
    const int pad_yi = std::min(static_cast<int>(conv_info.pad().second), height_weights / 2);
    const int start_xi = width_weights / 2 - pad_xi;
    const int start_yi = height_weights / 2 - pad_yi;
    const int end_xi = width_in - start_xi;
    const int end_yi = height_in - start_yi;
    const int stride_xi = conv_info.stride().first;
    const int stride_yi = conv_info.stride().second;
    const int num_batches = in.shape().total_size() / (width_in * height_in * depth_in);

    for(int r = 0; r < num_batches; ++r)
    {
        for(int yi = start_yi; yi < end_yi; yi += stride_yi)
        {
            for(int xi = start_xi; xi < end_xi; xi += stride_xi)
            {
                for(int ofm = 0; ofm < depth_out; ++ofm)
                {
                    // Compute input and output offsets
                    const int offset_in = r * width_in * height_in * depth_in;
                    const int xo = (xi - start_xi) / stride_xi;
                    const int yo = (yi - start_yi) / stride_yi;
                    const int offset_out = xo + yo * width_out + ofm * width_out * height_out + r * width_out * height_out * depth_out;

                    // Compute 3D convolution
                    convolution3d(in.data() + offset_in,
                                  weights.data() + ofm * width_weights * height_weights * depth_weights,
                                  bias.data() + ofm,
                                  out.data() + offset_out,
                                  xi, yi,
                                  width_in, height_in, depth_in,
                                  width_weights, height_weights,
                                  static_cast<int8_t>(in.fixed_point_position()));
                }
            }
        }
    }
}

// Fully connected layer
template <typename T>
void fully_connected_layer(const Tensor<T> &in, const Tensor<T> &weights, const Tensor<T> &bias, Tensor<T> &out)
{
    ARM_COMPUTE_ERROR_ON(weights.shape().x() != out.shape().x());
    ARM_COMPUTE_ERROR_ON(weights.shape().y() != in.shape().x() * in.shape().y() * in.shape().z());
    const int cols_weights = weights.shape().x();
    const int rows_weights = weights.shape().y();
    const int num_batches = in.shape().total_size() / rows_weights;

    for(int k = 0; k < num_batches; ++k)
    {
        vector_matrix_multiply<T>(in.data() + k * rows_weights,
                                  weights.data(),
                                  bias.data(),
                                  out.data() + k * cols_weights,
                                  cols_weights,
                                  rows_weights,
                                  in.fixed_point_position());
    }
}

// Normalization Layer for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
{
    const uint32_t norm_size = norm_info.norm_size();
    NormType type = norm_info.type();
    float beta = norm_info.beta();
    uint32_t kappa = norm_info.kappa();

    const int cols = static_cast<int>(in.shape()[0]);
    const int rows = static_cast<int>(in.shape()[1]);
    const int depth = static_cast<int>(in.shape()[2]);
    int upper_dims = in.shape().total_size() / (cols * rows);

    float coeff = norm_info.scale_coeff();
    int radius_cols = norm_size / 2;
    // IN_MAP_1D and CROSS_MAP normalize over a single axis only
    int radius_rows = (NormType::IN_MAP_2D == type) ? norm_size / 2 : 0;

    if(type == NormType::CROSS_MAP)
    {
        // Also remove depth from the upper dimensions since it is the
        // axis along which we normalize
        upper_dims /= depth;
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    for(int l = 0; l < depth; ++l)
                    {
                        float accumulated_scale = 0.f;
                        for(int j = -radius_cols; j <= radius_cols; ++j)
                        {
                            const int z = l + j;
                            if(z >= 0 && z < depth)
                            {
                                const T value = in[k + i * cols + z * rows * cols + r * cols * rows * depth];
                                accumulated_scale += value * value;
                            }
                        }
                        out[k + i * cols + l * rows * cols + r * cols * rows * depth] = kappa + accumulated_scale * coeff;
                    }
                }
            }
        }
    }
    else
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    float accumulated_scale = 0.f;
                    for(int j = -radius_rows; j <= radius_rows; ++j)
                    {
                        const int y = i + j;
                        for(int l = -radius_cols; l <= radius_cols; ++l)
                        {
                            const int x = k + l;
                            if((x >= 0 && y >= 0) && (x < cols && y < rows))
                            {
                                const T value = in[x + y * cols + r * cols * rows];
                                accumulated_scale += value * value;
                            }
                        }
                    }
                    out[k + i * cols + r * cols * rows] = kappa + accumulated_scale * coeff;
                }
            }
        }
    }

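    // Normalize: out = in / out^beta, with fast paths for beta == 1 and beta == 0.5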
    if(beta == 1.f)
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            out[i] = in[i] / out[i];
        }
    }
    else if(beta == 0.5f)
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            out[i] = in[i] / std::sqrt(out[i]);
        }
    }
    else
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            out[i] = in[i] * std::exp(std::log(out[i]) * -beta);
        }
    }
}

// Normalization Layer for fixed-point types
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
{
    using namespace fixed_point_arithmetic;

    const int fixed_point_position = in.fixed_point_position();

    const uint32_t norm_size = norm_info.norm_size();
    NormType type = norm_info.type();
    fixed_point<T> beta(norm_info.beta(), fixed_point_position);
    fixed_point<T> kappa(norm_info.kappa(), fixed_point_position);

    const int cols = static_cast<int>(in.shape()[0]);
    const int rows = static_cast<int>(in.shape()[1]);
    const int depth = static_cast<int>(in.shape()[2]);
    int upper_dims = in.shape().total_size() / (cols * rows);

    fixed_point<T> coeff(norm_info.scale_coeff(), fixed_point_position);
    int radius_cols = norm_size / 2;
    // IN_MAP_1D and CROSS_MAP normalize over a single axis only
    int radius_rows = (NormType::IN_MAP_2D == type) ? norm_size / 2 : 0;

    if(type == NormType::CROSS_MAP)
    {
        // Also remove depth from the upper dimensions since it is the
        // axis along which we normalize
        upper_dims /= depth;
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    for(int l = 0; l < depth; ++l)
                    {
                        fixed_point<T> accumulated_scale(0.f, fixed_point_position);
                        for(int j = -radius_cols; j <= radius_cols; ++j)
                        {
                            const int z = l + j;
                            if(z >= 0 && z < depth)
                            {
                                const T value = in[k + i * cols + z * rows * cols + r * cols * rows * depth];
                                const fixed_point<T> fp_value(value, fixed_point_position, true);
                                accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value));
                            }
                        }
                        accumulated_scale = add(kappa, mul(accumulated_scale, coeff));
                        out[k + i * cols + l * rows * cols + r * cols * rows * depth] = accumulated_scale.raw();
                    }
                }
            }
        }
    }
    else
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    fixed_point<T> accumulated_scale(0.f, fixed_point_position);
                    for(int j = -radius_rows; j <= radius_rows; ++j)
                    {
                        const int y = i + j;
                        for(int l = -radius_cols; l <= radius_cols; ++l)
                        {
                            const int x = k + l;
                            if((x >= 0 && y >= 0) && (x < cols && y < rows))
                            {
                                const T value = in[x + y * cols + r * cols * rows];
                                const fixed_point<T> fp_value(value, fixed_point_position, true);
                                accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value));
                            }
                        }
                    }
                    accumulated_scale = add(kappa, mul(accumulated_scale, coeff));
                    out[k + i * cols + r * cols * rows] = accumulated_scale.raw();
                }
            }
        }
    }

    if(norm_info.beta() == 1.f)
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            fixed_point<T> res = div(fixed_point<T>(in[i], fixed_point_position, true), fixed_point<T>(out[i], fixed_point_position, true));
            out[i] = res.raw();
        }
    }
    else
    {
        const fixed_point<T> beta(norm_info.beta(), fixed_point_position);
        for(int i = 0; i < out.num_elements(); ++i)
        {
            fixed_point<T> res = pow(fixed_point<T>(out[i], fixed_point_position, true), beta);
            res = div(fixed_point<T>(in[i], fixed_point_position, true), res);
            out[i] = res.raw();
        }
    }
}

// Pooling layer
template <typename T>
void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info, int fixed_point_position)
{
    const int pool_size = pool_info.pool_size();
    PoolingType type = pool_info.pool_type();
    int pool_stride_x = 0;
    int pool_stride_y = 0;
    int pad_x = 0;
    int pad_y = 0;
    std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info().stride();
    std::tie(pad_x, pad_y) = pool_info.pad_stride_info().pad();

    const int w_in = static_cast<int>(in.shape()[0]);
    const int h_in = static_cast<int>(in.shape()[1]);

    const int w_out = static_cast<int>(out.shape()[0]);
    const int h_out = static_cast<int>(out.shape()[1]);

    int upper_dims = in.shape().total_size() / (w_in * h_in);

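    // Compute the pooled width and height according to the layer's dimension rounding policy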
    int pooled_w = 0;
    int pooled_h = 0;
    if(pool_info.pad_stride_info().round() == DimensionRoundingType::CEIL)
    {
        pooled_w = static_cast<int>(ceil(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
        pooled_h = static_cast<int>(ceil(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
    }
    else
    {
        pooled_w = static_cast<int>(floor(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
        pooled_h = static_cast<int>(floor(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
    }

    if((pooled_w - 1) * pool_stride_x >= w_in + pad_x)
    {
        --pooled_w;
    }
    if((pooled_h - 1) * pool_stride_y >= h_in + pad_y)
    {
        --pooled_h;
    }

    if(type == PoolingType::MAX)
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int h = 0; h < pooled_h; ++h)
            {
                for(int w = 0; w < pooled_w; ++w)
                {
                    int wstart = w * pool_stride_x - pad_x;
                    int hstart = h * pool_stride_y - pad_y;
                    int wend = std::min(wstart + pool_size, w_in);
                    int hend = std::min(hstart + pool_size, h_in);
                    wstart = std::max(wstart, 0);
                    hstart = std::max(hstart, 0);

                    T max_val = std::numeric_limits<T>::lowest();
                    for(int y = hstart; y < hend; ++y)
                    {
                        for(int x = wstart; x < wend; ++x)
                        {
                            T val = in[r * h_in * w_in + y * w_in + x];
                            if(val > max_val)
                            {
                                max_val = val;
                            }
                        }
                    }

                    out[r * h_out * w_out + h * pooled_w + w] = max_val;
                }
            }
        }
    }
    else // Average pooling
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int h = 0; h < pooled_h; ++h)
            {
                for(int w = 0; w < pooled_w; ++w)
                {
                    T avg_val = 0;
                    int wstart = w * pool_stride_x - pad_x;
                    int hstart = h * pool_stride_y - pad_y;
                    int wend = std::min(wstart + pool_size, w_in + pad_x);
                    int hend = std::min(hstart + pool_size, h_in + pad_y);
                    int pool = (hend - hstart) * (wend - wstart);
                    wstart = std::max(wstart, 0);
                    hstart = std::max(hstart, 0);
                    wend = std::min(wend, w_in);
                    hend = std::min(hend, h_in);
                    if(is_floating_point<T>::value)
                    {
                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                avg_val += in[r * h_in * w_in + y * w_in + x];
                            }
                        }
                        out[r * h_out * w_out + h * pooled_w + w] = avg_val / pool;
                    }
                    else
                    {
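                        // Approximate reciprocals 1/n in Q0.7 fixed point for pool sizes n = 2..9
                        // (entries 0 and 1 are unused); shifted below to the tensor's fixed point position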
                        static std::array<qint8_t, 10> scale_values_q8 =
                        { { 0x0, 0x0, 0x40, 0x2A, 0x20, 0x19, 0x15, 0x12, 0x10, 0xE } };

                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                avg_val = sqadd_qs8(avg_val, in[r * h_in * w_in + y * w_in + x]);
                            }
                        }
                        out[r * h_out * w_out + h * pooled_w + w] = sqmul_qs8(avg_val, (scale_values_q8[pool] >> (7 - fixed_point_position)), fixed_point_position);
                    }
                }
            }
        }
    }
}

// Softmax Layer for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
{
    const int cols = static_cast<int>(in.shape()[0]);
    const int upper_dims = in.shape().total_size() / cols;
    for(int r = 0; r < upper_dims; ++r)
    {
        // Find max
        T max = std::numeric_limits<T>::lowest();
        for(int c = 0; c < cols; ++c)
        {
            const T x = in[r * cols + c];
            if(x > max)
            {
                max = x;
            }
        }

        // Regularize
        T sum = 0;
        for(int c = 0; c < cols; ++c)
        {
            const T res = exp(in[r * cols + c] - max);
            out[r * cols + c] = res;
            sum += res;
        }

        // Normalize
        const T norm_val = 1 / sum;
        for(int c = 0; c < cols; ++c)
        {
            out[r * cols + c] *= norm_val;
        }
    }
}
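
// Softmax Layer for fixed point type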
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
{
    using namespace fixed_point_arithmetic;
    using promoted_T = typename test::traits::promote<T>::type;

    const int fixed_point_position = in.fixed_point_position();
    const int cols = static_cast<int>(in.shape()[0]);
    const int upper_dims = in.shape().total_size() / cols;

    for(int r = 0; r < upper_dims; ++r)
    {
        // Find max
        fixed_point<T> max(std::numeric_limits<T>::lowest(), fixed_point_position, true);
        for(int c = 0; c < cols; ++c)
        {
            const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
            if(x > max)
            {
                max = x;
            }
        }

        // Regularize
        fixed_point<promoted_T> sum(0, fixed_point_position);
        for(int c = 0; c < cols; ++c)
        {
            const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
            fixed_point<T> res = exp(x - max);
            out[r * cols + c] = res.raw();
            sum = add(sum, static_cast<fixed_point<promoted_T>>(res));
        }

        // Normalize
        fixed_point<T> sat_sum(sum);
        for(int c = 0; c < cols; ++c)
        {
            const fixed_point<T> x(out[r * cols + c], fixed_point_position, true);
            out[r * cols + c] = div(x, sat_sum).raw();
        }
    }
}

// Fixed point operations
template <typename T>
void fixed_point_operation(const Tensor<T> &in, Tensor<T> &out, FixedPointOp op)
{
    int p = in.fixed_point_position();
    switch(op)
    {
        case FixedPointOp::EXP:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::exp(fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        case FixedPointOp::LOG:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::log(fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        case FixedPointOp::INV_SQRT:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::inv_sqrt(fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        case FixedPointOp::RECIPROCAL:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::div(fixed_point_arithmetic::fixed_point<T>(1, p), fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        default:
            ARM_COMPUTE_ERROR("Fixed point operation not supported");
            break;
    }
}

// Tensor print
template <typename T>
void print(const Tensor<T> &in, std::ostream &out)
{
    out << "\n";
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out << in[i] << " ";
    }
    out << "\n";
}
} // namespace tensor_operations
} // namespace validation
} // namespace test
} // namespace arm_compute

#endif /* __ARM_COMPUTE_TEST_TENSOR_OPERATIONS_H__ */