/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_TENSOR_OPERATIONS_H__
#define __ARM_COMPUTE_TEST_TENSOR_OPERATIONS_H__

#include "FixedPoint.h"
#include "Tensor.h"
#include "Types.h"
#include "Utils.h"

#include "arm_compute/core/FixedPoint.h"
#include "arm_compute/core/Types.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <limits>
#include <ostream>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace tensor_operations
{
namespace
{
bool is_valid_pixel(int i, int min, int max)
{
    return (i >= min && i < max);
}

// 3D convolution for floating point type
template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int8_t fixed_point_position)
{
    const int half_width_weights  = width_weights / 2;
    const int half_height_weights = height_weights / 2;

    // Reset accumulator
    T acc = static_cast<T>(0);

    // Compute a 2D convolution for each IFM and accumulate the result
    for(int ifm = 0; ifm < depth_in; ++ifm)
    {
        // Compute the offset for the input slice
        const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;

        // Compute 2D convolution
        for(int yk = -half_height_weights; yk <= half_height_weights; ++yk)
        {
            for(int xk = -half_width_weights; xk <= half_width_weights; ++xk)
            {
                // Check if the pixel is out-of-bound
                if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
                {
                    const int idx = xk + half_width_weights;
                    const int idy = yk + half_height_weights;

                    const T i_value = in[offset_slice_in + xk + yk * width_in];
                    const T w_value = weights[idx + idy * width_weights + ifm * width_weights * height_weights];

                    acc += i_value * w_value;
                }
            }
        }
    }

    // Accumulate the bias and store the result
    *out = acc + (*bias);
}
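// Note: (xi, yi) is the centre of the receptive field in the input slice. For a
// 3x3 kernel, half_width_weights == 1, so (xk, yk) range over [-1, 1] and
// in[offset_slice_in + xk + yk * width_in] walks the 3x3 neighbourhood centred
// at (xi, yi), while (idx, idy) re-bases the same offsets to [0, 2] to index the
// row-major weights.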

// 3D convolution for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights,
                   int8_t fixed_point_position)
{
    const int half_width_weights  = width_weights / 2;
    const int half_height_weights = height_weights / 2;

    using namespace fixed_point_arithmetic;
    using promoted_type = typename fixed_point_arithmetic::traits::promote<T>::type;

    // Reset accumulator
    fixed_point<promoted_type> acc(0, fixed_point_position);

    // Compute a 2D convolution for each IFM and accumulate the result
    for(int ifm = 0; ifm < depth_in; ++ifm)
    {
        // Compute the offset for the input slice
        const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;

        // Compute 2D convolution
        for(int yk = -half_height_weights; yk <= half_height_weights; ++yk)
        {
            for(int xk = -half_width_weights; xk <= half_width_weights; ++xk)
            {
                // Check if the pixel is out-of-bound
                if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
                {
                    const int idx = xk + half_width_weights;
                    const int idy = yk + half_height_weights;

                    const fixed_point<promoted_type> i_value(in[offset_slice_in + xk + yk * width_in], fixed_point_position, true);
                    const fixed_point<promoted_type> w_value(weights[idx + idy * width_weights + ifm * width_weights * height_weights], fixed_point_position, true);
                    const fixed_point<promoted_type> iw = i_value * w_value;
                    acc                                 = iw + acc;
                }
            }
        }
    }

    // Get the bias
    const fixed_point<promoted_type> b(*bias, fixed_point_position, true);

    // Accumulate the bias and convert back
    acc = acc + b;
    fixed_point<T> res(acc);
    *out = res.raw();
}
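// Note: the multiply-accumulate runs in the promoted type (e.g. QS8 values are
// widened before accumulation) so intermediate products do not overflow the
// narrow type; only the final fixed_point<T> conversion narrows the result back,
// presumably with saturation (see the fixed-point test arithmetic in FixedPoint.h).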

template <typename T>
void vector_matrix_multiply(const T *in, const T *weights, const T *bias, T *out, int cols_weights, int rows_weights, uint8_t fixed_point_position)
{
    for(int x = 0; x < cols_weights; ++x)
    {
        T acc = 0.0f;
        for(int y = 0; y < rows_weights; ++y)
        {
            acc += in[y] * weights[x + y * cols_weights];
        }
        out[x] = acc + bias[x];
    }
}

template <>
void vector_matrix_multiply(const int8_t *in, const int8_t *weights, const int8_t *bias, int8_t *out, int cols_weights, int rows_weights, uint8_t fixed_point_position)
{
    using namespace fixed_point_arithmetic;
    using promoted_type = typename fixed_point_arithmetic::traits::promote<int8_t>::type;

    for(int x = 0; x < cols_weights; ++x)
    {
        // Reset accumulator
        fixed_point<promoted_type> acc(0, fixed_point_position);

        for(int y = 0; y < rows_weights; ++y)
        {
            const fixed_point<promoted_type> i_value(in[y], fixed_point_position, true);
            const fixed_point<promoted_type> w_value(weights[x + y * cols_weights], fixed_point_position, true);
            const fixed_point<promoted_type> iw = i_value * w_value;
            acc                                 = iw + acc;
        }

        // Get the bias
        const fixed_point<int8_t> b(bias[x], fixed_point_position, true);

        // Convert back and accumulate the bias
        fixed_point<int8_t> res(acc);
        res = res + b;

        // Store the result
        out[x] = res.raw();
    }
}

/** Apply a 2D spatial filter on a single element of @p in at coordinates @p coord
 *
 * - Filter sizes have to be odd numbers
 * - Valid region assumed
 * - Row-major order of the filter assumed
 * - TO_ZERO rounding policy assumed
 * - SATURATE convert policy assumed
 */
template <typename T1, typename T2, typename T3>
void apply_2d_spatial_filter(Coordinates coord, const Tensor<T1> &in, Tensor<T3> &out, const TensorShape &filter_shape, const T2 *filter_itr, float scale)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;
    intermediate_type val   = 0;
    const int x             = coord.x();
    const int y             = coord.y();
    for(int j = y - static_cast<int>(filter_shape[1] / 2); j <= y + static_cast<int>(filter_shape[1] / 2); ++j)
    {
        for(int i = x - static_cast<int>(filter_shape[0] / 2); i <= x + static_cast<int>(filter_shape[0] / 2); ++i)
        {
            coord.set(0, i);
            coord.set(1, j);
            val += static_cast<intermediate_type>(*filter_itr) * static_cast<intermediate_type>(in[coord2index(in.shape(), coord)]);
            ++filter_itr;
        }
    }
    coord.set(0, x);
    coord.set(1, y);
    const double rounded_val = cpp11::trunc(val * static_cast<double>(scale));
    out[coord2index(in.shape(), coord)] = saturate_cast<T3>(rounded_val);
}
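// Illustrative use (mirrors box3x3 below): for a 3x3 mean filter over the pixel
// at `id`, one would call
//   apply_2d_spatial_filter(id, in, out, TensorShape(3U, 3U), filter.data(), 1.f / 9.f);
// where `filter` holds nine 1s in row-major order.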
} // namespace

// Integral Image
void integral_image(const Tensor<uint8_t> &in, Tensor<uint32_t> &out)
{
    // Length of dimensions
    const size_t width  = in.shape().x();
    const size_t height = in.shape().y();
    const size_t depth  = in.shape().z() * in.shape()[3] * in.shape()[4] * in.shape()[5];

    const size_t image_size = width * height;

    for(size_t z = 0; z < depth; ++z)
    {
        size_t current_image = z * image_size;

        // First element of each image
        out[current_image] = in[current_image];

        // First row of each image (add only pixel on the left)
        for(size_t x = 1; x < width; ++x)
        {
            out[current_image + x] = static_cast<uint32_t>(in[current_image + x]) + out[current_image + x - 1];
        }

        // Subsequent rows
        for(size_t y = 1; y < height; ++y)
        {
            size_t current_row = current_image + (width * y);

            // First element of each row (add only pixel up)
            out[current_row] = static_cast<uint32_t>(in[current_row]) + out[current_row - width];

            // Following row elements
            for(size_t x = 1; x < width; ++x)
            {
                size_t current_pixel = current_row + x;

                // out = in + up(out) + left(out) - up_left(out)
                out[current_pixel] = static_cast<uint32_t>(in[current_pixel]) + out[current_pixel - 1]
                                     + out[current_pixel - width] - out[current_pixel - width - 1];
            }
        }
    }
}
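// Worked example: for the 2x2 input [[1, 2], [3, 4]] the recurrence above yields
// [[1, 3], [4, 10]], since out(1, 1) = 4 + left(4) + up(3) - up_left(1) = 10.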

// Absolute difference
template <typename T1, typename T2, typename T3>
void absolute_difference(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        intermediate_type val = std::abs(static_cast<intermediate_type>(in1[i]) - static_cast<intermediate_type>(in2[i]));
        out[i]                = saturate_cast<T3>(val);
    }
}

// Accumulate
template <typename T1, typename T2>
void accumulate(const Tensor<T1> &in, Tensor<T2> &out)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2>::intermediate_type;

    for(int i = 0; i < in.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(out[i]) + static_cast<intermediate_type>(in[i]);
        out[i]                = saturate_cast<T2>(val);
    }
}

// Accumulate squared
template <typename T1, typename T2>
void accumulate_squared(const Tensor<T1> &in, Tensor<T2> &out, uint32_t shift)
{
    if(shift > 15)
    {
        ARM_COMPUTE_ERROR("Shift in accumulate_squared must be within the range [0, 15]");
    }
    using intermediate_type = typename common_promoted_signed_type<T1, T2>::intermediate_type;
    intermediate_type denom = 1 << shift;

    for(int i = 0; i < in.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(out[i]) + (static_cast<intermediate_type>(in[i]) * static_cast<intermediate_type>(in[i]) / denom);
        out[i]                = saturate_cast<T2>(val);
    }
}
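// e.g. with shift = 4 each squared input is divided by 2^4 = 16 before being
// accumulated into `out`, trading precision for headroom.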

// Accumulate weighted
template <typename T>
void accumulate_weighted(const Tensor<T> &in, Tensor<T> &out, float alpha)
{
    if(alpha < 0.f || alpha > 1.f)
    {
        ARM_COMPUTE_ERROR("Weight (alpha) specified in accumulate_weighted must be within the range [0, 1]");
    }
    using intermediate_type = typename common_promoted_signed_type<T>::intermediate_type;

    for(int i = 0; i < in.num_elements(); ++i)
    {
        double val = (1. - static_cast<double>(alpha)) * static_cast<intermediate_type>(out[i]) + static_cast<double>(alpha) * static_cast<intermediate_type>(in[i]);
        out[i]     = static_cast<T>(val);
    }
}

// Arithmetic addition
template <typename T1, typename T2, typename T3>
void arithmetic_addition(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out, ConvertPolicy convert_policy)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(in1[i]) + static_cast<intermediate_type>(in2[i]);
        out[i]                = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T3>(val) : static_cast<T3>(val);
    }
}

// Arithmetic subtraction
template <typename T1, typename T2, typename T3>
void arithmetic_subtraction(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out, ConvertPolicy convert_policy)
{
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        intermediate_type val = static_cast<intermediate_type>(in1[i]) - static_cast<intermediate_type>(in2[i]);
        out[i]                = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T3>(val) : static_cast<T3>(val);
    }
}

// Bitwise and
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_and(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out)
{
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        out[i] = in1[i] & in2[i];
    }
}

// Bitwise or
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_or(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out)
{
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        out[i] = in1[i] | in2[i];
    }
}

// Bitwise xor
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_xor(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out)
{
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        out[i] = in1[i] ^ in2[i];
    }
}

// Bitwise not
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void bitwise_not(const Tensor<T> &in, Tensor<T> &out)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = ~in[i];
    }
}

// 3-by-3 box filter
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void box3x3(const Tensor<T> &in, Tensor<T> &out)
{
    const std::array<T, 9> filter{ { 1, 1, 1, 1, 1, 1, 1, 1, 1 } };
    float scale = 1.f / static_cast<float>(filter.size());
    const ValidRegion valid_region = shape_to_valid_region_undefined_border(in.shape(), BorderSize(1));
    for(int element_idx = 0; element_idx < in.num_elements(); ++element_idx)
    {
        const Coordinates id = index2coord(in.shape(), element_idx);
        if(is_in_valid_region(valid_region, id))
        {
            apply_2d_spatial_filter(id, in, out, TensorShape(3U, 3U), filter.data(), scale);
        }
    }
}
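// With all nine taps equal to 1 and scale = 1/9 this is a mean filter; pixels in
// the one-pixel undefined border are simply left untouched in `out`.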

// Depth conversion
template <typename T1, typename T2>
void depth_convert(const Tensor<T1> &in, Tensor<T2> &out, ConvertPolicy policy, uint32_t shift)
{
    ARM_COMPUTE_ERROR("The conversion is not supported");
}

template <>
void depth_convert<int8_t, float>(const Tensor<int8_t> &in, Tensor<float> &out, ConvertPolicy policy, uint32_t shift)
{
    const int8_t fixed_point_position = static_cast<int8_t>(in.fixed_point_position());
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<float>(in[i]) * (1.0f / (1 << fixed_point_position));
    }
}

template <>
void depth_convert<float, int8_t>(const Tensor<float> &in, Tensor<int8_t> &out, ConvertPolicy policy, uint32_t shift)
{
    const int8_t fixed_point_position = static_cast<int8_t>(in.fixed_point_position());
    for(int i = 0; i < in.num_elements(); ++i)
    {
        float val = in[i] * (1 << fixed_point_position) + 0.5f;
        out[i]    = ((policy == ConvertPolicy::SATURATE) ? saturate_cast<int8_t>(val) : static_cast<int8_t>(val));
    }
}

template <>
void depth_convert<uint8_t, uint16_t>(const Tensor<uint8_t> &in, Tensor<uint16_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<uint16_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<uint8_t, int16_t>(const Tensor<uint8_t> &in, Tensor<int16_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<int16_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<uint8_t, int32_t>(const Tensor<uint8_t> &in, Tensor<int32_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<int32_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<uint16_t, uint8_t>(const Tensor<uint16_t> &in, Tensor<uint8_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        uint16_t val = in[i] >> shift;
        out[i]       = ((policy == ConvertPolicy::SATURATE) ? saturate_cast<uint8_t>(val) : static_cast<uint8_t>(val));
    }
}

template <>
void depth_convert<uint16_t, uint32_t>(const Tensor<uint16_t> &in, Tensor<uint32_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<uint32_t>(in[i]) << shift;
    }
}

template <>
void depth_convert<int16_t, uint8_t>(const Tensor<int16_t> &in, Tensor<uint8_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        int16_t val = in[i] >> shift;
        out[i]      = ((policy == ConvertPolicy::SATURATE) ? saturate_cast<uint8_t>(val) : static_cast<uint8_t>(val));
    }
}

template <>
void depth_convert<int16_t, int32_t>(const Tensor<int16_t> &in, Tensor<int32_t> &out, ConvertPolicy policy, uint32_t shift)
{
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out[i] = static_cast<int32_t>(in[i]) << shift;
    }
}
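// Worked example for the QS8 conversions above: with fixed_point_position = 3 the
// raw QS8 value 24 represents 24 / 2^3 = 3.0f, and converting 3.0f back gives
// static_cast<int8_t>(3.0f * 2^3 + 0.5f) = 24 (the + 0.5f implements
// round-to-nearest for positive values).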

// Matrix multiplication for floating point type
template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
void gemm(const Tensor<T> &in1, const Tensor<T> &in2, const Tensor<T> &in3, Tensor<T> &out, float alpha, float beta)
{
    const int M = out.shape().y();
    const int N = out.shape().x();
    const int K = in1.shape().x();

    for(int r = 0; r < M; ++r)
    {
        for(int c = 0; c < N; ++c)
        {
            T acc = 0.0f;

            for(int k = 0; k < K; ++k)
            {
                const T a0 = in1[r * K + k];
                const T b0 = in2[k * N + c];

                acc += a0 * b0;
            }

            // Finalize the result: A * B * alpha + C * beta
            const T c0     = in3[c + r * N];
            out[c + r * N] = alpha * acc + beta * c0;
        }
    }
}
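// In BLAS terms this computes out = alpha * A * B + beta * C with row-major
// A (M x K) = in1, B (K x N) = in2 and C (M x N) = in3.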

// Matrix multiplication for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void gemm(const Tensor<T> &in1, const Tensor<T> &in2, const Tensor<T> &in3, Tensor<T> &out, float alpha, float beta)
{
    using namespace fixed_point_arithmetic;

    using promoted_type = typename fixed_point_arithmetic::traits::promote<T>::type;

    const int M = out.shape().y();
    const int N = out.shape().x();
    const int K = in1.shape().x();
    const int8_t fixed_point_position = static_cast<int8_t>(in1.fixed_point_position());

    const fixed_point<T> alpha_q(alpha, fixed_point_position);
    const fixed_point<T> beta_q(beta, fixed_point_position);

    for(int r = 0; r < M; ++r)
    {
        for(int c = 0; c < N; ++c)
        {
            fixed_point<promoted_type> acc_q(0, fixed_point_position);

            for(int k = 0; k < K; ++k)
            {
                const fixed_point<promoted_type> a0_q(in1[r * K + k], fixed_point_position, true);
                const fixed_point<promoted_type> b0_q(in2[k * N + c], fixed_point_position, true);
                const fixed_point<promoted_type> axb_q = a0_q * b0_q;

                acc_q = axb_q + acc_q;
            }

            // Finalize the result: A * B * alpha + C * beta
            const fixed_point<T> c0_q(in3[c + r * N], fixed_point_position, true);

            fixed_point<T> res_q(acc_q);
            res_q = alpha_q * res_q;
            res_q = (c0_q * beta_q) + res_q;

            // Store the result
            out[c + r * N] = res_q.raw();
        }
    }
}

// Pixel-wise multiplication
template <typename T1, typename T2, typename T3>
void pixel_wise_multiplication(const Tensor<T1> &in1, const Tensor<T2> &in2, Tensor<T3> &out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
{
    if(scale < 0)
    {
        ARM_COMPUTE_ERROR("Scale of pixel-wise multiplication must be non-negative");
    }
    using intermediate_type = typename common_promoted_signed_type<T1, T2, T3>::intermediate_type;
    for(int i = 0; i < in1.num_elements(); ++i)
    {
        double val = static_cast<intermediate_type>(in1[i]) * static_cast<intermediate_type>(in2[i]) * static_cast<double>(scale);
        if(std::is_floating_point<T3>::value)
        {
            out[i] = val;
        }
        else
        {
            double rounded_val = 0;
            switch(rounding_policy)
            {
                case(RoundingPolicy::TO_ZERO):
                    rounded_val = cpp11::trunc(val);
                    break;
                case(RoundingPolicy::TO_NEAREST_UP):
                    rounded_val = cpp11::round_half_up(val);
                    break;
                case(RoundingPolicy::TO_NEAREST_EVEN):
                    rounded_val = cpp11::round_half_even(val);
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported rounding policy");
            }
            out[i] = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T3>(rounded_val) : static_cast<T3>(rounded_val);
        }
    }
}
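// e.g. val = 2.5 rounds to 2 under TO_ZERO, 3 under TO_NEAREST_UP and 2 under
// TO_NEAREST_EVEN (ties go to the even neighbour).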

// Fixed-point Pixel-wise Multiplication
template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
void fixed_point_pixel_wise_multiplication(const Tensor<T> &in1, const Tensor<T> &in2, Tensor<T> &out, int scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
{
    using namespace fixed_point_arithmetic;

    const int fixed_point_position = in1.fixed_point_position();

    ARM_COMPUTE_ERROR_ON_MSG(in1.data_type() != in2.data_type() || in1.data_type() != out.data_type(),
                             "Tensors must all have the same DataType");
    ARM_COMPUTE_ERROR_ON_MSG(fixed_point_position != in2.fixed_point_position() || fixed_point_position != out.fixed_point_position(),
                             "Fixed-point position must be the same for both inputs and outputs");

    // Validate fixed_point_position
    ARM_COMPUTE_ERROR_ON((in1.data_type() == DataType::QS8) && (fixed_point_position == 0 || fixed_point_position > 7));
    ARM_COMPUTE_ERROR_ON((in1.data_type() == DataType::QS16) && (fixed_point_position == 0 || fixed_point_position > 15));

    fixed_point<T> fp_scale(scale, fixed_point_position);
    const bool is_sat     = convert_policy == ConvertPolicy::SATURATE;
    const bool do_scaling = scale != 1;

    for(int i = 0; i < in1.num_elements(); ++i)
    {
        fixed_point<T> val1(in1[i], fixed_point_position, true);
        fixed_point<T> val2(in2[i], fixed_point_position, true);
        fixed_point<T> res = (is_sat) ? val1 * val2 : mul<OverflowPolicy::WRAP>(val1, val2);
        if(do_scaling)
        {
            res = (is_sat) ? res * fp_scale : mul<OverflowPolicy::WRAP>(res, fp_scale);
        }
        out[i] = res.raw();
    }
}

// Threshold
template <typename T>
void threshold(const Tensor<T> &in, Tensor<T> &out, uint8_t threshold, uint8_t false_value, uint8_t true_value, ThresholdType type, uint8_t upper)
{
    switch(type)
    {
        case ThresholdType::BINARY:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = ((in[i] > threshold) ? true_value : false_value);
            }
            break;
        case ThresholdType::RANGE:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                if(in[i] > upper)
                {
                    out[i] = false_value;
                }
                else if(in[i] < threshold)
                {
                    out[i] = false_value;
                }
                else
                {
                    out[i] = true_value;
                }
            }
            break;
        default:
            ARM_COMPUTE_ERROR("Thresholding type not recognised");
            break;
    }
}
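// BINARY maps pixels strictly above `threshold` to true_value; RANGE maps pixels
// inside the closed interval [threshold, upper] to true_value and everything else
// to false_value.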

// Activation Layer for floating point type
template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
{
    const T a = static_cast<T>(act_info.a());
    const T b = static_cast<T>(act_info.b());

    for(int i = 0; i < in.num_elements(); ++i)
    {
        T x = in[i];
        switch(act_info.activation())
        {
            case ActivationLayerInfo::ActivationFunction::ABS:
                out[i] = std::abs(x);
                break;
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                out[i] = std::min<T>(a, std::max<T>(0, x));
                break;
            case ActivationLayerInfo::ActivationFunction::LINEAR:
                out[i] = a * x + b;
                break;
            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                out[i] = static_cast<T>(1) / (static_cast<T>(1) + std::exp(-x));
                break;
            case ActivationLayerInfo::ActivationFunction::RELU:
                out[i] = std::max<T>(0, x);
                break;
            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                out[i] = std::log(static_cast<T>(1) + std::exp(x));
                break;
            case ActivationLayerInfo::ActivationFunction::SQRT:
                out[i] = std::sqrt(x);
                break;
            case ActivationLayerInfo::ActivationFunction::SQUARE:
                out[i] = x * x;
                break;
            case ActivationLayerInfo::ActivationFunction::TANH:
                out[i] = a * std::tanh(b * x);
                break;
            default:
                ARM_COMPUTE_ERROR("Activation function not recognised");
                break;
        }
    }
}

// Activation Layer for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
{
    using namespace fixed_point_arithmetic;
    int fixed_point_position = in.fixed_point_position();
    ActivationLayerInfo::ActivationFunction act_func = act_info.activation();
    const fixed_point<T> a(act_info.a(), fixed_point_position);
    const fixed_point<T> b(act_info.b(), fixed_point_position);
    const fixed_point<T> const_0(0, fixed_point_position);
    const fixed_point<T> const_1(1, fixed_point_position);

    for(int i = 0; i < in.num_elements(); ++i)
    {
        fixed_point<T> x(in[i], fixed_point_position, true);
        switch(act_func)
        {
            case ActivationLayerInfo::ActivationFunction::ABS:
                out[i] = abs(x).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                out[i] = min(a, max(const_0, x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::LINEAR:
                out[i] = add(b, mul(a, x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                out[i] = (const_1 / (const_1 + exp(-x))).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::RELU:
                out[i] = max(const_0, x).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                out[i] = log(const_1 + exp(x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::SQRT:
                out[i] = (const_1 / inv_sqrt(x)).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::SQUARE:
                out[i] = mul(x, x).raw();
                break;
            case ActivationLayerInfo::ActivationFunction::TANH:
                out[i] = tanh(x).raw();
                break;
            default:
                ARM_COMPUTE_ERROR("Activation function not recognised");
                break;
        }
    }
}

// Batch Normalization Layer for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
{
    const int cols  = static_cast<int>(in.shape()[0]);
    const int rows  = static_cast<int>(in.shape()[1]);
    const int depth = static_cast<int>(in.shape()[2]);
    int upper_dims  = in.shape().total_size() / (cols * rows * depth);

    for(int r = 0; r < upper_dims; ++r)
    {
        for(int i = 0; i < depth; ++i)
        {
            for(int k = 0; k < rows; ++k)
            {
                for(int l = 0; l < cols; ++l)
                {
                    const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
                    fixed_point_arithmetic::fixed_point<T> in_qs8(in[pos], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> var_qs8(var[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> mean_qs8(mean[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> beta_qs8(beta[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> gamma_qs8(gamma[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> epsilon_qs8(epsilon, fixed_point_position);

                    auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs8 + epsilon_qs8);
                    auto numerator   = in_qs8 - mean_qs8;
                    auto x_bar       = numerator * denominator;
                    x_bar            = beta_qs8 + x_bar * gamma_qs8;
                    out[pos]         = x_bar.raw();
                }
            }
        }
    }
}

// Batch Normalization Layer for floating point type
template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
{
    const int cols  = static_cast<int>(in.shape()[0]);
    const int rows  = static_cast<int>(in.shape()[1]);
    const int depth = static_cast<int>(in.shape()[2]);
    int upper_dims  = in.shape().total_size() / (cols * rows * depth);

    for(int r = 0; r < upper_dims; ++r)
    {
        for(int i = 0; i < depth; ++i)
        {
            for(int k = 0; k < rows; ++k)
            {
                for(int l = 0; l < cols; ++l)
                {
                    const int pos           = l + k * cols + i * rows * cols + r * cols * rows * depth;
                    const float denominator = std::sqrt(var[i] + epsilon);
                    const float numerator   = in[pos] - mean[i];
                    const float x_bar       = numerator / denominator;
                    out[pos]                = beta[i] + x_bar * gamma[i];
                }
            }
        }
    }
}
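// Both variants implement out = gamma * (in - mean) / sqrt(var + epsilon) + beta,
// with mean/var/beta/gamma indexed per feature map (dimension 2).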

// Convolution layer
template <typename T>
void convolution_layer(const Tensor<T> &in, const Tensor<T> &weights, const Tensor<T> &bias, Tensor<T> &out, const PadStrideInfo &conv_info)
{
    const int width_in       = in.shape().x();
    const int height_in      = in.shape().y();
    const int depth_in       = in.shape().z();
    const int width_out      = out.shape().x();
    const int height_out     = out.shape().y();
    const int depth_out      = out.shape().z();
    const int width_weights  = weights.shape().x();
    const int height_weights = weights.shape().y();
    const int depth_weights  = weights.shape().z();
    const int pad_xi         = std::min(static_cast<int>(conv_info.pad().first), width_weights / 2);
    const int pad_yi         = std::min(static_cast<int>(conv_info.pad().second), height_weights / 2);
    const int start_xi       = width_weights / 2 - pad_xi;
    const int start_yi       = height_weights / 2 - pad_yi;
    const int end_xi         = width_in - start_xi;
    const int end_yi         = height_in - start_yi;
    const int stride_xi      = conv_info.stride().first;
    const int stride_yi      = conv_info.stride().second;
    const int num_batches    = in.shape().total_size() / (width_in * height_in * depth_in);

    for(int r = 0; r < num_batches; ++r)
    {
        for(int yi = start_yi; yi < end_yi; yi += stride_yi)
        {
            for(int xi = start_xi; xi < end_xi; xi += stride_xi)
            {
                for(int ofm = 0; ofm < depth_out; ++ofm)
                {
                    // Compute input and output offsets
                    const int offset_in  = r * width_in * height_in * depth_in;
                    const int xo         = (xi - start_xi) / stride_xi;
                    const int yo         = (yi - start_yi) / stride_yi;
                    const int offset_out = xo + yo * width_out + ofm * width_out * height_out + r * width_out * height_out * depth_out;

                    // Compute 3D convolution
                    convolution3d(in.data() + offset_in,
                                  weights.data() + ofm * width_weights * height_weights * depth_weights,
                                  bias.data() + ofm,
                                  out.data() + offset_out,
                                  xi, yi,
                                  width_in, height_in, depth_in,
                                  width_weights, height_weights,
                                  static_cast<int8_t>(in.fixed_point_position()));
                }
            }
        }
    }
}
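// Note: `out` is assumed to be sized consistently with the pad/stride settings,
// i.e. width_out matches the number of strided positions in [start_xi, end_xi)
// and likewise for the height; out-of-image taps are skipped inside convolution3d
// rather than padded here.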

// Fully connected layer
template <typename T>
void fully_connected_layer(const Tensor<T> &in, const Tensor<T> &weights, const Tensor<T> &bias, Tensor<T> &out)
{
    ARM_COMPUTE_ERROR_ON(weights.shape().x() != out.shape().x());
    ARM_COMPUTE_ERROR_ON(weights.shape().y() != in.shape().x() * in.shape().y() * in.shape().z());
    const int cols_weights = weights.shape().x();
    const int rows_weights = weights.shape().y();
    const int num_batches  = in.shape().total_size() / rows_weights;

    for(int k = 0; k < num_batches; ++k)
    {
        vector_matrix_multiply<T>(in.data() + k * rows_weights,
                                  weights.data(),
                                  bias.data(),
                                  out.data() + k * cols_weights,
                                  cols_weights,
                                  rows_weights,
                                  in.fixed_point_position());
    }
}
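// Each batch of `in` is treated as one flattened vector of length rows_weights
// (x * y * z, as asserted above) and multiplied by the rows_weights x cols_weights
// weight matrix, so `out` holds one row of cols_weights results per batch.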

// Normalization Layer for floating point type
template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
{
    const uint32_t norm_size = norm_info.norm_size();
    NormType type            = norm_info.type();
    float beta               = norm_info.beta();
    float kappa              = norm_info.kappa();

    const int cols  = static_cast<int>(in.shape()[0]);
    const int rows  = static_cast<int>(in.shape()[1]);
    const int depth = static_cast<int>(in.shape()[2]);
    int upper_dims  = in.shape().total_size() / (cols * rows);

    float coeff     = norm_info.scale_coeff();
    int radius_cols = norm_size / 2;
    // IN_MAP_1D and CROSS_MAP normalize over a single axis only
    int radius_rows = (NormType::IN_MAP_2D == type) ? norm_size / 2 : 0;

    if(type == NormType::CROSS_MAP)
    {
        // Remove also depth from the upper dimensions since it is the axis we
        // want to use for normalization
        upper_dims /= depth;
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    for(int l = 0; l < depth; ++l)
                    {
                        float accumulated_scale = 0.f;
                        for(int j = -radius_cols; j <= radius_cols; ++j)
                        {
                            const int z = l + j;
                            if(z >= 0 && z < depth)
                            {
                                const T value = in[k + i * cols + z * rows * cols + r * cols * rows * depth];
                                accumulated_scale += value * value;
                            }
                        }
                        out[k + i * cols + l * rows * cols + r * cols * rows * depth] = kappa + accumulated_scale * coeff;
                    }
                }
            }
        }
    }
    else
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    float accumulated_scale = 0.f;
                    for(int j = -radius_rows; j <= radius_rows; ++j)
                    {
                        const int y = i + j;
                        for(int l = -radius_cols; l <= radius_cols; ++l)
                        {
                            const int x = k + l;
                            if((x >= 0 && y >= 0) && (x < cols && y < rows))
                            {
                                const T value = in[x + y * cols + r * cols * rows];
                                accumulated_scale += value * value;
                            }
                        }
                    }
                    out[k + i * cols + r * cols * rows] = kappa + accumulated_scale * coeff;
                }
            }
        }
    }

    if(beta == 1.f)
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            out[i] = in[i] / out[i];
        }
    }
    else if(beta == 0.5f)
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            out[i] = in[i] / std::sqrt(out[i]);
        }
    }
    else
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            out[i] = in[i] * std::exp(std::log(out[i]) * -beta);
        }
    }
}
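// The final pass computes in[i] / out[i]^beta, where out[i] temporarily holds the
// scale term kappa + coeff * sum(x^2); beta == 1 and beta == 0.5 use cheaper direct
// forms, and the general case evaluates x^(-beta) as exp(-beta * log(x)).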

// Normalization Layer for fixed-point types
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
{
    using namespace fixed_point_arithmetic;

    const int fixed_point_position = in.fixed_point_position();

    const uint32_t norm_size = norm_info.norm_size();
    NormType type            = norm_info.type();
    fixed_point<T> beta(norm_info.beta(), fixed_point_position);
    fixed_point<T> kappa(norm_info.kappa(), fixed_point_position);

    const int cols  = static_cast<int>(in.shape()[0]);
    const int rows  = static_cast<int>(in.shape()[1]);
    const int depth = static_cast<int>(in.shape()[2]);
    int upper_dims  = in.shape().total_size() / (cols * rows);

    fixed_point<T> coeff(norm_info.scale_coeff(), fixed_point_position);
    int radius_cols = norm_size / 2;
    // IN_MAP_1D and CROSS_MAP normalize over a single axis only
    int radius_rows = (NormType::IN_MAP_2D == type) ? norm_size / 2 : 0;

    if(type == NormType::CROSS_MAP)
    {
        // Remove also depth from the upper dimensions since it is the axis we
        // want to use for normalization
        upper_dims /= depth;
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    for(int l = 0; l < depth; ++l)
                    {
                        fixed_point<T> accumulated_scale(0.f, fixed_point_position);
                        for(int j = -radius_cols; j <= radius_cols; ++j)
                        {
                            const int z = l + j;
                            if(z >= 0 && z < depth)
                            {
                                const T value = in[k + i * cols + z * rows * cols + r * cols * rows * depth];
                                const fixed_point<T> fp_value(value, fixed_point_position, true);
                                accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value));
                            }
                        }
                        accumulated_scale = add(kappa, mul(accumulated_scale, coeff));
                        out[k + i * cols + l * rows * cols + r * cols * rows * depth] = accumulated_scale.raw();
                    }
                }
            }
        }
    }
    else
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < rows; ++i)
            {
                for(int k = 0; k < cols; ++k)
                {
                    fixed_point<T> accumulated_scale(0.f, fixed_point_position);
                    for(int j = -radius_rows; j <= radius_rows; ++j)
                    {
                        const int y = i + j;
                        for(int l = -radius_cols; l <= radius_cols; ++l)
                        {
                            const int x = k + l;
                            if((x >= 0 && y >= 0) && (x < cols && y < rows))
                            {
                                const T value = in[x + y * cols + r * cols * rows];
                                const fixed_point<T> fp_value(value, fixed_point_position, true);
                                accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value));
                            }
                        }
                    }
                    accumulated_scale = add(kappa, mul(accumulated_scale, coeff));
                    out[k + i * cols + r * cols * rows] = accumulated_scale.raw();
                }
            }
        }
    }

    if(norm_info.beta() == 1.f)
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            fixed_point<T> res = div(fixed_point<T>(in[i], fixed_point_position, true), fixed_point<T>(out[i], fixed_point_position, true));
            out[i]             = res.raw();
        }
    }
    else
    {
        for(int i = 0; i < out.num_elements(); ++i)
        {
            fixed_point<T> res = pow(fixed_point<T>(out[i], fixed_point_position, true), beta);
            res                = div(fixed_point<T>(in[i], fixed_point_position, true), res);
            out[i]             = res.raw();
        }
    }
}

// Pooling layer
template <typename T>
void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info, int fixed_point_position)
{
    const int pool_size = pool_info.pool_size();
    PoolingType type    = pool_info.pool_type();
    int pool_stride_x   = 0;
    int pool_stride_y   = 0;
    int pad_x           = 0;
    int pad_y           = 0;
    std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info().stride();
    std::tie(pad_x, pad_y)                 = pool_info.pad_stride_info().pad();

    const int cols_in = static_cast<int>(in.shape()[0]);
    const int rows_in = static_cast<int>(in.shape()[1]);

    const int cols_out = static_cast<int>(out.shape()[0]);
    const int rows_out = static_cast<int>(out.shape()[1]);

    int upper_dims = in.shape().total_size() / (cols_in * rows_in);

    // Rows pair with the y padding/stride, columns with the x padding/stride
    int pooled_height = static_cast<int>(std::ceil(static_cast<float>(rows_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
    int pooled_width  = static_cast<int>(std::ceil(static_cast<float>(cols_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;

    if((pooled_height - 1) * pool_stride_y >= rows_in + pad_y)
    {
        --pooled_height;
    }
    if((pooled_width - 1) * pool_stride_x >= cols_in + pad_x)
    {
        --pooled_width;
    }

    if(type == PoolingType::MAX)
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < pooled_height; ++i)
            {
                for(int k = 0; k < pooled_width; ++k)
                {
                    int hstart = i * pool_stride_y - pad_y;
                    int wstart = k * pool_stride_x - pad_x;
                    int hend   = std::min(hstart + pool_size, rows_in);
                    int wend   = std::min(wstart + pool_size, cols_in);
                    hstart     = std::max(hstart, 0);
                    wstart     = std::max(wstart, 0);

                    T max_val = std::numeric_limits<T>::lowest();
                    for(int y = hstart; y < hend; ++y)
                    {
                        for(int x = wstart; x < wend; ++x)
                        {
                            T val = in[r * cols_in * rows_in + y * cols_in + x];
                            if(val > max_val)
                            {
                                max_val = val;
                            }
                        }
                    }

                    out[r * rows_out * cols_out + i * pooled_width + k] = max_val;
                }
            }
        }
    }
    else // Average pooling
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int i = 0; i < pooled_height; ++i)
            {
                for(int k = 0; k < pooled_width; ++k)
                {
                    T avg_val = 0;

                    int hstart = i * pool_stride_y - pad_y;
                    int wstart = k * pool_stride_x - pad_x;
                    int hend   = std::min(hstart + pool_size, rows_in + pad_y);
                    int wend   = std::min(wstart + pool_size, cols_in + pad_x);
                    int pool   = (hend - hstart) * (wend - wstart);
                    hstart     = std::max(hstart, 0);
                    wstart     = std::max(wstart, 0);
                    hend       = std::min(hend, rows_in);
                    wend       = std::min(wend, cols_in);

                    if(std::is_floating_point<T>::value)
                    {
                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                avg_val += in[r * cols_in * rows_in + y * cols_in + x];
                            }
                        }
                        out[r * rows_out * cols_out + i * pooled_width + k] = avg_val / pool;
                    }
                    else
                    {
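                        // Reciprocals of the pool area (1/pool) pre-computed in Q0.7
                        // format and indexed by pool size, e.g. 0x40 = 64/128 = 1/2
                        // for a 2-element pool; entries 0 and 1 are unused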
                        static std::array<qint8_t, 10> scale_values_q8 =
                        { { 0x0, 0x0, 0x40, 0x2A, 0x20, 0x19, 0x15, 0x12, 0x10, 0xE } };

                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                avg_val = sqadd_qs8(avg_val, in[r * cols_in * rows_in + y * cols_in + x]);
                            }
                        }
                        out[r * rows_out * cols_out + i * pooled_width + k] = sqmul_qs8(avg_val, (scale_values_q8[pool] >> (7 - fixed_point_position)), fixed_point_position);
                    }
                }
            }
        }
    }
}

// Softmax Layer for floating point type
template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
{
    const int cols       = static_cast<int>(in.shape()[0]);
    const int upper_dims = in.shape().total_size() / cols;
    for(int r = 0; r < upper_dims; ++r)
    {
        // Find max
        T max = std::numeric_limits<T>::lowest();
        for(int c = 0; c < cols; ++c)
        {
            const T x = in[r * cols + c];
            if(x > max)
            {
                max = x;
            }
        }

        // Subtract the max (for numerical stability) and exponentiate
        T sum = 0;
        for(int c = 0; c < cols; ++c)
        {
            const T res       = std::exp(in[r * cols + c] - max);
            out[r * cols + c] = res;
            sum += res;
        }

        // Normalize
        const T norm_val = 1 / sum;
        for(int c = 0; c < cols; ++c)
        {
            out[r * cols + c] *= norm_val;
        }
    }
}
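
// Softmax Layer for fixed point type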
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
{
    using namespace fixed_point_arithmetic;
    using promoted_T = typename test::traits::promote<T>::type;

    const int fixed_point_position = in.fixed_point_position();
    const int cols                 = static_cast<int>(in.shape()[0]);
    const int upper_dims           = in.shape().total_size() / cols;

    for(int r = 0; r < upper_dims; ++r)
    {
        // Find max
        fixed_point<T> max(std::numeric_limits<T>::lowest(), fixed_point_position, true);
        for(int c = 0; c < cols; ++c)
        {
            const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
            if(x > max)
            {
                max = x;
            }
        }

        // Subtract the max (for numerical stability) and exponentiate
        fixed_point<promoted_T> sum(0, fixed_point_position);
        for(int c = 0; c < cols; ++c)
        {
            const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
            fixed_point<T> res = exp(x - max);
            out[r * cols + c]  = res.raw();
            sum = add(sum, static_cast<fixed_point<promoted_T>>(res));
        }

        // Normalize
        fixed_point<T> sat_sum(sum);
        for(int c = 0; c < cols; ++c)
        {
            const fixed_point<T> x(out[r * cols + c], fixed_point_position, true);
            out[r * cols + c] = div(x, sat_sum).raw();
        }
    }
}

// Fixed point operations
template <typename T>
void fixed_point_operation(const Tensor<T> &in, Tensor<T> &out, FixedPointOp op)
{
    int p = in.fixed_point_position();
    switch(op)
    {
        case FixedPointOp::EXP:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::exp(fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        case FixedPointOp::LOG:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::log(fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        case FixedPointOp::INV_SQRT:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::inv_sqrt(fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        case FixedPointOp::RECIPROCAL:
            for(int i = 0; i < in.num_elements(); ++i)
            {
                out[i] = fixed_point_arithmetic::div(fixed_point_arithmetic::fixed_point<T>(1, p), fixed_point_arithmetic::fixed_point<T>(in[i], p, true)).raw();
            }
            break;
        default:
            ARM_COMPUTE_ERROR("Fixed point operation not supported");
            break;
    }
}

// Tensor print
template <typename T>
void print(const Tensor<T> &in, std::ostream &out)
{
    out << "\n";
    for(int i = 0; i < in.num_elements(); ++i)
    {
        out << in[i] << " ";
    }
    out << "\n";
}
} // namespace tensor_operations
} // namespace validation
} // namespace test
} // namespace arm_compute

#endif /* __ARM_COMPUTE_TEST_TENSOR_OPERATIONS_H__ */