/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "BatchNormalizationLayer.h"

#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
// Batch Normalization Layer for fixed point type
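// Computes out = (x - mean) / sqrt(var + epsilon) * gamma + beta per channel, with every
// operand converted to fixed-point representation at the given fixed point position.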
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type *>
SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
                                          int fixed_point_position)
{
    SimpleTensor<T> result(src.shape(), src.data_type());

    const auto cols       = static_cast<int>(src.shape()[0]);
    const auto rows       = static_cast<int>(src.shape()[1]);
    const auto depth      = static_cast<int>(src.shape()[2]);
    int        upper_dims = src.shape().total_size() / (cols * rows * depth);

    for(int r = 0; r < upper_dims; ++r)
    {
        for(int i = 0; i < depth; ++i)
        {
            for(int k = 0; k < rows; ++k)
            {
                for(int l = 0; l < cols; ++l)
                {
                    const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;

                    fixed_point_arithmetic::fixed_point<T> src_qs(src[pos], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> var_qs(var[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> mean_qs(mean[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> beta_qs(beta[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> gamma_qs(gamma[i], fixed_point_position, true);
                    fixed_point_arithmetic::fixed_point<T> epsilon_qs(epsilon, fixed_point_position);

                    auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs + epsilon_qs);
                    auto numerator   = src_qs - mean_qs;
                    auto x_bar       = numerator * denominator;
                    x_bar            = beta_qs + x_bar * gamma_qs;
                    result[pos]      = x_bar.raw();
                }
            }
        }
    }

    return result;
}

// Batch Normalization Layer for floating point type
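// Same computation as above, performed directly in floating point; fixed_point_position is unused.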
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type *>
SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
                                          int fixed_point_position)
{
    ARM_COMPUTE_UNUSED(fixed_point_position);

    SimpleTensor<T> result(src.shape(), src.data_type());

    const auto cols       = static_cast<int>(src.shape()[0]);
    const auto rows       = static_cast<int>(src.shape()[1]);
    const auto depth      = static_cast<int>(src.shape()[2]);
    int        upper_dims = src.shape().total_size() / (cols * rows * depth);

    for(int r = 0; r < upper_dims; ++r)
    {
        for(int i = 0; i < depth; ++i)
        {
            for(int k = 0; k < rows; ++k)
            {
                for(int l = 0; l < cols; ++l)
                {
                    const int   pos         = l + k * cols + i * rows * cols + r * cols * rows * depth;
                    const float denominator = sqrt(var[i] + epsilon);
                    const float numerator   = src[pos] - mean[i];
                    const float x_bar       = numerator / denominator;
                    result[pos]             = beta[i] + x_bar * gamma[i];
                }
            }
        }
    }
    return result;
}
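
// Explicit instantiations for the data types exercised by the validation tests.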
template SimpleTensor<float> batch_normalization_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &mean, const SimpleTensor<float> &var, const SimpleTensor<float> &beta,
                                                       const SimpleTensor<float> &gamma, float epsilon, int fixed_point_position);
template SimpleTensor<int8_t> batch_normalization_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &mean, const SimpleTensor<int8_t> &var, const SimpleTensor<int8_t> &beta,
                                                        const SimpleTensor<int8_t> &gamma, float epsilon, int fixed_point_position);
template SimpleTensor<int16_t> batch_normalization_layer(const SimpleTensor<int16_t> &src, const SimpleTensor<int16_t> &mean, const SimpleTensor<int16_t> &var, const SimpleTensor<int16_t> &beta,
                                                         const SimpleTensor<int16_t> &gamma, float epsilon, int fixed_point_position);
template SimpleTensor<half> batch_normalization_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &mean, const SimpleTensor<half> &var,
                                                      const SimpleTensor<half> &beta,
                                                      const SimpleTensor<half> &gamma, float epsilon, int fixed_point_position);

} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute