blob: 5c24416624e9d2c866afd823599e32285d8cfde5 [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
#include "RefNormalizationFloat32Workload.hpp"

#include "RefWorkloadUtils.hpp"

#include "Profiling.hpp"

#include <armnn/Tensor.hpp>

#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/cast.hpp>

#include <cmath>
#include <cstdint>
16
17namespace armnn
18{
19
telsoa01c577f2c2018-08-31 09:22:23 +010020// Helper function to compute "Within" normalization using Krichevsky 2012: Local Brightness Normalization.
telsoa014fcda012018-03-09 14:13:49 +000021static void NormalizeWithinUingLbr(const float* inputData,
22 float* outputData,
23 const TensorShape& tensorShape,
24 uint32_t norm_size,
25 float alpha,
26 float beta,
27 float kappa)
28{
29 const unsigned int batchSize = tensorShape[0];
30 const unsigned int depth = tensorShape[1];
31 const unsigned int rows = tensorShape[2];
32 const unsigned int cols = tensorShape[3];
33
34 int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
35
36 for (unsigned int n = 0; n < batchSize; n++)
37 {
38 for (unsigned int c = 0; c < depth; c++)
39 {
40 for (unsigned int h = 0; h < rows; h++)
41 {
42 for (unsigned int w = 0; w < cols; w++)
43 {
44 float accumulated_scale = 0.0;
45 for (int y = -radius; y <= radius; y++)
46 {
47 for (int x = -radius; x <= radius; x++)
48 {
49 int i = boost::numeric_cast<int>(w) + x;
50 int j = boost::numeric_cast<int>(h) + y;
51
52 if ((i < 0) || (i >= boost::numeric_cast<int>(cols)))
53 {
54 continue;
55 }
56
57 if ((j < 0) || (j >= boost::numeric_cast<int>(rows)))
58 {
59 continue;
60 }
61
62 float inval = inputData[n * cols * rows * depth +
63 c * cols * rows +
64 boost::numeric_cast<unsigned int>(j) * cols +
65 boost::numeric_cast<unsigned int>(i)];
66
67 accumulated_scale += inval*inval;
68 }
69 }
70 outputData[n * cols * rows * depth +
71 c * cols * rows +
72 h * cols +
73 w] = inputData[n * cols * rows * depth +
74 c * cols * rows +
75 h * cols +
76 w] / (powf((kappa + (accumulated_scale * alpha)), beta));
77 }
78 }
79 }
80 }
81}
82
telsoa01c577f2c2018-08-31 09:22:23 +010083// Helper function to compute "Across" normalization using Krichevsky 2012: Local Brightness Normalization.
telsoa014fcda012018-03-09 14:13:49 +000084void NormalizeAcrossUingLbr(const float* inputData,
85 float* outputData,
86 const TensorShape& tensorShape,
87 uint32_t norm_size,
88 float alpha,
89 float beta,
90 float kappa)
91{
92 const unsigned int batchSize = tensorShape[0];
93 const unsigned int depth = tensorShape[1];
94 const unsigned int rows = tensorShape[2];
95 const unsigned int cols = tensorShape[3];
96
97 int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
98
99 for (unsigned int n = 0; n < batchSize; n++)
100 {
101 for (unsigned int c = 0; c < depth; c++)
102 {
103 for (unsigned int h = 0; h < rows; h++)
104 {
105 for (unsigned int w = 0; w < cols; w++)
106 {
107 float accumulated_scale = 0.0;
108 for (int z = -radius; z <= radius; z++)
109 {
110 int k = boost::numeric_cast<int>(c) + z;
111
112 if ((k < 0) || (k >= boost::numeric_cast<int>(depth)))
113 {
114 continue;
115 }
116
117 float inval = inputData[n * cols * rows * depth +
118 boost::numeric_cast<unsigned int>(k) * cols * rows +
119 h * cols +
120 w];
121
122 accumulated_scale += inval*inval;
123 }
124 float scale = kappa + (accumulated_scale * alpha);
125 scale = powf(scale, -beta);
126 outputData[n * cols * rows * depth +
127 c * cols * rows +
128 h * cols +
129 w] = scale *
130 inputData[n * cols * rows * depth +
131 c * cols * rows +
132 h * cols +
133 w];
134 }
135 }
136 }
137 }
138}
139
140void RefNormalizationFloat32Workload::Execute() const
141{
142 ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefNormalizationFloat32Workload_Execute");
143
144 const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
145
146 float* outputData = GetOutputTensorDataFloat(0, m_Data);
147 const float* inputData = GetInputTensorDataFloat(0, m_Data);
148
149
150 if (NormalizationAlgorithmMethod::LocalBrightness == m_Data.m_Parameters.m_NormMethodType)
151 {
152 if (NormalizationAlgorithmChannel::Within == m_Data.m_Parameters.m_NormChannelType)
153 {
154 NormalizeWithinUingLbr(inputData,
155 outputData,
156 inputInfo.GetShape(),
157 m_Data.m_Parameters.m_NormSize,
158 m_Data.m_Parameters.m_Alpha,
159 m_Data.m_Parameters.m_Beta,
160 m_Data.m_Parameters.m_K);
161 }
162 else if (NormalizationAlgorithmChannel::Across == m_Data.m_Parameters.m_NormChannelType)
163 {
164 NormalizeAcrossUingLbr(inputData,
165 outputData,
166 inputInfo.GetShape(),
167 m_Data.m_Parameters.m_NormSize,
168 m_Data.m_Parameters.m_Alpha,
169 m_Data.m_Parameters.m_Beta,
170 m_Data.m_Parameters.m_K);
171 }
172 else
173 {
174 BOOST_LOG_TRIVIAL(warning) << "Illegal NORMALIZATION mode in normalization_f32";
175 return;
176 }
177 }
178 else
179 {
180 BOOST_LOG_TRIVIAL(warning) << "Lcr method (Jarret 2009: Local Contrast Normalization) not supported yet.";
181 return;
182 }
183}
184
185} //namespace armnn