//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefNormalizationWorkload.hpp"
#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"

#include <armnn/Tensor.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <Profiling.hpp>

#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/cast.hpp>

using namespace armnn;
using namespace armnnUtils;

namespace
{

// Helper function to compute "Within" normalization using Krizhevsky 2012: Local Brightness Normalization.
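//
// For every element the output is:
//     out[n, c, h, w] = in[n, c, h, w] / (kappa + alpha * sum)^beta
// where 'sum' is the sum of squared inputs over a norm_size x norm_size spatial window
// centred on (h, w) within the same channel; window positions that fall outside the
// tensor are skipped. The tensor is indexed as NCHW.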
void NormalizeWithinUsingLbr(Decoder<float>& inputData,
                             Encoder<float>& outputData,
                             const TensorShape& tensorShape,
                             uint32_t norm_size,
                             float alpha,
                             float beta,
                             float kappa)
{
    const unsigned int batchSize = tensorShape[0];
    const unsigned int depth = tensorShape[1];
    const unsigned int rows = tensorShape[2];
    const unsigned int cols = tensorShape[3];

    int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */

    for (unsigned int n = 0; n < batchSize; n++)
    {
        for (unsigned int c = 0; c < depth; c++)
        {
            for (unsigned int h = 0; h < rows; h++)
            {
                for (unsigned int w = 0; w < cols; w++)
                {
                    float accumulated_scale = 0.0;
                    for (int y = -radius; y <= radius; y++)
                    {
                        for (int x = -radius; x <= radius; x++)
                        {
                            int i = boost::numeric_cast<int>(w) + x;
                            int j = boost::numeric_cast<int>(h) + y;

                            if ((i < 0) || (i >= boost::numeric_cast<int>(cols)))
                            {
                                continue;
                            }

                            if ((j < 0) || (j >= boost::numeric_cast<int>(rows)))
                            {
                                continue;
                            }

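                            // Flat NCHW offset of element (n, c, j, i); operator[] positions the
                            // decoder at that element before Get() reads it as a float.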
                            unsigned int inputIndex = n * cols * rows * depth +
                                                      c * cols * rows +
                                                      boost::numeric_cast<unsigned int>(j) * cols +
                                                      boost::numeric_cast<unsigned int>(i);
                            inputData[inputIndex];
                            float inval = inputData.Get();

                            accumulated_scale += inval * inval;
                        }
                    }

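                    // Normalise the centre element: out = in / (kappa + alpha * accumulated_scale)^beta.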
                    unsigned int index = n * cols * rows * depth +
                                         c * cols * rows +
                                         h * cols +
                                         w;
                    inputData[index];
                    outputData[index];
                    outputData.Set(inputData.Get() / (powf((kappa + (accumulated_scale * alpha)), beta)));
                }
            }
        }
    }
}

// Helper function to compute "Across" normalization using Krizhevsky 2012: Local Brightness Normalization.
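//
// For every element the output is:
//     out[n, c, h, w] = in[n, c, h, w] / (kappa + alpha * sum)^beta
// where 'sum' is the sum of squared inputs over channels [c - radius, c + radius]
// (radius = norm_size / 2) at the same (h, w) position; channels outside the tensor
// are skipped. Both NCHW and NHWC are supported via DataLayoutIndexed.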
void NormalizeAcrossUsingLbr(Decoder<float>& inputData,
                             Encoder<float>& outputData,
                             const TensorShape& tensorShape,
                             uint32_t norm_size,
                             float alpha,
                             float beta,
                             float kappa,
                             DataLayout dataLayout)
{
    DataLayoutIndexed dataLayoutIndexed(dataLayout);

    const unsigned int batchSize = tensorShape[0];
    const unsigned int depth = tensorShape[dataLayoutIndexed.GetChannelsIndex()];
    const unsigned int rows = tensorShape[dataLayoutIndexed.GetHeightIndex()];
    const unsigned int cols = tensorShape[dataLayoutIndexed.GetWidthIndex()];

    int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */

    for (unsigned int n = 0; n < batchSize; n++)
    {
        for (unsigned int c = 0; c < depth; c++)
        {
            for (unsigned int h = 0; h < rows; h++)
            {
                for (unsigned int w = 0; w < cols; w++)
                {
                    float accumulated_scale = 0.0;
                    for (int z = -radius; z <= radius; z++)
                    {
                        int k = boost::numeric_cast<int>(c) + z;

                        if ((k < 0) || (k >= boost::numeric_cast<int>(depth)))
                        {
                            continue;
                        }

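                        // GetIndex maps (n, k, h, w) to the flat offset for the given data layout.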
                        unsigned inputIndex = dataLayoutIndexed.GetIndex(tensorShape,
                                                                         n,
                                                                         boost::numeric_cast<unsigned int>(k),
                                                                         h,
                                                                         w);

                        inputData[inputIndex];
                        float inval = inputData.Get();

                        accumulated_scale += inval * inval;
                    }

                    float scale = kappa + (accumulated_scale * alpha);
                    scale = powf(scale, -beta);

                    unsigned index = dataLayoutIndexed.GetIndex(tensorShape, n, c, h, w);

                    inputData[index];
                    outputData[index];
                    outputData.Set(scale * inputData.Get());
                }
            }
        }
    }
}

} // Anonymous namespace

namespace armnn
{

RefNormalizationWorkload::RefNormalizationWorkload(const NormalizationQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info)
    : BaseWorkload(descriptor, info)
{}

void RefNormalizationWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefNormalizationWorkload_Execute");

    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);

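    // The decoder and encoder give element-wise float access to the mapped input and
    // output buffers, independent of the tensors' underlying data type.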
    auto inputDecoder  = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
    auto outputEncoder = MakeEncoder<float>(inputInfo, m_Data.m_Outputs[0]->Map());

    if (NormalizationAlgorithmMethod::LocalBrightness == m_Data.m_Parameters.m_NormMethodType)
    {
        if (NormalizationAlgorithmChannel::Within == m_Data.m_Parameters.m_NormChannelType)
        {
            NormalizeWithinUsingLbr(*inputDecoder,
                                    *outputEncoder,
                                    inputInfo.GetShape(),
                                    m_Data.m_Parameters.m_NormSize,
                                    m_Data.m_Parameters.m_Alpha,
                                    m_Data.m_Parameters.m_Beta,
                                    m_Data.m_Parameters.m_K);
        }
        else if (NormalizationAlgorithmChannel::Across == m_Data.m_Parameters.m_NormChannelType)
        {
            NormalizeAcrossUsingLbr(*inputDecoder,
                                    *outputEncoder,
                                    inputInfo.GetShape(),
                                    m_Data.m_Parameters.m_NormSize,
                                    m_Data.m_Parameters.m_Alpha,
                                    m_Data.m_Parameters.m_Beta,
                                    m_Data.m_Parameters.m_K,
                                    m_Data.m_Parameters.m_DataLayout);
        }
        else
        {
            BOOST_LOG_TRIVIAL(warning) << "Illegal NORMALIZATION mode in normalization_f32";
            return;
        }
    }
    else
    {
        BOOST_LOG_TRIVIAL(warning) << "Lcn method (Jarrett 2009: Local Contrast Normalization) not supported yet.";
        return;
    }
}

} // namespace armnn