//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RefNormalizationWorkload.hpp"

#include <armnn/Logging.hpp>
#include <armnn/Tensor.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <Profiling.hpp>

#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"

using namespace armnn;
using namespace armnnUtils;

namespace
{

telsoa01c577f2c2018-08-31 09:22:23 +010025// Helper function to compute "Within" normalization using Krichevsky 2012: Local Brightness Normalization.
Matteo Martincigh2fc70c52019-06-05 14:12:48 +010026void NormalizeWithinUingLbr(Decoder<float>& inputData,
27 Encoder<float>& outputData,
28 const TensorShape& tensorShape,
29 uint32_t norm_size,
30 float alpha,
31 float beta,
32 float kappa)
telsoa014fcda012018-03-09 14:13:49 +000033{
34 const unsigned int batchSize = tensorShape[0];
35 const unsigned int depth = tensorShape[1];
36 const unsigned int rows = tensorShape[2];
37 const unsigned int cols = tensorShape[3];
38
Matthew Sloyan171214c2020-09-09 09:07:37 +010039 int radius = armnn::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
telsoa014fcda012018-03-09 14:13:49 +000040
41 for (unsigned int n = 0; n < batchSize; n++)
42 {
43 for (unsigned int c = 0; c < depth; c++)
44 {
45 for (unsigned int h = 0; h < rows; h++)
46 {
47 for (unsigned int w = 0; w < cols; w++)
48 {
49 float accumulated_scale = 0.0;
50 for (int y = -radius; y <= radius; y++)
51 {
52 for (int x = -radius; x <= radius; x++)
53 {
Matthew Sloyan171214c2020-09-09 09:07:37 +010054 int i = armnn::numeric_cast<int>(w) + x;
55 int j = armnn::numeric_cast<int>(h) + y;
telsoa014fcda012018-03-09 14:13:49 +000056
Matthew Sloyan171214c2020-09-09 09:07:37 +010057 if ((i < 0) || (i >= armnn::numeric_cast<int>(cols)))
telsoa014fcda012018-03-09 14:13:49 +000058 {
59 continue;
60 }
61
Matthew Sloyan171214c2020-09-09 09:07:37 +010062 if ((j < 0) || (j >= armnn::numeric_cast<int>(rows)))
telsoa014fcda012018-03-09 14:13:49 +000063 {
64 continue;
65 }
66
Matteo Martincigh2fc70c52019-06-05 14:12:48 +010067 unsigned int inputIndex = n * cols * rows * depth +
68 c * cols * rows +
Matthew Sloyan171214c2020-09-09 09:07:37 +010069 armnn::numeric_cast<unsigned int>(j) * cols +
70 armnn::numeric_cast<unsigned int>(i);
Matteo Martincigh2fc70c52019-06-05 14:12:48 +010071 inputData[inputIndex];
72 float inval = inputData.Get();
telsoa014fcda012018-03-09 14:13:49 +000073
74 accumulated_scale += inval*inval;
75 }
76 }
Matteo Martincigh2fc70c52019-06-05 14:12:48 +010077
78 unsigned int index = n * cols * rows * depth +
79 c * cols * rows +
80 h * cols +
81 w;
82 inputData[index];
83 outputData[index];
84 outputData.Set(inputData.Get() / (powf((kappa + (accumulated_scale * alpha)), beta)));
telsoa014fcda012018-03-09 14:13:49 +000085 }
86 }
87 }
88 }
89}
90
telsoa01c577f2c2018-08-31 09:22:23 +010091// Helper function to compute "Across" normalization using Krichevsky 2012: Local Brightness Normalization.
Matteo Martincigh2fc70c52019-06-05 14:12:48 +010092void NormalizeAcrossUingLbr(Decoder<float>& inputData,
93 Encoder<float>& outputData,
telsoa014fcda012018-03-09 14:13:49 +000094 const TensorShape& tensorShape,
95 uint32_t norm_size,
96 float alpha,
97 float beta,
Matteo Martincigh8e6f92d2018-10-18 08:45:39 +010098 float kappa,
99 DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +0000100{
Matteo Martincigh8e6f92d2018-10-18 08:45:39 +0100101 DataLayoutIndexed dataLayoutIndexed(dataLayout);
102
telsoa014fcda012018-03-09 14:13:49 +0000103 const unsigned int batchSize = tensorShape[0];
Matteo Martincigh8e6f92d2018-10-18 08:45:39 +0100104 const unsigned int depth = tensorShape[dataLayoutIndexed.GetChannelsIndex()];
105 const unsigned int rows = tensorShape[dataLayoutIndexed.GetHeightIndex()];
106 const unsigned int cols = tensorShape[dataLayoutIndexed.GetWidthIndex()];
telsoa014fcda012018-03-09 14:13:49 +0000107
Matthew Sloyan171214c2020-09-09 09:07:37 +0100108 int radius = armnn::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
telsoa014fcda012018-03-09 14:13:49 +0000109
110 for (unsigned int n = 0; n < batchSize; n++)
111 {
112 for (unsigned int c = 0; c < depth; c++)
113 {
114 for (unsigned int h = 0; h < rows; h++)
115 {
116 for (unsigned int w = 0; w < cols; w++)
117 {
118 float accumulated_scale = 0.0;
119 for (int z = -radius; z <= radius; z++)
120 {
Matthew Sloyan171214c2020-09-09 09:07:37 +0100121 int k = armnn::numeric_cast<int>(c) + z;
telsoa014fcda012018-03-09 14:13:49 +0000122
Matthew Sloyan171214c2020-09-09 09:07:37 +0100123 if ((k < 0) || (k >= armnn::numeric_cast<int>(depth)))
telsoa014fcda012018-03-09 14:13:49 +0000124 {
125 continue;
126 }
127
Matteo Martincigh2fc70c52019-06-05 14:12:48 +0100128 unsigned inputIndex = dataLayoutIndexed.GetIndex(tensorShape,
129 n,
Matthew Sloyan171214c2020-09-09 09:07:37 +0100130 armnn::numeric_cast<unsigned int>(k),
Matteo Martincigh2fc70c52019-06-05 14:12:48 +0100131 h,
132 w);
133
134 inputData[inputIndex];
135 float inval = inputData.Get();
telsoa014fcda012018-03-09 14:13:49 +0000136
Matteo Martincigh8e6f92d2018-10-18 08:45:39 +0100137 accumulated_scale += inval * inval;
telsoa014fcda012018-03-09 14:13:49 +0000138 }
Matteo Martincigh8e6f92d2018-10-18 08:45:39 +0100139
telsoa014fcda012018-03-09 14:13:49 +0000140 float scale = kappa + (accumulated_scale * alpha);
141 scale = powf(scale, -beta);
Matteo Martincigh8e6f92d2018-10-18 08:45:39 +0100142
Matteo Martincigh2fc70c52019-06-05 14:12:48 +0100143 unsigned index = dataLayoutIndexed.GetIndex(tensorShape, n, c, h, w);
144
145 inputData[index];
146 outputData[index];
147 outputData.Set(scale * inputData.Get());
telsoa014fcda012018-03-09 14:13:49 +0000148 }
149 }
150 }
151 }
152}

} // Anonymous namespace

namespace armnn
{

159RefNormalizationWorkload::RefNormalizationWorkload(const NormalizationQueueDescriptor& descriptor,
160 const WorkloadInfo& info)
Finn Williams73c547d2022-02-15 20:47:34 +0000161 : RefBaseWorkload(descriptor, info)
Matteo Martincigh2fc70c52019-06-05 14:12:48 +0100162{}
163
164void RefNormalizationWorkload::Execute() const
165{
Finn Williamsb8181f72021-04-07 10:23:21 +0100166 Execute(m_Data.m_Inputs, m_Data.m_Outputs);
167}
168
169void RefNormalizationWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
170{
171 Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
172}
173
174void RefNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
175{
Matteo Martincigh2fc70c52019-06-05 14:12:48 +0100176 ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefNormalizationWorkload_Execute");
telsoa014fcda012018-03-09 14:13:49 +0000177
Finn Williamsb8181f72021-04-07 10:23:21 +0100178 const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
telsoa014fcda012018-03-09 14:13:49 +0000179
Finn Williamsb8181f72021-04-07 10:23:21 +0100180 auto inputDecoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
181 auto outputEncoder = MakeEncoder<float>(inputInfo, outputs[0]->Map());
telsoa014fcda012018-03-09 14:13:49 +0000182
telsoa014fcda012018-03-09 14:13:49 +0000183 if (NormalizationAlgorithmMethod::LocalBrightness == m_Data.m_Parameters.m_NormMethodType)
184 {
185 if (NormalizationAlgorithmChannel::Within == m_Data.m_Parameters.m_NormChannelType)
186 {
Matteo Martincigh2fc70c52019-06-05 14:12:48 +0100187 NormalizeWithinUingLbr(*inputDecoder,
188 *outputEncoder,
telsoa014fcda012018-03-09 14:13:49 +0000189 inputInfo.GetShape(),
190 m_Data.m_Parameters.m_NormSize,
191 m_Data.m_Parameters.m_Alpha,
192 m_Data.m_Parameters.m_Beta,
193 m_Data.m_Parameters.m_K);
194 }
195 else if (NormalizationAlgorithmChannel::Across == m_Data.m_Parameters.m_NormChannelType)
196 {
Matteo Martincigh2fc70c52019-06-05 14:12:48 +0100197 NormalizeAcrossUingLbr(*inputDecoder,
198 *outputEncoder,
telsoa014fcda012018-03-09 14:13:49 +0000199 inputInfo.GetShape(),
200 m_Data.m_Parameters.m_NormSize,
201 m_Data.m_Parameters.m_Alpha,
202 m_Data.m_Parameters.m_Beta,
Matteo Martincigh8e6f92d2018-10-18 08:45:39 +0100203 m_Data.m_Parameters.m_K,
204 m_Data.m_Parameters.m_DataLayout);
telsoa014fcda012018-03-09 14:13:49 +0000205 }
206 else
207 {
Derek Lamberti08446972019-11-26 16:38:31 +0000208 ARMNN_LOG(warning) << "Illegal NORMALIZATION mode in normalization_f32";
telsoa014fcda012018-03-09 14:13:49 +0000209 return;
210 }
211 }
212 else
213 {
Derek Lamberti08446972019-11-26 16:38:31 +0000214 ARMNN_LOG(warning) << "Lcr method (Jarret 2009: Local Contrast Normalization) not supported yet.";
telsoa014fcda012018-03-09 14:13:49 +0000215 return;
216 }
217}

} // namespace armnn