blob: 16893eb3154b59d0e842f4199f2289cfc9c2e350 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
David Beckac42efd2018-09-26 17:41:13 +01006#include <armnn/Exceptions.hpp>
7#include <armnn/LayerSupport.hpp>
narpra0155a97bc2018-10-02 14:35:53 +01008#include "armnn/Types.hpp"
telsoa014fcda012018-03-09 14:13:49 +00009
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000010#include <backendsCommon/CpuTensorHandle.hpp>
11#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000012
// Runs a Normalization workload on a small NCHW Float32 tensor (2 batches, 1 channel, 2x2)
// and checks the result against an analytically computed expected output.
// Only LocalBrightness is supported as the method; Within and Across are supported as the
// channel type — any other combination throws armnn::UnimplementedException.
LayerTestResult<float,4> SimpleNormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                     armnn::NormalizationAlgorithmChannel normChannel,
                                                     armnn::NormalizationAlgorithmMethod normMethod)
{
    // Input dimensions: N=2, C=1, H=2, W=2. Output has the same shape as the input.
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    // Shapes are given in NCHW order, matching m_DataLayout set below.
    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Deterministic input data: values 1..8 across the two batches.
    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    }));

    // Normalization parameters: alpha/beta/kappa all 1 keeps the expected-value
    // computation below simple; normSize=3 means a 3-wide normalization window.
    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the queue descriptor: wire up the I/O handles and set the layer parameters.
    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;

    // A reference descriptor aliasing ret.outputExpected is prepared here, but no
    // reference workload is created from refData in this function — the expected
    // output is instead computed analytically in the switch below.
    // NOTE(review): refData/refHandle appear to be vestigial setup; confirm before removing.
    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    // Allocate backing memory before copying input data in.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // Acquire/Release bracket the execution for factories that need explicit
    // resource management around workload runs.
    workloadFactory.Acquire();
    workload->Execute();
    workloadFactory.Release();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Compute the expected output on the CPU for the supported method/channel combinations.
    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Within:
                {
                    // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
                    // Therefore, all output values should equal the inputs, but divided by:
                    // pow((kappa + (accumulatedScale * alpha)), beta)
                    // ...where accumulatedScale is the sum of every element squared.
                    float divisor[inputNum];
                    for(int i = 0; i < boost::numeric_cast<int>(inputNum); i++)
                    {
                        // Sum of squares of all four elements in this batch's single channel.
                        float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
                                                 input[i][0][0][1]*input[i][0][0][1] +
                                                 input[i][0][1][0]*input[i][0][1][0] +
                                                 input[i][0][1][1]*input[i][0][1][1];
                        divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
                    }
                    // Each element is divided by its batch's divisor.
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
                                                              std::vector<float>({input[0][0][0][0]/divisor[0],
                                                                                  input[0][0][0][1]/divisor[0],
                                                                                  input[0][0][1][0]/divisor[0],
                                                                                  input[0][0][1][1]/divisor[0],
                                                                                  input[1][0][0][0]/divisor[1],
                                                                                  input[1][0][0][1]/divisor[1],
                                                                                  input[1][0][1][0]/divisor[1],
                                                                                  input[1][0][1][1]/divisor[1]}));
                    break;
                }
                case armnn::NormalizationAlgorithmChannel::Across:
                {
                    // When normalising across channels, all output values should equal the inputs, but multiplied by:
                    // pow((kappa + (accumulatedScale * alpha)), -beta)
                    // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared
                    // ...where adjacent channels means within half the normSize for the channel
                    // The test data has only one channel, so this is simplified below.
                    std::vector<float> outputVector;
                    for (int n = 0; n < boost::numeric_cast<int>(inputNum); ++n)
                    {
                        for (int h = 0; h < boost::numeric_cast<int>(inputHeight); ++h)
                        {
                            for (int w = 0; w < boost::numeric_cast<int>(inputWidth); ++w)
                            {
                                // Single channel: the accumulated scale is just this element squared.
                                float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
                                float scale = powf((kappa + accumulatedScale * alpha), -beta);
                                outputVector.push_back(input[n][0][h][w] * scale);
                            }
                        }
                    }
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "only Across and Within are supported");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return ret;
}
155
// NHWC variant of the simple normalization test: runs a Normalization workload on a
// small Float32 tensor laid out as N=2, H=2, W=2, C=1 and compares against a
// hard-coded expected output. Only LocalBrightness + Across (cross-map) is supported
// for NHWC here; anything else throws armnn::UnimplementedException.
LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                         armnn::NormalizationAlgorithmChannel normChannel,
                                                         armnn::NormalizationAlgorithmMethod normMethod)
{
    // Logical dimensions are the same as the NCHW test; only the memory layout differs.
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    // Shapes are given in NHWC order, matching m_DataLayout set below.
    unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
    unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Deterministic input data: values 1..8 across the two batches.
    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    }));

    // Same normalization parameters as the NCHW test, so results are comparable.
    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the queue descriptor: wire up the I/O handles and set the layer parameters.
    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // Reference descriptor aliasing ret.outputExpected; no reference workload is
    // created from refData here — the expected output is hard-coded below.
    // NOTE(review): refData/refHandle appear to be vestigial setup; confirm before removing.
    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    // Allocate backing memory before copying input data in.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // Acquire/Release bracket the execution for factories that need explicit
    // resource management around workload runs.
    workloadFactory.Acquire();
    workload->Execute();
    workloadFactory.Release();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Across:
                {
                    // Precomputed expected values for input 1..8 with alpha=beta=kappa=1:
                    // each element x maps to x / (1 + x*x) for a single-channel cross-map window.
                    std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
                                                       0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "Only Cross-map is supported for NHWC layout");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return ret;
}
256
// Runs the same Normalization workload on two factories (the backend under test and a
// reference backend) with identical random input, returning both outputs so the caller
// can compare them. If the backend reports the configuration as unsupported, returns
// early with ret.supported == false and no execution.
LayerTestResult<float,4> CompareNormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                      armnn::IWorkloadFactory& refWorkloadFactory,
                                                      armnn::NormalizationAlgorithmChannel normChannel,
                                                      armnn::NormalizationAlgorithmMethod normMethod)
{
    // A larger tensor than the Simple tests (N=5, C=3, H=32, W=24) to exercise the
    // implementation over many windows. Output shape equals input shape.
    constexpr unsigned int inputNum = 5;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    // Shapes in NCHW order (m_DataLayout is left at its default here).
    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Fixed seed so both factories see the same pseudo-random input on every run.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);

    constexpr float alpha = 1.f;
    constexpr float beta = 1.f;
    constexpr float kappa = 1.f;
    constexpr uint32_t normSize = 5;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Descriptor for the backend under test.
    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    // Copy of the descriptor rewired to the reference factory's handles.
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
                                                    reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!ret.supported)
    {
        return ret;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);

    // Allocate all handles before copying data in.
    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    // Both workloads receive identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    // Acquire/Release only bracket the backend under test; the reference factory
    // is executed without them.
    workloadFactory.Acquire();
    workload->Execute();
    workloadFactory.Release();

    workloadRef->Execute();

    // Actual result from the backend under test; expected result from the reference backend.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
346