//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NormalizationTestImpl.hpp"

#include <armnn/Exceptions.hpp>
#include <armnn/LayerSupport.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

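// Runs a normalization workload over a small NCHW tensor (2 batches, 1 channel, 2x2) and
// checks the result against values computed directly from the normalization formula.
// Only the LocalBrightness method, with Within or Across channel normalization, is handled.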
LayerTestResult<float,4> SimpleNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    IgnoreUnused(memoryManager);
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    }));

    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

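    // Describe the normalization workload: channel/method type, window size and the
    // alpha/beta/kappa coefficients, using the NCHW data layout.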
    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;

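    // Expose the expected-output buffer through a passthrough handle. refData/refInfo mirror the
    // main descriptor, but no reference workload is created from them in this test.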
    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

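    // Create the workload, run it, and copy the result back into ret.output.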
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

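    // Compute the expected output on the host for the supported method/channel combinations.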
    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Within:
                {
                    // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
                    // Therefore, all output values should equal the inputs, but divided by:
                    // pow((kappa + (accumulatedScale * alpha)), beta)
                    // ...where accumulatedScale is the sum of every element squared.
                    float divisor[inputNum];
                    for (int i = 0; i < boost::numeric_cast<int>(inputNum); i++)
                    {
                        float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
                                                 input[i][0][0][1]*input[i][0][0][1] +
                                                 input[i][0][1][0]*input[i][0][1][0] +
                                                 input[i][0][1][1]*input[i][0][1][1];
                        divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
                    }
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
                                                              std::vector<float>({input[0][0][0][0]/divisor[0],
                                                                                  input[0][0][0][1]/divisor[0],
                                                                                  input[0][0][1][0]/divisor[0],
                                                                                  input[0][0][1][1]/divisor[0],
                                                                                  input[1][0][0][0]/divisor[1],
                                                                                  input[1][0][0][1]/divisor[1],
                                                                                  input[1][0][1][0]/divisor[1],
                                                                                  input[1][0][1][1]/divisor[1]}));
                    break;
                }
                case armnn::NormalizationAlgorithmChannel::Across:
                {
                    // When normalising across channels, all output values should equal the inputs, but multiplied by:
                    // pow((kappa + (accumulatedScale * alpha)), -beta)
                    // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared
                    // ...where adjacent channels means within half the normSize for the channel
                    // The test data has only one channel, so this is simplified below.
                    std::vector<float> outputVector;
                    for (int n = 0; n < boost::numeric_cast<int>(inputNum); ++n)
                    {
                        for (int h = 0; h < boost::numeric_cast<int>(inputHeight); ++h)
                        {
                            for (int w = 0; w < boost::numeric_cast<int>(inputWidth); ++w)
                            {
                                float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
                                float scale = powf((kappa + accumulatedScale * alpha), -beta);
                                outputVector.push_back(input[n][0][h][w] * scale);
                            }
                        }
                    }
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "only Across and Within are supported");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return ret;
}

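// NHWC variant of the test above: same input values, but laid out as N x H x W x C.
// Only LocalBrightness normalization across channels is handled; the expected values are
// hard-coded (see the note above expectedOutput below).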
LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
    unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    }));

    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Across:
                {
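                    // With alpha = beta = kappa = 1 and a single channel, each output element is
                    // x / (1 + x^2); e.g. 2.0f -> 0.4f, 3.0f -> 0.3f.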
                    std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
                                                       0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "only Across (cross-map) is supported for the NHWC layout");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return ret;
}

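// Runs the same normalization workload through the factory under test and a reference factory
// (typically the CpuRef backend) over a larger random NCHW tensor, and returns both outputs
// so the caller can compare them.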
LayerTestResult<float,4> CompareNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    constexpr unsigned int inputNum = 5;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);

    constexpr float alpha = 1.f;
    constexpr float beta = 1.f;
    constexpr float kappa = 1.f;
    constexpr uint32_t normSize = 5;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
                                                    reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!ret.supported)
    {
        return ret;
    }

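    // Build a workload from each factory, run both on the same input and read back both outputs.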
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

} // anonymous namespace

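// Exported test cases (declared in NormalizationTestImpl.hpp).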
LayerTestResult<float,4> SimpleNormalizationAcrossTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationWithinTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcTestImpl(
        workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> CompareNormalizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
        normChannel, normMethod);
}