//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NormalizationTestImpl.hpp"

#include <armnn/Exceptions.hpp>
#include <armnn/LayerSupport.hpp>

#include <armnn/utility/NumericCast.hpp>

#include <backendsCommon/TensorHandle.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <WorkloadTestUtils.hpp>

#include <TensorHelpers.hpp>

namespace
{

LayerTestResult<float,4> SimpleNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    IgnoreUnused(memoryManager);
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    std::vector<float> input =
    {
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    };

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;

    armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Within:
                {
                    // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
                    // Therefore, all output values should equal the inputs, but divided by:
                    // pow((kappa + (accumulatedScale * alpha)), beta)
                    // ...where accumulatedScale is the sum of every element squared.
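                    // For example, with the test input above:
                    //   batch #0: accumulatedScale = 1 + 4 + 9 + 16 = 30,     divisor = (1 + 1*30)^1  = 31
                    //   batch #1: accumulatedScale = 25 + 36 + 49 + 64 = 174, divisor = (1 + 1*174)^1 = 175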
                    float divisor[inputNum];

                    float accumulatedScale1 = 0.0f;
                    for (size_t i = 0; i < input.size()/2; ++i)
                    {
                        accumulatedScale1 += input[i]*input[i];
                    }

                    float accumulatedScale2 = 0.0f;
                    for (size_t i = input.size()/2; i < input.size(); ++i)
                    {
                        accumulatedScale2 += input[i]*input[i];
                    }

                    divisor[0] = powf((kappa + accumulatedScale1 * alpha), beta);
                    divisor[1] = powf((kappa + accumulatedScale2 * alpha), beta);

                    std::vector<float> output;
                    unsigned int divisorIndex = 0;
                    for (size_t i = 0; i < input.size(); ++i)
                    {
                        if (i == input.size()/2)
                        {
                            divisorIndex++;
                        }
                        output.emplace_back(input[i]/divisor[divisorIndex]);
                    }

                    expectedOutput = output;
                    break;
                }
                case armnn::NormalizationAlgorithmChannel::Across:
                {
                    // When normalising across channels, all output values should equal the inputs, but multiplied by:
                    // pow((kappa + (accumulatedScale * alpha)), -beta)
                    // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared
                    // ...where adjacent channels means within half the normSize for the channel
                    // The test data has only one channel, so this is simplified below.
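                    // With a single channel and alpha = beta = kappa = 1, each element x reduces to x / (1 + x*x),
                    // e.g. 1.0f -> 0.5f and 2.0f -> 0.4f.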
                    std::vector<float> outputVector;

                    for (unsigned int i = 0; i < input.size(); ++i)
                    {
                        float accumulatedScale = input[i]*input[i];
                        float scale = powf((kappa + accumulatedScale * alpha), -beta);
                        outputVector.push_back(input[i] * scale);
                    }
                    expectedOutput = outputVector;
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "only Across and Within are supported");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}

LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
    unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    std::vector<float> input =
    {
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    };

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Across:
                {
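                    // With a single channel and alpha = beta = kappa = 1, each element x maps to x / (1 + x*x),
                    // i.e. 1/2, 2/5, 3/10, 4/17, 5/26, 6/37, 7/50 and 8/65.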
                    expectedOutput = { 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
                                       0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "Only Cross-map is supported for NHWC layout");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}

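// Runs the same Normalization workload through the workload factory under test and through a
// reference workload factory, and returns both outputs so the caller can compare them.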
LayerTestResult<float,4> CompareNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    constexpr unsigned int inputNum = 5;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    auto input = MakeRandomTensor<float>(inputTensorInfo, 111234);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    constexpr float alpha = 1.f;
    constexpr float beta = 1.f;
    constexpr float kappa = 1.f;
    constexpr uint32_t normSize = 5;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    ret.m_Supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
                                                      reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!ret.m_Supported)
    {
        return ret;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());
    CopyDataToITensorHandle(inputHandleRef.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
    ret.m_ActualData = actualOutput;
    ret.m_ExpectedData = expectedOutput;

    return ret;
}

LayerTestResult<float,4> AcrossChannelNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    const unsigned int inputHeight = 1;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 3;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
    unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    std::vector<float> input =
    {
        // Batch #0
        -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
        // Batch #1
        -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
    };

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    float alpha = 4.f;
    float beta = 0.5f;
    float kappa = 9.f;
    uint32_t normSize = 5;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Across:
                {
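                    // With normSize = 5 the window spans all 3 channels, so each element x_c maps to
                    // x_c / sqrt(kappa + alpha * (x_0^2 + x_1^2 + x_2^2)),
                    // e.g. -2.1f / sqrt(9 + 4 * (2.1^2 + 2.6^2 + 1.7^2)) = -2.1f / sqrt(65.24) ~ -0.259993f.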
                    expectedOutput = { -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f,
                                       -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f, };
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "only Across and Within are supported");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}

} // anonymous namespace

LayerTestResult<float,4> SimpleNormalizationAcrossTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationWithinTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcTestImpl(
        workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> CompareNormalizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
        normChannel, normMethod);
}

LayerTestResult<float,4> AcrossChannelNormalizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return AcrossChannelNormalizationTestImpl(workloadFactory,
                                              memoryManager,
                                              tensorHandleFactory,
                                              normChannel,
                                              normMethod);
}