//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NormalizationTestImpl.hpp"

#include <armnn/Exceptions.hpp>
#include <armnn/LayerSupport.hpp>

#include <armnn/utility/NumericCast.hpp>

#include <armnn/backends/TensorHandle.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <TensorHelpers.hpp>

namespace
{

LayerTestResult<float,4> SimpleNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    IgnoreUnused(memoryManager);
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    std::vector<float> input =
    {
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    };

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;

    armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
                                                                                data,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Within:
                {
                    // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
                    // Therefore, all output values should equal the inputs, but divided by:
                    //     pow((kappa + (accumulatedScale * alpha)), beta)
                    // ...where accumulatedScale is the sum of every element squared.
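                    // For this test data (alpha = beta = kappa = 1.f) that works out to:
                    //     batch 0: accumulatedScale = 1 + 4 + 9 + 16   = 30,  divisor = 1 + 30  = 31
                    //     batch 1: accumulatedScale = 25 + 36 + 49 + 64 = 174, divisor = 1 + 174 = 175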
                    float divisor[inputNum];

                    float accumulatedScale1 = 0.0f;
                    for (size_t i = 0; i < input.size()/2; ++i)
                    {
                        accumulatedScale1 += input[i]*input[i];
                    }

                    float accumulatedScale2 = 0.0f;
                    for (size_t i = input.size()/2; i < input.size(); ++i)
                    {
                        accumulatedScale2 += input[i]*input[i];
                    }

                    divisor[0] = powf((kappa + accumulatedScale1 * alpha), beta);
                    divisor[1] = powf((kappa + accumulatedScale2 * alpha), beta);

                    std::vector<float> output;
                    unsigned int divisorIndex = 0;
                    for (size_t i = 0; i < input.size(); ++i)
                    {
                        if (i == input.size()/2)
                        {
                            divisorIndex++;
                        }
                        output.emplace_back(input[i]/divisor[divisorIndex]);
                    }

                    expectedOutput = output;
                    break;
                }
                case armnn::NormalizationAlgorithmChannel::Across:
                {
                    // When normalising across channels, all output values should equal the inputs, but multiplied by:
                    //     pow((kappa + (accumulatedScale * alpha)), -beta)
                    // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared
                    // ...where adjacent channels means within half the normSize for the channel
                    // The test data has only one channel, so this is simplified below.
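                    // With a single channel, accumulatedScale is just the element squared, so each
                    // output is input[i] / (1 + input[i]*input[i]) for alpha = beta = kappa = 1.f.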
                    std::vector<float> outputVector;

                    for (unsigned int i = 0; i < input.size(); ++i)
                    {
                        float accumulatedScale = input[i]*input[i];
                        float scale = powf((kappa + accumulatedScale * alpha), -beta);
                        outputVector.push_back(input[i] * scale);
                    }
                    expectedOutput = outputVector;
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "only Across and Within are supported");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}

LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
    unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    std::vector<float> input =
    {
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    };

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
                                                                                data,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Across:
                {
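                    // With a single channel and alpha = beta = kappa = 1.f, across-channel normalisation
                    // reduces to x / (1 + x*x), i.e. 1/2, 2/5, 3/10, 4/17, 5/26, 6/37, 7/50 and 8/65.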
                    expectedOutput = { 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
                                       0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "Only Cross-map is supported for NHWC layout");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}

LayerTestResult<float,4> CompareNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    constexpr unsigned int inputNum = 5;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    auto input = MakeRandomTensor<float>(inputTensorInfo, 111234);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    constexpr float alpha = 1.f;
    constexpr float beta = 1.f;
    constexpr float kappa = 1.f;
    constexpr uint32_t normSize = 5;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    ret.m_Supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
                                                      reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!ret.m_Supported)
    {
        return ret;
    }

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::Normalization, data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef
        = refWorkloadFactory.CreateWorkload(armnn::LayerType::Normalization, refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());
    CopyDataToITensorHandle(inputHandleRef.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
    ret.m_ActualData = actualOutput;
    ret.m_ExpectedData = expectedOutput;

    return ret;
}

LayerTestResult<float,4> AcrossChannelNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    const unsigned int inputHeight = 1;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 3;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
    unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    std::vector<float> input =
    {
        // Batch #0
        -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
        // Batch #1
        -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
    };

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    float alpha = 4.f;
    float beta = 0.5f;
    float kappa = 9.f;
    uint32_t normSize = 5;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
                                                                                data,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Across:
                {
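                    // With normSize = 5 the window covers all 3 channels at each position, so each output is
                    // x * powf(kappa + alpha * sumOfChannelSquares, -beta). For the first pixel the channel
                    // squares sum to (-2.1)^2 + 2.6^2 + 1.7^2 = 14.06, giving a scale of
                    // 1 / sqrt(9 + 4 * 14.06) ≈ 0.12381 and an output of -2.1 * 0.12381 ≈ -0.259993.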
                    expectedOutput = { -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f,
                                       -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f, };
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "only Across and Within are supported");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}

} // anonymous namespace

LayerTestResult<float,4> SimpleNormalizationAcrossTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationWithinTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcTestImpl(
        workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> CompareNormalizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
        normChannel, normMethod);
}

LayerTestResult<float,4> AcrossChannelNormalizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return AcrossChannelNormalizationTestImpl(workloadFactory,
                                              memoryManager,
                                              tensorHandleFactory,
                                              normChannel,
                                              normMethod);
}