//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NormalizationTestImpl.hpp"

#include <armnn/Exceptions.hpp>
#include <armnn/LayerSupport.hpp>

#include <armnn/utility/NumericCast.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

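// Runs a Normalization workload over a fixed 2x2, single-channel, two-batch NCHW input and checks the
// output against reference values computed inline below for the requested channel type and method.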
LayerTestResult<float,4> SimpleNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    IgnoreUnused(memoryManager);
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    }));

    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;

    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Within:
                {
                    // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
                    // Therefore, all output values should equal the inputs, but divided by:
                    //     pow((kappa + (accumulatedScale * alpha)), beta)
                    // ...where accumulatedScale is the sum of every element squared.
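                    // For this test data (alpha = beta = kappa = 1.0f):
                    //     batch #0: accumulatedScale = 1 + 4 + 9 + 16 = 30, so divisor[0] = 31
                    //     batch #1: accumulatedScale = 25 + 36 + 49 + 64 = 174, so divisor[1] = 175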
                    float divisor[inputNum];
                    for (int i = 0; i < armnn::numeric_cast<int>(inputNum); i++)
                    {
                        float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
                                                 input[i][0][0][1]*input[i][0][0][1] +
                                                 input[i][0][1][0]*input[i][0][1][0] +
                                                 input[i][0][1][1]*input[i][0][1][1];
                        divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
                    }
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
                                                              std::vector<float>({input[0][0][0][0]/divisor[0],
                                                                                  input[0][0][0][1]/divisor[0],
                                                                                  input[0][0][1][0]/divisor[0],
                                                                                  input[0][0][1][1]/divisor[0],
                                                                                  input[1][0][0][0]/divisor[1],
                                                                                  input[1][0][0][1]/divisor[1],
                                                                                  input[1][0][1][0]/divisor[1],
                                                                                  input[1][0][1][1]/divisor[1]}));
                    break;
                }
                case armnn::NormalizationAlgorithmChannel::Across:
                {
                    // When normalising across channels, all output values should equal the inputs, but multiplied by:
                    //     pow((kappa + (accumulatedScale * alpha)), -beta)
                    // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared
                    // ...where adjacent channels means within half the normSize for the channel
                    // The test data has only one channel, so this is simplified below.
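                    // With a single channel and alpha = beta = kappa = 1.0f, each output is simply x / (1 + x^2),
                    // e.g. 1.0f -> 0.5f and 2.0f -> 0.4f.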
                    std::vector<float> outputVector;
                    for (int n = 0; n < armnn::numeric_cast<int>(inputNum); ++n)
                    {
                        for (int h = 0; h < armnn::numeric_cast<int>(inputHeight); ++h)
                        {
                            for (int w = 0; w < armnn::numeric_cast<int>(inputWidth); ++w)
                            {
                                float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
                                float scale = powf((kappa + accumulatedScale * alpha), -beta);
                                outputVector.push_back(input[n][0][h][w] * scale);
                            }
                        }
                    }
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "only Across and Within are supported");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return ret;
}

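// Same test as above but with the input laid out as NHWC; the expected output for the Across case is
// hard-coded rather than recomputed per element.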
LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
    unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Batch #0
        1.0f, 2.0f,
        3.0f, 4.0f,
        // Batch #1
        5.0f, 6.0f,
        7.0f, 8.0f
    }));

    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    switch (normMethod)
    {
        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        {
            switch (normChannel)
            {
                case armnn::NormalizationAlgorithmChannel::Across:
                {
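                    // These expected values are the inputs scaled by 1 / (kappa + alpha * x^2) = 1 / (1 + x^2)
                    // for this test data, e.g. 1 / (1 + 1) = 0.5f and 2 / (1 + 4) = 0.4f (the same maths as the
                    // Across case in the NCHW test above).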
                    std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
                                                       0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
                    break;
                }
                default:
                {
                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                                        "Only Cross-map is supported for NHWC layout");
                }
            }
            break;
        }
        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        default:
        {
            throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                                "only LocalBrightness is supported");
        }
    }

    return ret;
}

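// Runs the same Normalization workload on the backend under test and on a reference backend over a
// larger random NCHW input, returning both outputs for comparison.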
LayerTestResult<float,4> CompareNormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    constexpr unsigned int inputNum = 5;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

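    // Note: 111234 appears to be the seed for the random tensor, keeping the comparison reproducible.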
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);

    constexpr float alpha = 1.f;
    constexpr float beta = 1.f;
    constexpr float kappa = 1.f;
    constexpr uint32_t normSize = 5;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
                                                    reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!ret.supported)
    {
        return ret;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

} // anonymous namespace

LayerTestResult<float,4> SimpleNormalizationAcrossTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationWithinTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcTestImpl(
        workloadFactory, memoryManager, tensorHandleFactory, normChannel, normMethod);
}

LayerTestResult<float,4> CompareNormalizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
        normChannel, normMethod);
}