//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "L2NormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnnUtils/TensorUtils.hpp>
#include <armnnUtils/Permute.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

// For boost::ignore_unused and std::accumulate used below.
#include <boost/core/ignore_unused.hpp>

#include <numeric>

namespace
{

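// Shared implementation for the L2 Normalization layer tests. It builds input/output tensor infos
// from the given shape and quantization parameters, permutes the reference data to NHWC when that
// layout is requested, quantizes it, runs an L2Normalization workload through the supplied workload
// factory and returns the actual output alongside the expected output for comparison.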
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    boost::ignore_unused(memoryManager);
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // At this point, permute the input data if the test requires the NHWC layout.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        armnnUtils::QuantizedVector<T>(inputData,
                                                                       inputTensorInfo.GetQuantizationScale(),
                                                                       inputTensorInfo.GetQuantizationOffset()));

    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected =
        MakeTensor<T, 4>(outputTensorInfo,
                         armnnUtils::QuantizedVector<T>(expectedOutputData,
                                                        outputTensorInfo.GetQuantizationScale(),
                                                        outputTensorInfo.GetQuantizationOffset()));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

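// Returns the reciprocal of the L2 norm of the given elements, i.e. 1 / sqrt(sum of squares).
// The tests below use it to build the expected value of each element after normalisation
// across the channel dimension.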
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

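// Exercises the epsilon handling of the layer: the input values are chosen so that their
// sum of squares is far below the configured epsilon, making epsilon dominate the denominator.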
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout,
    float epsilon)
{
    // Width: 1
    // Height: 1
    // Channels: 3
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 3;
    unsigned int height = 1;
    unsigned int width = 1;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);

    // 0.00000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        0.00000002f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        0.00000003f,
    };

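    // As the sum of squares is far below epsilon, the denominator is approximately sqrt(epsilon).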
    const float approxInvL2Norm = 1.f / sqrtf(epsilon);
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f * approxInvL2Norm,
        0.00000002f * approxInvL2Norm,
        0.00000003f * approxInvL2Norm,
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout,
        epsilon);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization1dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 10;
    unsigned int height = 1;
    unsigned int width = 1;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        2.0f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        3.0f,

        // Batch 0, Channel 3, Height (1) x Width (1)
        4.0f,

        // Batch 0, Channel 4, Height (1) x Width (1)
        5.0f,

        // Batch 0, Channel 5, Height (1) x Width (1)
        6.0f,

        // Batch 0, Channel 6, Height (1) x Width (1)
        7.0f,

        // Batch 0, Channel 7, Height (1) x Width (1)
        8.0f,

        // Batch 0, Channel 8, Height (1) x Width (1)
        9.0f,

        // Batch 0, Channel 9, Height (1) x Width (1)
        10.0f
    };
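    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385)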
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 1;
    unsigned int width = 5;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f, 3.0f, 5.0f, 7.0f, 9.0f,

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f, 21.0f, 150.0f,
        149.0f, 32.0f, 179.0f,
        15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f, 73.0f,
        211.0f, 212.0f, 89.0f,
        24.0f, 138.0f, 188.0f,
        162.0f, 12.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

} // anonymous namespace

LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    // Dummy descriptor to get the default value of epsilon.
    armnn::L2NormalizationDescriptor descriptor;

    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout,
        descriptor.m_Eps);
}

LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout,
        1e-9f);
}

LayerTestResult<float, 4> L2Normalization1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization1dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
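    // Output quantization scale 1/128 with offset 128 spreads the normalised range [-1, 1]
    // across the uint8 range.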
    return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

LayerTestResult<float, 4> L2Normalization2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

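// Variant of the 2D test that runs the workload on an explicit rank-2 shape { 5, 2 } in NHWC
// layout, normalising each pair of values along the last dimension.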
LayerTestResult<float, 2> L2Normalization2dShapeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const armnn::DataLayout layout = armnn::DataLayout::NHWC;
    const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });

    std::vector<float> inputData
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
    };
    std::vector<float> expectedOutputData
    {
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);

    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);

    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = 1e-12f;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

LayerTestResult<float, 4> L2Normalization3dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

LayerTestResult<float, 4> L2Normalization4dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}