blob: d67f7b64aadc79189b0e7996edc53cd166e0b206 [file] [log] [blame]
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "L2NormalizationTestImpl.hpp"
7
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01008#include <QuantizeHelper.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01009#include <ResolveType.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000010
11#include <armnnUtils/TensorUtils.hpp>
12#include <armnnUtils/Permute.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010013
14#include <backendsCommon/test/TensorCopyUtils.hpp>
15#include <backendsCommon/test/WorkloadTestUtils.hpp>
16
17#include <test/TensorHelpers.hpp>
18
19namespace
20{
21
22template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
23LayerTestResult<T, 4> L2NormalizationTestImpl(
24 armnn::IWorkloadFactory& workloadFactory,
25 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
26 const armnn::TensorShape& inputOutputTensorShape,
27 float scale,
28 int32_t offset,
29 const std::vector<float>& inputValues,
30 float outScale,
31 int32_t outOffset,
32 const std::vector<float>& expectedOutputValues,
33 const armnn::DataLayout layout,
34 float epsilon = 1e-12f)
35{
36 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
37 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
38
39 // at this point if we require it permute the input data
40 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
41 std::vector<float> inputData = inputValues;
42 if (layout == armnn::DataLayout::NHWC)
43 {
44 std::vector<float> tmp(inputData.size());
45 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
46 inputData = tmp;
47 }
48
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010049 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
50 armnnUtils::QuantizedVector<T>(inputData,
51 inputTensorInfo.GetQuantizationScale(),
52 inputTensorInfo.GetQuantizationOffset()));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010053
54 std::vector<float> expectedOutputData = expectedOutputValues;
55 if (layout == armnn::DataLayout::NHWC)
56 {
57 std::vector<float> tmp(expectedOutputData.size());
58 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
59 sizeof(float));
60 expectedOutputData = tmp;
61 }
62
63 LayerTestResult<T, 4> result(outputTensorInfo);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010064 result.outputExpected =
65 MakeTensor<T, 4>(outputTensorInfo,
66 armnnUtils::QuantizedVector<T>(expectedOutputData,
67 outputTensorInfo.GetQuantizationScale(),
68 outputTensorInfo.GetQuantizationOffset()));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010069
70 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
71 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
72
73 armnn::L2NormalizationQueueDescriptor descriptor;
74 descriptor.m_Parameters.m_Eps = epsilon;
75 descriptor.m_Parameters.m_DataLayout = layout;
76 armnn::WorkloadInfo info;
77
78 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
79 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
80
81 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
82
83 inputHandle->Allocate();
84 outputHandle->Allocate();
85
86 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
87
88 workload->PostAllocationConfigure();
89 ExecuteWorkload(*workload, memoryManager);
90
91 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
92
93 return result;
94}
95
// Returns the reciprocal of the Euclidean (L2) norm of the given values,
// i.e. 1 / sqrt(sum of squares). Used to build expected outputs.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    float sumOfSquares = 0.0f;
    for (float element : elements)
    {
        sumOfSquares += element * element;
    }
    return 1.0f / sqrtf(sumOfSquares);
}
102
// Runs L2 Normalization on a [1,3,1,1] tensor whose sum of squares is far
// smaller than the given epsilon, so the implementation must clamp the
// denominator to sqrt(epsilon) instead of dividing by (nearly) zero.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout,
    float epsilon)
{
    // Width: 1
    // Height: 1
    // Channels: 3
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 3;
    unsigned int height = 1;
    unsigned int width = 1;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);

    // 0.00000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        0.00000002f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        0.00000003f,
    };

    // Because the sum of squares is below epsilon, the expected scaling
    // factor is 1/sqrt(epsilon) rather than the true inverse L2 norm.
    const float approxInvL2Norm = 1.f / sqrtf(epsilon);
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f * approxInvL2Norm,
        0.00000002f * approxInvL2Norm,
        0.00000003f * approxInvL2Norm,
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout,
        epsilon);
}
161
162
163template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
164LayerTestResult<T, 4> L2Normalization1dTestCommon(
165 armnn::IWorkloadFactory& workloadFactory,
166 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
167 float scale,
168 int32_t offset,
169 float outScale,
170 int32_t outOffset,
171 const armnn::DataLayout layout)
172{
173 // Width: 1
174 // Height: 1
175 // Channels: 10
176 // BatchSize: 1
177 unsigned int numberOfBatches = 1;
178 unsigned int numberOfChannels = 10;
179 unsigned int height = 1;
180 unsigned int width = 1;
181
182
183 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
184 numberOfBatches, numberOfChannels, height, width, layout);
185 std::vector<float> inputValues
186 {
187 // Batch 0, Channel 0, Height (1) x Width (1)
188 1.0f,
189
190 // Batch 0, Channel 1, Height (1) x Width (1)
191 2.0f,
192
193 // Batch 0, Channel 2, Height (1) x Width (1)
194 3.0f,
195
196 // Batch 0, Channel 3, Height (1) x Width (1)
197 4.0f,
198
199 // Batch 0, Channel 4, Height (1) x Width (1)
200 5.0f,
201
202 // Batch 0, Channel 5, Height (1) x Width (1)
203 6.0f,
204
205 // Batch 0, Channel 6, Height (1) x Width (1)
206 7.0f,
207
208 // Batch 0, Channel 7, Height (1) x Width (1)
209 8.0f,
210
211 // Batch 0, Channel 8, Height (1) x Width (1)
212 9.0f,
213
214 // Batch 0, Channel 9, Height (1) x Width (1)
215 10.0f
216 };
217 const float approxInvL2Norm = 0.050964719f;
218 std::vector<float> expectedOutputValues
219 {
220 // Batch 0, Channel 0, Height (1) x Width (1)
221 1.0f * approxInvL2Norm,
222 2.0f * approxInvL2Norm,
223 3.0f * approxInvL2Norm,
224 4.0f * approxInvL2Norm,
225 5.0f * approxInvL2Norm,
226 6.0f * approxInvL2Norm,
227 7.0f * approxInvL2Norm,
228 8.0f * approxInvL2Norm,
229 9.0f * approxInvL2Norm,
230 10.0f * approxInvL2Norm
231 };
232
233
234 return L2NormalizationTestImpl<ArmnnType>(
235 workloadFactory,
236 memoryManager,
237 inputOutputShape,
238 scale,
239 offset,
240 inputValues,
241 outScale,
242 outOffset,
243 expectedOutputValues,
244 layout);
245}
246
247template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
248LayerTestResult<T, 4> L2Normalization2dTestCommon(
249 armnn::IWorkloadFactory& workloadFactory,
250 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
251 float scale,
252 int32_t offset,
253 float outScale,
254 int32_t outOffset,
255 const armnn::DataLayout layout)
256{
257 // Width: 5
258 // Height: 1
259 // Channels: 2
260 // BatchSize: 1
261 unsigned int numberOfBatches = 1;
262 unsigned int numberOfChannels = 2;
263 unsigned int height = 1;
264 unsigned int width = 5;
265
266 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
267 numberOfBatches, numberOfChannels, height, width, layout);
268 std::vector<float> inputValues
269 {
270 // Batch 0, Channel 0, Height (1) x Width (5)
271 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
272
273 // Batch 0, Channel 1, Height (1) x Width (5)
274 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
275 };
276 std::vector<float> expectedOutputValues
277 {
278 // Batch 0, Channel 0, Height (1) x Width (5)
279 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
280 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
281 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
282 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
283 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
284
285 // Batch 0, Channel 1, Height (1) x Width (5)
286 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
287 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
288 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
289 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
290 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
291 };
292
293 return L2NormalizationTestImpl<ArmnnType>(
294 workloadFactory,
295 memoryManager,
296 inputOutputShape,
297 scale,
298 offset,
299 inputValues,
300 outScale,
301 outOffset,
302 expectedOutputValues,
303 layout);
304}
305
// Builds a [1,2,4,3] tensor and checks L2 Normalization across its two
// channels: each (height, width) position is normalized against the pair of
// values at that position in channel 0 and channel 1.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f,  21.0f, 150.0f,
        149.0f,  32.0f, 179.0f,
         15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f,  73.0f,
        211.0f, 212.0f,  89.0f,
         24.0f, 138.0f, 188.0f,
        162.0f,  12.0f, 161.0f
    };
    // Each expected value is the input scaled by the inverse norm of the
    // cross-channel pair at the same spatial position.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}
384
// Builds a [2,3,4,3] tensor and checks L2 Normalization across its three
// channels for both batches: each (batch, height, width) position is
// normalized against the triple of values at that position across channels.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f
    };
    // Each expected value is the input scaled by the inverse norm of the
    // cross-channel triple at the same (batch, height, width) position;
    // normalization never mixes values between the two batches.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
          7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}
543
544} // anonymous namespace
545
546LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
547 armnn::IWorkloadFactory& workloadFactory,
548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
549 const armnn::DataLayout layout)
550{
551 // Dummy descriptor to get the default value of epsilon.
552 armnn::L2NormalizationDescriptor descriptor;
553
554 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
555 workloadFactory,
556 memoryManager,
557 0.f,
558 0,
559 0.f,
560 0,
561 layout,
562 descriptor.m_Eps);
563}
564
565LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
566 armnn::IWorkloadFactory& workloadFactory,
567 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
568 const armnn::DataLayout layout)
569{
570 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
571 workloadFactory,
572 memoryManager,
573 0.f,
574 0,
575 0.f,
576 0,
577 layout,
578 1e-9f);
579}
580
581LayerTestResult<float, 4> L2Normalization1dTest(
582 armnn::IWorkloadFactory& workloadFactory,
583 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
584 const armnn::DataLayout layout)
585{
586 return L2Normalization1dTestCommon<armnn::DataType::Float32>(
587 workloadFactory,
588 memoryManager,
589 0.f,
590 0,
591 0.f,
592 0,
593 layout);
594}
595
596LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
597 armnn::IWorkloadFactory& workloadFactory,
598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
599 const armnn::DataLayout layout)
600{
601 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
602 workloadFactory,
603 memoryManager,
604 1.f,
605 0,
606 1.f,
607 0,
608 layout);
609}
610
611LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
612 armnn::IWorkloadFactory& workloadFactory,
613 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
614 const armnn::DataLayout layout)
615{
616 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
617 workloadFactory,
618 memoryManager,
619 1.f,
620 0,
621 1.f / 128,
622 128,
623 layout);
624}
625
626LayerTestResult<float, 4> L2Normalization2dTest(
627 armnn::IWorkloadFactory& workloadFactory,
628 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
629 const armnn::DataLayout layout)
630{
631 return L2Normalization2dTestCommon<armnn::DataType::Float32>(
632 workloadFactory,
633 memoryManager,
634 0.f,
635 0,
636 0.f,
637 0,
638 layout);
639}
640
641LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
642 armnn::IWorkloadFactory& workloadFactory,
643 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
644 const armnn::DataLayout layout)
645{
646 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
647 workloadFactory,
648 memoryManager,
649 1.f,
650 0,
651 1.f,
652 0,
653 layout);
654}
655
656LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
657 armnn::IWorkloadFactory& workloadFactory,
658 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
659 const armnn::DataLayout layout)
660{
661 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
662 workloadFactory,
663 memoryManager,
664 1.f,
665 0,
666 1.f / 128,
667 128,
668 layout);
669}
670
// Checks that L2 Normalization also accepts a rank-2 ([5,2]) tensor. With
// NHWC layout the trailing dimension acts as channels, so each pair of
// values in a row is normalized together (see expectedOutputData below).
LayerTestResult<float, 2> L2Normalization2dShapeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const armnn::DataLayout layout = armnn::DataLayout::NHWC;
    const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });

    std::vector<float> inputData
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
    };
    // Each row pair (2k-1, 2k) is scaled by the inverse norm of that pair.
    std::vector<float> expectedOutputData
    {
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);

    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);

    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Epsilon only matters for near-zero rows; 1e-12 matches the value used
    // by L2NormalizationTestImpl's default.
    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = 1e-12f;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
729
730LayerTestResult<float, 4> L2Normalization3dTest(
731 armnn::IWorkloadFactory& workloadFactory,
732 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
733 const armnn::DataLayout layout)
734{
735 return L2Normalization3dTestCommon<armnn::DataType::Float32>(
736 workloadFactory,
737 memoryManager,
738 0.f,
739 0,
740 0.f,
741 0,
742 layout);
743}
744
745LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
746 armnn::IWorkloadFactory& workloadFactory,
747 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
748 const armnn::DataLayout layout)
749{
750 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
751 workloadFactory,
752 memoryManager,
753 1.f,
754 0,
755 1.f,
756 0,
757 layout);
758}
759
760LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
761 armnn::IWorkloadFactory& workloadFactory,
762 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
763 const armnn::DataLayout layout)
764{
765 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
766 workloadFactory,
767 memoryManager,
768 1.f,
769 0,
770 1.f / 128,
771 128,
772 layout);
773}
774
775LayerTestResult<float, 4> L2Normalization4dTest(
776 armnn::IWorkloadFactory& workloadFactory,
777 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
778 const armnn::DataLayout layout)
779{
780 return L2Normalization4dTestCommon<armnn::DataType::Float32>(
781 workloadFactory,
782 memoryManager,
783 0.f,
784 0,
785 0.f,
786 0,
787 layout);
788}
789
790LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
791 armnn::IWorkloadFactory& workloadFactory,
792 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
793 const armnn::DataLayout layout)
794{
795 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
796 workloadFactory,
797 memoryManager,
798 1.f,
799 0,
800 1.f,
801 0,
802 layout);
803}
804
805LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
806 armnn::IWorkloadFactory& workloadFactory,
807 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
808 const armnn::DataLayout layout)
809{
810 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
811 workloadFactory,
812 memoryManager,
813 1.f,
814 0,
815 1.f / 128,
816 128,
817 layout);
818}