//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "L2NormalizationTestImpl.hpp"

#include <Permute.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
#include <TensorUtils.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // The reference data is specified in NCHW order; permute it here if the test runs with the NHWC layout.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

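    // Quantize the (possibly permuted) reference input using the input tensor's quantization parameters.
    // For the Float32 tests the scale/offset are 0.f/0 and the values are expected to pass through unchanged.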
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        armnnUtils::QuantizedVector<T>(inputData,
                                                                       inputTensorInfo.GetQuantizationScale(),
                                                                       inputTensorInfo.GetQuantizationOffset()));

    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected =
        MakeTensor<T, 4>(outputTensorInfo,
                         armnnUtils::QuantizedVector<T>(expectedOutputData,
                                                        outputTensorInfo.GetQuantizationScale(),
                                                        outputTensorInfo.GetQuantizationOffset()));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

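// Helper for building the expected reference outputs below: the elements passed in are the values
// sharing one batch/height/width position across the channel dimension, and the returned factor
// 1/sqrt(sum of squares) is what L2 normalization applies along that axis.
// For example, CalcInvL2Norm({ 3.0f, 4.0f }) == 1 / sqrt(9 + 16) == 0.2f.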
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout,
    float epsilon)
{
    // Width: 1
    // Height: 1
    // Channels: 3
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 3;
    unsigned int height = 1;
    unsigned int width = 1;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);

    // 0.00000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        0.00000002f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        0.00000003f,
    };

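    // The sum of squares of the inputs (~1.4e-15) is below epsilon, so the reference scaling factor
    // is taken to be 1/sqrt(epsilon) rather than 1/sqrt(sum of squares).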
    const float approxInvL2Norm = 1.f / sqrtf(epsilon);
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f * approxInvL2Norm,
        0.00000002f * approxInvL2Norm,
        0.00000003f * approxInvL2Norm,
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout,
        epsilon);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization1dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 10;
    unsigned int height = 1;
    unsigned int width = 1;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        2.0f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        3.0f,

        // Batch 0, Channel 3, Height (1) x Width (1)
        4.0f,

        // Batch 0, Channel 4, Height (1) x Width (1)
        5.0f,

        // Batch 0, Channel 5, Height (1) x Width (1)
        6.0f,

        // Batch 0, Channel 6, Height (1) x Width (1)
        7.0f,

        // Batch 0, Channel 7, Height (1) x Width (1)
        8.0f,

        // Batch 0, Channel 8, Height (1) x Width (1)
        9.0f,

        // Batch 0, Channel 9, Height (1) x Width (1)
        10.0f
    };
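    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385) ~= 0.050964719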
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 1;
    unsigned int width = 5;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f, 3.0f, 5.0f, 7.0f, 9.0f,

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f, 21.0f, 150.0f,
        149.0f, 32.0f, 179.0f,
        15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f, 73.0f,
        211.0f, 212.0f, 89.0f,
        24.0f, 138.0f, 188.0f,
        162.0f, 12.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

} // anonymous namespace

LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    // Dummy descriptor to get the default value of epsilon.
    armnn::L2NormalizationDescriptor descriptor;

    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout,
        descriptor.m_Eps);
}

LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout,
        1e-9f);
}

LayerTestResult<float, 4> L2Normalization1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization1dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

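// The QuantisedSymm16 variants use scale 1 and offset 0, so the integral reference values used by
// these tests quantize exactly and fit comfortably within the int16 range.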
LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

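// The QuantisedAsymm8 variants quantize the output with scale 1/128 and offset 128, which maps the
// normalized output range [-1, 1] across the full uint8 range.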
LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

LayerTestResult<float, 4> L2Normalization2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

LayerTestResult<float, 2> L2Normalization2dShapeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const armnn::DataLayout layout = armnn::DataLayout::NHWC;
    const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });
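    // With a rank-2 input and NHWC layout the last dimension acts as the channel axis, so each
    // consecutive pair { 1, 2 }, { 3, 4 }, ... below is normalized together.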

    std::vector<float> inputData
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
    };
    std::vector<float> expectedOutputData
    {
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);

    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);

    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = 1e-12f;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

LayerTestResult<float, 4> L2Normalization3dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

LayerTestResult<float, 4> L2Normalization4dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}