//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "L2NormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <armnnUtils/TensorUtils.hpp>
#include <armnnUtils/Permute.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <cmath>
#include <numeric>

namespace
{

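// Shared implementation used by all L2 normalization tests below: it builds input/output tensor
// infos from the given shape and quantization parameters, permutes the NCHW reference data to
// NHWC when that layout is requested, quantizes both the input and the expected output, then runs
// an L2Normalization workload and returns the result for comparison.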
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // At this point, permute the input data if required (the reference values are defined in NCHW order).
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        armnnUtils::QuantizedVector<T>(inputData,
                                                                       inputTensorInfo.GetQuantizationScale(),
                                                                       inputTensorInfo.GetQuantizationOffset()));

    // The expected output data needs the same layout treatment as the input.
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected =
        MakeTensor<T, 4>(outputTensorInfo,
                         armnnUtils::QuantizedVector<T>(expectedOutputData,
                                                        outputTensorInfo.GetQuantizationScale(),
                                                        outputTensorInfo.GetQuantizationOffset()));

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

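// Returns 1 / sqrt(sum of squares) of the given values, i.e. the factor each element is
// multiplied by when it is L2-normalised together with those values.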
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

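// Exercises the epsilon handling: the input values are chosen so that their sum of squares is
// below epsilon, so the expected scaling factor assumes the denominator is clamped to sqrt(epsilon).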
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout,
    float epsilon)
{
    // Width: 1
    // Height: 1
    // Channels: 3
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 3;
    unsigned int height = 1;
    unsigned int width = 1;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);

    // 0.00000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        0.00000002f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        0.00000003f,
    };

    const float approxInvL2Norm = 1.f / sqrtf(epsilon);
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f * approxInvL2Norm,
        0.00000002f * approxInvL2Norm,
        0.00000003f * approxInvL2Norm,
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout,
        epsilon);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization1dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 10;
    unsigned int height = 1;
    unsigned int width = 1;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        2.0f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        3.0f,

        // Batch 0, Channel 3, Height (1) x Width (1)
        4.0f,

        // Batch 0, Channel 4, Height (1) x Width (1)
        5.0f,

        // Batch 0, Channel 5, Height (1) x Width (1)
        6.0f,

        // Batch 0, Channel 6, Height (1) x Width (1)
        7.0f,

        // Batch 0, Channel 7, Height (1) x Width (1)
        8.0f,

        // Batch 0, Channel 8, Height (1) x Width (1)
        9.0f,

        // Batch 0, Channel 9, Height (1) x Width (1)
        10.0f
    };
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 1;
    unsigned int width = 5;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f, 3.0f, 5.0f, 7.0f, 9.0f,

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f, 21.0f, 150.0f,
        149.0f, 32.0f, 179.0f,
        15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f, 73.0f,
        211.0f, 212.0f, 89.0f,
        24.0f, 138.0f, 188.0f,
        162.0f, 12.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        scale,
        offset,
        inputValues,
        outScale,
        outOffset,
        expectedOutputValues,
        layout);
}

} // anonymous namespace

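// Public test entry points. The Float32 variants pass quantization parameters of (0.f, 0), which
// are not used for float tensors; the quantized variants pick an output scale/offset (e.g. 1/128
// with offset 128 for QAsymmU8) so that the normalized output range of roughly [-1, 1] stays
// representable.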
LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    // Dummy descriptor to get the default value of epsilon.
    armnn::L2NormalizationDescriptor descriptor;

    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout,
        descriptor.m_Eps);
}

LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout,
        1e-9f);
}

LayerTestResult<float, 4> L2Normalization1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization1dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

LayerTestResult<float, 4> L2Normalization2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

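// Unlike the 4-d tests above, this variant drives the workload directly with a rank-2 tensor of
// shape { 5, 2 } in NHWC layout, so each pair of adjacent elements is normalised together.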
LayerTestResult<float, 2> L2Normalization2dShapeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const armnn::DataLayout layout = armnn::DataLayout::NHWC;
    const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });

    std::vector<float> inputData
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
    };
    std::vector<float> expectedOutputData
    {
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);

    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);

    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = 1e-12f;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

LayerTestResult<float, 4> L2Normalization3dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization3dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}

LayerTestResult<float, 4> L2Normalization4dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        0.f,
        0,
        layout);
}

LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f,
        0,
        layout);
}

LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    return L2Normalization4dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        1.f,
        0,
        1.f / 128,
        128,
        layout);
}