//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchNormalizationTestImpl.hpp"

#include <DataLayoutIndexed.hpp>
#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/QuantizeHelper.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    float qScale,
    int32_t qOffset,
    armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        QuantizedVector<T>(qScale, qOffset, inputValues));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
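    // With these parameters (and eps of zero, as set below) the transformation
    //     x -> (x - mean) / sqrt(variance + eps) * gamma + beta
    // reduces, per channel, to:
    //     channel 0: (x - 3) / sqrt(4) * 2 + 3 = x          (identity)
    //     channel 1: (x + 2) / sqrt(9) * 1 + 2 = x/3 + 8/3
    // which is how the expectedOutputValues passed in by the callers below were derived.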

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta = &betaTensor;
    descriptor.m_Gamma = &gammaTensor;
    descriptor.m_Parameters.m_Eps = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
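    // Epsilon is zero here so the expected values hold exactly; a real network would
    // use a small positive epsilon to guard against division by zero.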
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const unsigned int width    = 2;
    const unsigned int height   = 3;
    const unsigned int channels = 2;
    const unsigned int num      = 1;

    armnn::TensorInfo inputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo tensorInfo({ channels }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  QuantizedVector<T>(qScale, qOffset,
                                  {
                                      1.f,  1.f, 4.f, 1.f,
                                      4.f,  4.f, 2.f, 1.f,
                                      1.f, -2.f, 6.f, 4.f
                                  }));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract the mean, divide by the standard deviation (with an epsilon to avoid
    // division by zero), then multiply by gamma and add beta.
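    // Worked example for channel 1 (mean -2, variance 9, beta 2, gamma 1, eps 0):
    //     input  1.f -> ( 1 + 2) / sqrt(9) * 1 + 2 = 3.f
    //     input -2.f -> (-2 + 2) / sqrt(9) * 1 + 2 = 2.f
    // Channel 0 (mean 3, variance 4, beta 3, gamma 2) reduces to the identity mapping,
    // so its values pass through unchanged.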
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                          QuantizedVector<T>(qScale, qOffset,
                                          {
                                              1.f, 3.f, 4.f, 3.f,
                                              4.f, 4.f, 2.f, 3.f,
                                              1.f, 2.f, 6.f, 4.f
                                          }));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

} // anonymous namespace

LayerTestResult<float, 4> BatchNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

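    // Channel 0 of the expected output matches the input exactly because the
    // implementation's fixed parameters for that channel (mean 3, variance 4,
    // beta 3, gamma 2) reduce to the identity mapping.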
    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

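    // With scale 1/20 and offset 50, a float value v quantises to round(v * 20) + 50,
    // e.g. 1.f -> 70 and -2.f -> 10, so all of the values above fit comfortably in uint8.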
    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width     = 2;
    const unsigned int height    = 3;
    const unsigned int channels  = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[]       = { batchSize, channels, height, width };
    constexpr unsigned int tensorShape[] = { channels };

    inputTensorInfo  = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo       = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean     = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta     = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma    = MakeRandomTensor<float, 1>(tensorInfo, 345);
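    // The extra 0.0f passed for the variance is assumed here to be MakeRandomTensor's
    // lower bound; a non-negative variance keeps sqrt(variance + eps) well-defined.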

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

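    // The reference run copies the same descriptor and parameter tensors; only the
    // input/output handles differ, so both workloads compute from identical data and
    // the reference output can serve as the expected result.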
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}