//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchNormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

using namespace armnnUtils;

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    float qScale,
    int32_t qOffset,
    armnn::DataLayout dataLayout)
{
    boost::ignore_unused(memoryManager);
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }
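    // For reference: QuantizedVector quantizes each float roughly as
    // round(value / qScale) + qOffset, so with the qScale = 1/20 and qOffset = 50
    // used by the quantized tests below, 1.0f maps to 70 and -2.0f maps to 10.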

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputValues, qScale, qOffset));

    // These values are per-channel of the input.
    auto mean = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4, 9 }, qScale, qOffset));
    auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, 2 }, qScale, qOffset));
    auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));
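    // With these constants and m_Eps set to 0.0f below, each channel c computes
    // (x - mean[c]) / sqrt(variance[c]) * gamma[c] + beta[c]: channel 0 reduces to
    // (x - 3) / 2 * 2 + 3 = x (an identity mapping), and channel 1 to (x + 2) / 3 + 2,
    // which is how the expectedOutputValues passed in by the callers were derived.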

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             QuantizedVector<T>(expectedOutputValues, qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta = &betaTensor;
    descriptor.m_Gamma = &gammaTensor;
    descriptor.m_Parameters.m_Eps = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    boost::ignore_unused(memoryManager);

    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 2;
    const unsigned int num = 1;

    armnn::TensorInfo inputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo tensorInfo({ channels }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(
        {
            1.f, 1.f, 4.f, 1.f,
            4.f, 4.f, 2.f, 1.f,
            1.f, -2.f, 6.f, 4.f
        },
        qScale, qOffset));

    // These values are per-channel of the input.
    auto mean = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4, 9 }, qScale, qOffset));
    auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, 2 }, qScale, qOffset));
    auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract the mean, divide by the standard deviation (with an epsilon to avoid div by 0),
    // multiply by gamma and add beta.
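    // Worked example for the first two entries above (both inputs are 1.f):
    //   channel 0: (1 - 3) / sqrt(4 + 0) * 2 + 3 = 1
    //   channel 1: (1 - (-2)) / sqrt(9 + 0) * 1 + 2 = 3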
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(
        {
            1.f, 3.f, 4.f, 3.f,
            4.f, 4.f, 2.f, 3.f,
            1.f, 2.f, 6.f, 4.f
        },
        qScale, qOffset));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

} // anonymous namespace

LayerTestResult<float, 4> BatchNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

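    // With qScale = 1/20 and qOffset = 50, the float values used here land well inside
    // the uint8 range (e.g. -2.f quantizes to 10 and 6.f to 170), so nothing saturates.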
    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    boost::ignore_unused(memoryManager);
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = { batchSize, channels, height, width };
    constexpr unsigned int tensorShape[] = { channels };

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
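    // Assumption: the trailing 0.0f below is a lower bound for the generated values,
    // keeping the random variance non-negative so sqrt(variance + eps) stays real.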
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;
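    // Unlike the fixed-value tests above, a non-zero epsilon matters here: the random
    // variance is only bounded below by zero, so eps guards against division by zero.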

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}