//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchNormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

using namespace armnnUtils;

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    float qScale,
    int32_t qOffset,
    armnn::DataLayout dataLayout)
{
    IgnoreUnused(memoryManager);

    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);

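    // DataLayoutIndexed resolves the channel dimension index for the given layout
    // (1 for NCHW, 3 for NHWC), so the 1-D parameter tensor below gets the right
    // length for either layout.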
    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputValues, qScale, qOffset));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4, 9 }, qScale, qOffset));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, 2 }, qScale, qOffset));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));
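
    // Per channel, batch normalization computes:
    //     y = gamma * (x - mean) / sqrt(variance + eps) + beta
    // With eps == 0 (set on the descriptor below), channel 0 works out to
    // y = 2 * (x - 3) / 2 + 3 = x (the identity) and channel 1 to y = (x + 2) / 3 + 2.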

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             QuantizedVector<T>(expectedOutputValues, qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

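    // The batch norm parameters (mean, variance, beta, gamma) travel on the queue
    // descriptor as constant tensor handles rather than as workload inputs.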
    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta = &betaTensor;
    descriptor.m_Gamma = &gammaTensor;
    descriptor.m_Parameters.m_Eps = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);

    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 2;
    const unsigned int num = 1;

    armnn::TensorInfo inputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo tensorInfo({ channels }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  QuantizedVector<T>(
                                  {
                                      1.f, 1.f, 4.f, 1.f,
                                      4.f, 4.f, 2.f, 1.f,
                                      1.f, -2.f, 6.f, 4.f
                                  },
                                  qScale, qOffset));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4, 9 }, qScale, qOffset));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, 2 }, qScale, qOffset));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
    // multiply by gamma and add beta.
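    // For the parameters above that works out to y = 2 * (x - 3) / 2 + 3 = x for
    // channel 0 (the identity) and y = (x + 2) / 3 + 2 for channel 1.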
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                          QuantizedVector<T>(
                                          {
                                              1.f, 3.f, 4.f, 3.f,
                                              4.f, 4.f, 2.f, 3.f,
                                              1.f, 2.f, 6.f, 4.f
                                          },
                                          qScale, qOffset));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

} // anonymous namespace

LayerTestResult<float, 4> BatchNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

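    // With scale 1/20 and offset 50, a real value r quantizes to q = r * 20 + 50,
    // so the test data spans q = 10 (for -2.f) up to q = 170 (for 6.f), comfortably
    // inside the uint8 range.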
    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    IgnoreUnused(memoryManager);

    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = { batchSize, channels, height, width };
    constexpr unsigned int tensorShape[] = { channels };

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
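    // The trailing 0.0f is a lower bound for the random values: variance must stay
    // non-negative for sqrt(variance + eps) to be well-defined.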
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
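    // Unlike the fixed-value tests above, a non-zero epsilon is needed here because
    // the randomly generated variance values can be arbitrarily close to zero.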
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

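    // PostAllocationConfigure gives each workload a chance to finish any setup that
    // depends on the tensor handles having been allocated before execution.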
    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}