//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchNormalizationTestImpl.hpp"

#include <DataLayoutIndexed.hpp>
#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/QuantizeHelper.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

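// Reference formula for the transform these tests exercise (this mirrors the
// per-channel batch normalization computation itself, not any particular
// backend's implementation of it):
//
//     out = gamma[c] * (in - mean[c]) / sqrt(variance[c] + eps) + beta[c]
//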
26template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
27LayerTestResult<T, 4> BatchNormTestImpl(
28 armnn::IWorkloadFactory& workloadFactory,
29 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
30 const armnn::TensorShape& inputOutputTensorShape,
31 const std::vector<float>& inputValues,
32 const std::vector<float>& expectedOutputValues,
33 float qScale,
34 int32_t qOffset,
35 armnn::DataLayout dataLayout)
36{
37 armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
38 armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);
39
40 armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);
41
42 armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
43 ArmnnType);
44
45 // Set quantization parameters if the requested type is a quantized type.
46 if (armnn::IsQuantizedType<T>())
47 {
48 inputTensorInfo.SetQuantizationScale(qScale);
49 inputTensorInfo.SetQuantizationOffset(qOffset);
50 outputTensorInfo.SetQuantizationScale(qScale);
51 outputTensorInfo.SetQuantizationOffset(qOffset);
52 tensorInfo.SetQuantizationScale(qScale);
53 tensorInfo.SetQuantizationOffset(qOffset);
54 }
55
56 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
57 QuantizedVector<T>(qScale, qOffset, inputValues));
58
59 // These values are per-channel of the input.
60 auto mean = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
61 auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
62 auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
63 auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
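    // A quick worked example of the formula at the top of this file, using
    // the values chosen above (eps is set to 0 below, and both variances are
    // perfect squares, so the arithmetic is exact):
    //   Channel 0: out = 2 * (in - 3) / sqrt(4) + 3 = in              (identity)
    //   Channel 1: out = 1 * (in + 2) / sqrt(9) + 2 = (in + 2) / 3 + 2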

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta = &betaTensor;
    descriptor.m_Gamma = &gammaTensor;
    descriptor.m_Parameters.m_Eps = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 2;
    const unsigned int num = 1;

    armnn::TensorInfo inputTensorInfo({num, height, width, channels}, ArmnnType);
    armnn::TensorInfo outputTensorInfo({num, height, width, channels}, ArmnnType);
    armnn::TensorInfo tensorInfo({channels}, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset,
        {
            1.f, 1.f, 4.f, 1.f,
            4.f, 4.f, 2.f, 1.f,
            1.f, -2.f, 6.f, 4.f
        }));
    // These values are per-channel of the input.
    auto mean = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
    auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
    auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
    // multiply by gamma and add beta.
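    // Spot check against the per-channel formula at the top of this file:
    // channel 1 maps in = -2 to (-2 + 2) / 3 + 2 = 2 and in = 4 to
    // (4 + 2) / 3 + 2 = 4, matching the last row of the expected values below.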
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset,
        {
            1.f, 3.f, 4.f, 3.f,
            4.f, 4.f, 2.f, 3.f,
            1.f, 2.f, 6.f, 4.f
        }));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

} // anonymous namespace

LayerTestResult<float, 4> BatchNormFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormFloatNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

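    // With scale 1/20 and offset 50, the uint8 range [0, 255] dequantizes to
    // [-2.5, 10.25], which covers every float value used above.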
    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
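    // Note: the extra 0.0f argument to the variance tensor above is assumed
    // to be the lower bound of the random range, which would keep the
    // variance non-negative so that sqrt(variance + eps) stays well defined.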

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}