//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>

#include <test/TensorHelpers.hpp>

template<typename T>
LayerTestResult<T, 4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                        const armnn::TensorShape& inputOutputTensorShape,
                                        const std::vector<float>& inputValues,
                                        const std::vector<float>& expectedOutputValues,
                                        float qScale,
                                        int32_t qOffset,
                                        armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());

    armnn::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }
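
    // Note: QuantizedVector (from QuantizeHelper.hpp) is assumed to map each float
    // through the qScale/qOffset parameters when T is a quantized type, and to pass
    // the values through unchanged when T is float.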
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        QuantizedVector<T>(qScale, qOffset, inputValues));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
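
    // For each element x in channel c, batch normalization computes:
    //   y = ((x - mean[c]) / sqrt(variance[c] + epsilon)) * gamma[c] + beta[c]
    // so the caller-supplied expectedOutputValues must be consistent with the
    // parameter values above (epsilon is set to 0.0f below).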

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
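
    // The batch norm parameters (mean, variance, beta, gamma) are attached to the
    // queue descriptor as constant tensor handles; with m_Eps set to 0.0f the
    // denominator is exactly sqrt(variance[c]) for this test.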
    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean     = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta     = &betaTensor;
    descriptor.m_Gamma    = &gammaTensor;
    descriptor.m_Parameters.m_Eps = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
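
// A minimal usage sketch (illustrative only; the concrete callers and their value
// sets live in the backend unit tests, so the numbers below are assumptions):
//
//   LayerTestResult<float, 4> result = BatchNormTestImpl<float>(
//       workloadFactory,
//       armnn::TensorShape({ 1, 2, 3, 2 }),    // N, C, H, W for NCHW
//       inputValues, expectedOutputValues,     // a hand-computed pair
//       0.0f /* qScale */, 0 /* qOffset */,
//       armnn::DataLayout::NCHW);
//   BOOST_TEST(CompareTensors(result.output, result.outputExpected));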

template<typename T>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(armnn::IWorkloadFactory& workloadFactory,
                                            float qScale,
                                            int32_t qOffset)
{
    const unsigned int width    = 2;
    const unsigned int height   = 3;
    const unsigned int channels = 2;
    const unsigned int num      = 1;

    armnn::TensorInfo inputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
    armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  QuantizedVector<T>(qScale, qOffset,
                                  {
                                      1.f, 1.f, 4.f, 1.f,
                                      4.f, 4.f, 2.f, 1.f,
                                      1.f, -2.f, 6.f, 4.f
                                  }));
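    // With the NHWC layout the innermost dimension is channels, so each row above
    // holds two width positions interleaved as { c0, c1, c0, c1 }.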
    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean     = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta     = &betaTensor;
    data.m_Gamma    = &gammaTensor;
    data.m_Parameters.m_Eps = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract the mean, divide by the standard deviation (with an epsilon to
    // avoid division by zero), multiply by gamma, and add beta.
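    // Worked by hand for the first two values (x = 1 in both channels):
    //   channel 0: ((1 - 3)  / sqrt(4)) * 2 + 3 = -1 * 2 + 3 = 1
    //   channel 1: ((1 - -2) / sqrt(9)) * 1 + 2 =  1 * 1 + 2 = 3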
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                          QuantizedVector<T>(qScale, qOffset,
                                          {
                                              1.f, 3.f, 4.f, 3.f,
                                              4.f, 4.f, 2.f, 3.f,
                                              1.f, 2.f, 6.f, 4.f
                                          }));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}