//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

6#include "FullyConnectedTestImpl.hpp"
7
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01008
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01009#include <QuantizeHelper.hpp>
10
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010011#include <backendsCommon/CpuTensorHandle.hpp>
12
13#include <backendsCommon/test/DataTypeUtils.hpp>
14#include <backendsCommon/test/TensorCopyUtils.hpp>
15#include <backendsCommon/test/WorkloadTestUtils.hpp>
16
17#include <test/TensorHelpers.hpp>
18
//
// Implementation templates
//

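// Shared helper: builds a FullyConnected workload from the given tensor infos,
// uploads the supplied weights, bias and input, executes the workload and
// returns the actual output for comparison against the expected values.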
23template<typename T, typename B>
24LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
25 armnn::IWorkloadFactory& workloadFactory,
26 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
27 armnn::TensorInfo inputTensorInfo,
28 armnn::TensorInfo outputTensorInfo,
29 armnn::TensorInfo weightsDesc,
30 armnn::TensorInfo biasesDesc,
31 boost::multi_array<T, 2>& weights,
32 boost::multi_array<B, 1>& bias,
33 boost::multi_array<T, 4>& input,
34 bool biasEnabled,
35 bool transposeWeights)
36{
    IgnoreUnused(memoryManager);
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);

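    // The queue descriptor stores raw pointers to the weight and bias handles,
    // so these scoped handles must outlive the workload execution below.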
    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

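// Runs a single 1x1x2x3 (NCHW) input through a fully connected layer with two
// output channels; quantization offsets are deliberately non-zero on the
// input, output and weight tensors to exercise the quantized code paths.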
70template<armnn::DataType ArmnnType, typename T>
71LayerTestResult<T, 2> FullyConnectedTest(
72 armnn::IWorkloadFactory& workloadFactory,
73 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
74 bool biasEnabled)
75{
76 constexpr static unsigned int inputWidth = 3u;
77 constexpr static unsigned int inputHeight = 2u;
78 constexpr static unsigned int inputChannels = 1u;
79
80 constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
81
82 constexpr static unsigned int outputChannels = 2u;
83
84 armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
85 inputTensorInfo.SetQuantizationScale(0.1f);
86 inputTensorInfo.SetQuantizationOffset(63);
87
88 armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
89 outputTensorInfo.SetQuantizationScale(5.f);
90 outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
91
92 armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
93 weightsDesc.SetQuantizationScale(0.2f);
94 weightsDesc.SetQuantizationOffset(93);
95
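    // For quantized data types the bias is stored as Signed32, quantized with
    // scale = inputScale * weightScale and a zero offset.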
    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<T, 2> result(outputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        {
            -1.2f, 6.1f, -3.5f,
            18.8f, -5.5f, 2.9f
        },
        inputTensorInfo));

    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8.0f, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc));

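    // The bias values are given pre-quantized: with the bias scale of
    // 0.1f * 0.2f = 0.02f they dequantize to 9250 * 0.02f = 185.0f and
    // 67500 * 0.02f = 1350.0f.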
116 auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
117
118 result = SimpleFullyConnectedTestImpl<T>(
119 workloadFactory,
120 memoryManager,
121 inputTensorInfo, outputTensorInfo,
122 weightsDesc, biasesDesc,
123 weights, bias, input,
124 biasEnabled, true
125 );
126
127 if (biasEnabled)
128 {
129 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
130 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
131 }
132 else
133 {
134 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
135 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
136 }
137
138 return result;
139}
140
//
// ArmNN variant of the AndroidNN fully_connected_float_large test.
//
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[]   = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[]  = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo  = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
    weightsDesc      = armnn::TensorInfo(2, weightsShape, ArmnnType);
    biasesDesc       = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

195 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100196 armnnUtils::QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100197 1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100198 },
199 qScale, qOffset)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100200 );
201
202 boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100203 armnnUtils::QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100204 2.0f, 3.0f, 4.0f, 5.0f, 6.0f
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100205 },
206 qScale, qOffset)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100207 );
208
209 std::vector<T> biasValues({900000.f});
210 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);
211
    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights
    );

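    // Expected: 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.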
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));

    return result;
}

//
// Explicit template specializations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
FullyConnectedTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
FullyConnectedTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled);

//
// Implementation functions
//

LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[]   = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[]  = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo  = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc      = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc       = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

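    // The transposed path should compute the same result, so supply the same
    // matrix stored in transposed ({outputChannels, inputChannels}) order.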
    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}

344LayerTestResult<float, 2> FullyConnectedLargeTest(
345 armnn::IWorkloadFactory& workloadFactory,
346 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
347 bool transposeWeights)
348{
349 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
350}