//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FullyConnectedTestImpl.hpp"

#include <armnn/ArmNN.hpp>

#include <QuantizeHelper.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

//
// Implementation templates
//

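// Builds a FullyConnected workload from the supplied tensor infos and data, runs it through the
// given workload factory, and returns the computed output (the expected output is filled in by the caller).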
template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsDesc,
    armnn::TensorInfo biasesDesc,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    boost::ignore_unused(memoryManager);
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> FullyConnectedTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

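    // The bias of a quantized FullyConnected layer is expected to be quantized to Signed32 with
    // scale = inputScale * weightsScale and a zero offset, which is what is set up below.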
    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<T, 2> result(outputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        {
            -1.2f, 6.1f, -3.5f,
            18.8f, -5.5f, 2.9f
        },
        inputTensorInfo));

    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc));

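    // Bias values are supplied directly in the quantized (int32) domain; with the bias scale of
    // 0.1f * 0.2f = 0.02f they correspond to 185.0f and 1350.0f.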
    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, true
    );

    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
    }
    else
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
    }

    return result;
}

//
// ArmNN variant of the AndroidNN fully_connected_float_large test.
//
// Tests the fully connected layer with large values, optionally transposing the weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
    weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
    biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        armnnUtils::QuantizedVector<T>({
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        },
        qScale, qOffset)
    );

    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        armnnUtils::QuantizedVector<T>({
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        },
        qScale, qOffset)
    );

    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights
    );

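    // Expected result: 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.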
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));

    return result;
}

//
// Explicit template specializations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
FullyConnectedTest<armnn::DataType::QuantisedAsymm8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
FullyConnectedTest<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled);

//
// Implementation functions
//

LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

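    // When the weight matrix is marked as transposed, the same values are supplied laid out as
    // [outputChannels, inputChannels] rather than [inputChannels, outputChannels].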
    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

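    // Each expected value is the dot product of an input row with a weight column, plus the
    // corresponding bias (zero when the bias is disabled).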
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}

LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}