//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FullyConnectedTestImpl.hpp"

#include <QuantizeHelper.hpp>

#include <backendsCommon/TensorHandle.hpp>

#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

//
// Implementation templates
//

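// Runs a FullyConnected workload with constant weights and bias: both are
// wrapped in ScopedTensorHandles and attached directly to the queue
// descriptor, so the backend treats them as constant tensors.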
template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsDesc,
    armnn::TensorInfo biasesDesc,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedTensorHandle biasTensor(biasesDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

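// Variant of the implementation above where the weights (and, when enabled,
// the bias) are passed to the workload as extra input tensors rather than as
// constants; m_ConstantWeights is set to false accordingly.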
template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestWeightsAsInputsImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsTensorInfo,
    armnn::TensorInfo biasesTensorInfo,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
    AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
    data.m_Parameters.m_ConstantWeights = false;

    std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
    if (biasEnabled)
    {
        input2Handle = tensorHandleFactory.CreateTensorHandle(biasesTensorInfo);
        AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
    LayerTestResult<T, 2> result(outputTensorInfo);

    input0Handle->Allocate();
    input1Handle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(input0Handle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(input1Handle.get(), &weights[0][0]);
    if (biasEnabled)
    {
        input2Handle->Allocate();
        CopyDataToITensorHandle(input2Handle.get(), &bias[0]);
    }

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

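// End-to-end quantized FullyConnected test: builds a 1x1x2x3 input, a 2x6
// weights matrix and an int32 bias, then dispatches to the constant-weights
// or weights-as-inputs implementation depending on 'constantWeights'.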
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> FullyConnectedTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool constantWeights)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<T, 2> result(outputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        {
            -1.2f, 6.1f, -3.5f,
            18.8f, -5.5f, 2.9f
        },
        inputTensorInfo));

    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8.0f, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc));

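    // Bias values are int32 in units of inputScale * weightsScale (0.1f * 0.2f = 0.02f),
    // so 9250 and 67500 dequantize to 185.0f and 1350.0f respectively.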
    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});

    if (constantWeights)
    {
        result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
                                                 memoryManager,
                                                 tensorHandleFactory,
                                                 inputTensorInfo,
                                                 outputTensorInfo,
                                                 weightsDesc,
                                                 biasesDesc,
                                                 weights,
                                                 bias,
                                                 input,
                                                 biasEnabled,
                                                 true);
    }
    else
    {
        result = SimpleFullyConnectedTestWeightsAsInputsImpl<T>(workloadFactory,
                                                                memoryManager,
                                                                tensorHandleFactory,
                                                                inputTensorInfo,
                                                                outputTensorInfo,
                                                                weightsDesc,
                                                                biasesDesc,
                                                                weights,
                                                                bias,
                                                                input,
                                                                biasEnabled,
                                                                true);
    }

    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
    }
    else
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
    }

    return result;
}

//
// ArmNN variant of the AndroidNN fully_connected_float_large test.
//
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
    weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
    biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        armnnUtils::QuantizedVector<T>({
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        },
        qScale, qOffset));

    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        armnnUtils::QuantizedVector<T>({
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        },
        qScale, qOffset));

    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights
    );

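    // Expected: 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.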
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));

    return result;
}

//
// Explicit template specializations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
FullyConnectedTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool constWeights);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
FullyConnectedTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool constWeights);

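// Illustrative usage (a sketch, not part of the original test suite): a backend
// test would typically call one of the specializations above with its own
// factory objects, along the lines of:
//
//     LayerTestResult<uint8_t, 2> res =
//         FullyConnectedTest<armnn::DataType::QAsymmU8>(workloadFactory,
//                                                       memoryManager,
//                                                       tensorHandleFactory,
//                                                       /*biasEnabled=*/true,
//                                                       /*constantWeights=*/true);
//
// where workloadFactory, memoryManager and tensorHandleFactory are assumed to
// be supplied by the backend-specific test fixture.
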
//
// Implementation functions
//

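// Float32 FullyConnected test over a batch of two 5-element inputs and three
// output channels; the expected outputs are written out below as the explicit
// sums of the per-element products plus the bias.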
LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}

LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
                                                                   memoryManager,
                                                                   tensorHandleFactory,
                                                                   transposeWeights);
}