//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FullyConnectedTestImpl.hpp"


#include <QuantizeHelper.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

//
// Implementation templates
//

template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsDesc,
    armnn::TensorInfo biasesDesc,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

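    // Set up the FullyConnected workload descriptor; the weights and bias are supplied
    // as constant tensors owned by ScopedCpuTensorHandle objects.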
    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;

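    // Create the workload, allocate the input/output tensor handles, copy the input
    // data in, execute, and read the result back out.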
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> FullyConnectedTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

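    // The bias data type follows from the weights type; its quantization scale is the
    // product of the input and weight scales, with a zero offset.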
    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<T, 2> result(outputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        {
            -1.2f, 6.1f, -3.5f,
            18.8f, -5.5f, 2.9f
        },
        inputTensorInfo));

    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc));

    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, true
    );

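    // Reference outputs are pre-computed; they account for the quantization of the
    // inputs, weights and bias above.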
    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
    }
    else
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
    }

    return result;
}

//
// ArmNN variant of the AndroidNN fully_connected_float_large test.
//
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
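    // When the weights are transposed, the weight tensor is described as
    // [outputChannels, inputChannels] and m_TransposeWeightMatrix is set on the workload.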
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
    weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
    biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        armnnUtils::QuantizedVector<T>({
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        },
        qScale, qOffset)
    );

    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        armnnUtils::QuantizedVector<T>({
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        },
        qScale, qOffset)
    );

    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights
    );

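    // Expected result: 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.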
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));

    return result;
}

//
// Explicit template instantiations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
FullyConnectedTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
FullyConnectedTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled);

//
// Implementation functions
//

LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

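    // With transposed weights, the same values are re-laid-out as
    // [outputChannels, inputChannels], so each row holds one output channel's weights.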
    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }


    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

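    // Each expected value is the dot product of one input row with one weight column,
    // plus the corresponding bias term.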
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}

LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
                                                                   memoryManager,
                                                                   tensorHandleFactory,
                                                                   transposeWeights);
}