//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FullyConnectedTestImpl.hpp"

#include <armnn/backends/TensorHandle.hpp>

#include <armnnUtils/QuantizeHelper.hpp>

#include <DataTypeUtils.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/TensorHelpers.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

//
// Implementation templates
//

23template<typename T, typename B>
24LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000025 armnn::IWorkloadFactory& workloadFactory,
26 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
27 const armnn::ITensorHandleFactory& tensorHandleFactory,
28 armnn::TensorInfo inputTensorInfo,
29 armnn::TensorInfo outputTensorInfo,
30 armnn::TensorInfo weightsTensorInfo,
31 armnn::TensorInfo biasesTensorInfo,
Sadik Armagan483c8112021-06-01 09:24:52 +010032 std::vector<T>& weights,
33 std::vector<B>& bias,
34 std::vector<T>& input,
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000035 bool biasEnabled,
Matthew Sloyan81beae32021-07-13 19:46:11 +010036 bool transposeWeights,
37 bool constantWeights)
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000038{
39 std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
40 std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo);
41 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
42
Sadik Armagan483c8112021-06-01 09:24:52 +010043 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
44
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000045 armnn::FullyConnectedQueueDescriptor data;
46 armnn::WorkloadInfo info;
Matthew Sloyan81beae32021-07-13 19:46:11 +010047 armnn::ScopedTensorHandle weightsTensor(weightsTensorInfo);
48 armnn::ScopedTensorHandle biasTensor(biasesTensorInfo);
49
50 AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data());
51 AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000052
53 AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
54 AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
55 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Matthew Sloyan81beae32021-07-13 19:46:11 +010056
57 // Need to set as layer members will be null when creating the workload because the optimization hasn't been run.
58 data.m_Weight = &weightsTensor;
59 data.m_Bias = &biasTensor;
60
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000061 data.m_Parameters.m_BiasEnabled = biasEnabled;
62 data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
Matthew Sloyan81beae32021-07-13 19:46:11 +010063 data.m_Parameters.m_ConstantWeights = constantWeights;
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000064
65 std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
66 if (biasEnabled)
67 {
68 input2Handle = tensorHandleFactory.CreateTensorHandle(biasesTensorInfo);
69 AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());
70 }
71
Teresa Charlin611c7fb2022-01-07 09:47:29 +000072 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::FullyConnected,
73 data,
74 info);
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000075 LayerTestResult<T, 2> result(outputTensorInfo);
76
77 input0Handle->Allocate();
78 input1Handle->Allocate();
79 outputHandle->Allocate();
Sadik Armagan483c8112021-06-01 09:24:52 +010080 CopyDataToITensorHandle(input0Handle.get(), input.data());
81 CopyDataToITensorHandle(input1Handle.get(), weights.data());
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000082 if (biasEnabled)
83 {
84 input2Handle->Allocate();
Sadik Armagan483c8112021-06-01 09:24:52 +010085 CopyDataToITensorHandle(input2Handle.get(), bias.data());
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000086 }
87
88 ExecuteWorkload(*workload, memoryManager);
89
Sadik Armagan483c8112021-06-01 09:24:52 +010090 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
91 result.m_ActualData = actualOutput;
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000092
93 return result;
94}
95
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010096template<armnn::DataType ArmnnType, typename T>
97LayerTestResult<T, 2> FullyConnectedTest(
98 armnn::IWorkloadFactory& workloadFactory,
99 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100100 const armnn::ITensorHandleFactory& tensorHandleFactory,
Sadik Armaganf0a6dec2021-03-25 07:46:55 +0000101 bool biasEnabled,
102 bool constantWeights)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100103{
104 constexpr static unsigned int inputWidth = 3u;
105 constexpr static unsigned int inputHeight = 2u;
106 constexpr static unsigned int inputChannels = 1u;
107
108 constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
109
110 constexpr static unsigned int outputChannels = 2u;
111
112 armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
113 inputTensorInfo.SetQuantizationScale(0.1f);
114 inputTensorInfo.SetQuantizationOffset(63);
115
116 armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
117 outputTensorInfo.SetQuantizationScale(5.f);
118 outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
119
120 armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
121 weightsDesc.SetQuantizationScale(0.2f);
122 weightsDesc.SetQuantizationOffset(93);
123
124 armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
125 biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
126 biasesDesc.SetQuantizationOffset(0);
127
128 LayerTestResult<T, 2> result(outputTensorInfo);
129
Sadik Armagan483c8112021-06-01 09:24:52 +0100130 std::vector<T> input = ConvertToDataType<ArmnnType>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100131 {
132 -1.2f, 6.1f, -3.5f,
133 18.8f, -5.5f, 2.9f
134 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100135 inputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100136
Sadik Armagan483c8112021-06-01 09:24:52 +0100137 std::vector<T> weights = ConvertToDataType<ArmnnType>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100138 {
139 -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
140 23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
141 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100142 weightsDesc);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100143
Sadik Armagan483c8112021-06-01 09:24:52 +0100144 std::vector<int32_t> bias = {9250, 67500};
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100145
Matthew Sloyan81beae32021-07-13 19:46:11 +0100146 result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
147 memoryManager,
148 tensorHandleFactory,
149 inputTensorInfo,
150 outputTensorInfo,
151 weightsDesc,
152 biasesDesc,
153 weights,
154 bias,
155 input,
156 biasEnabled,
157 true,
158 constantWeights);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100159
160 if (biasEnabled)
161 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100162 result.m_ExpectedData = ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100163 }
164 else
165 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100166 result.m_ExpectedData = ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100167 }
168
169 return result;
170}
171
172//
173// ArmNN variant of the AndroidNN fully_connected_float_large test.
174//
175// Tests the fully connected layer with large values, optionally transposing weights.
176// Note this is templated for consistency, but the nature of this tests makes it unlikely to be useful in Uint8 mode.
177//
178template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
179LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
180 armnn::IWorkloadFactory& workloadFactory,
181 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100182 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100183 bool transposeWeights,
184 float qScale = 0.0f,
185 int32_t qOffset = 0)
186{
187 unsigned int inputWidth = 1;
188 unsigned int inputHeight = 1;
189 unsigned int inputChannels = 5;
190 unsigned int inputNum = 1;
191
192 unsigned int outputChannels = 1;
193 unsigned int outputNum = 1;
194
195 // Define the tensor descriptors.
196 armnn::TensorInfo inputTensorInfo;
197 armnn::TensorInfo outputTensorInfo;
198 armnn::TensorInfo weightsDesc;
199 armnn::TensorInfo biasesDesc;
200
201 unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
202 unsigned int outputShape[] = { outputNum, outputChannels };
203 unsigned int weightsShape[] = { inputChannels, outputChannels };
204 if (transposeWeights)
205 {
206 std::swap(weightsShape[0], weightsShape[1]);
207 }
208
209 unsigned int biasShape[] = { outputChannels };
210
211 inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
212 outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
213 weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
214 biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
215
216 // Set quantization parameters if the requested type is a quantized type.
217 if(armnn::IsQuantizedType<T>())
218 {
219 inputTensorInfo.SetQuantizationScale(qScale);
220 inputTensorInfo.SetQuantizationOffset(qOffset);
221 outputTensorInfo.SetQuantizationScale(qScale);
222 outputTensorInfo.SetQuantizationOffset(qOffset);
223 }
224
225 LayerTestResult<T, 2> result(outputTensorInfo);
226
Sadik Armagan483c8112021-06-01 09:24:52 +0100227 std::vector<T> input = armnnUtils::QuantizedVector<T>(
228 {
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100229 1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100230 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100231 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100232
Sadik Armagan483c8112021-06-01 09:24:52 +0100233 std::vector<T> weights = armnnUtils::QuantizedVector<T>(
234 {
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100235 2.0f, 3.0f, 4.0f, 5.0f, 6.0f
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100236 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100237 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100238
239 std::vector<T> biasValues({900000.f});
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100240
241 result = SimpleFullyConnectedTestImpl<T>(
242 workloadFactory,
243 memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100244 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100245 inputTensorInfo, outputTensorInfo,
246 weightsDesc, biasesDesc,
Sadik Armagan483c8112021-06-01 09:24:52 +0100247 weights, biasValues, input,
Matthew Sloyan81beae32021-07-13 19:46:11 +0100248 true, transposeWeights, true
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100249 );
250
Sadik Armagan483c8112021-06-01 09:24:52 +0100251 result.m_ExpectedData = armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100252
253 return result;
254}
255
256//
257// Explicit template specializations
258//
259
Derek Lambertif90c56d2020-01-10 17:14:08 +0000260template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
261FullyConnectedTest<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100262 armnn::IWorkloadFactory& workloadFactory,
263 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100264 const armnn::ITensorHandleFactory& tensorHandleFactory,
Sadik Armaganf0a6dec2021-03-25 07:46:55 +0000265 bool biasEnabled,
266 bool constWeights);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100267
Derek Lambertif90c56d2020-01-10 17:14:08 +0000268template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
269FullyConnectedTest<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100270 armnn::IWorkloadFactory& workloadFactory,
271 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100272 const armnn::ITensorHandleFactory& tensorHandleFactory,
Sadik Armaganf0a6dec2021-03-25 07:46:55 +0000273 bool biasEnabled,
274 bool constWeights);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100275
276//
277// Implementation functions
278//
279
280LayerTestResult<float, 2> FullyConnectedFloat32Test(
281 armnn::IWorkloadFactory& workloadFactory,
282 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100283 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100284 bool biasEnabled,
285 bool transposeWeights)
286{
287 unsigned int inputWidth = 1;
288 unsigned int inputHeight = 1;
289 unsigned int inputChannels = 5;
290 unsigned int inputNum = 2;
291
292 unsigned int outputChannels = 3;
293 unsigned int outputNum = 2;
294
295 // Define the tensor descriptors.
296 armnn::TensorInfo inputTensorInfo;
297 armnn::TensorInfo outputTensorInfo;
298 armnn::TensorInfo weightsDesc;
299 armnn::TensorInfo biasesDesc;
300
301 unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
302 unsigned int outputShape[] = { outputNum, outputChannels };
303 unsigned int weightsShape[] = { inputChannels, outputChannels };
304
305 if (transposeWeights)
306 {
307 std::swap(weightsShape[0], weightsShape[1]);
308 }
309
310 unsigned int biasShape[] = { outputChannels };
311
312 inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
313 outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
314 weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
315 biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
316
317 LayerTestResult<float, 2> result(outputTensorInfo);
318
Sadik Armagan483c8112021-06-01 09:24:52 +0100319 std::vector<float> input =
320 {
321 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
322 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
323 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100324
Sadik Armagan483c8112021-06-01 09:24:52 +0100325 std::vector<float> weights =
326 {
327 .5f, 2.f, .5f,
328 .5f, 2.f, 1.f,
329 .5f, 2.f, 2.f,
330 .5f, 2.f, 3.f,
331 .5f, 2.f, 4.f
332 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100333
334 if (transposeWeights)
335 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100336 weights =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100337 {
338 .5f, .5f, .5f, .5f, .5f,
339 2.f, 2.f, 2.f, 2.f, 2.f,
340 .5f, 1.f, 2.f, 3.f, 4.f
Sadik Armagan483c8112021-06-01 09:24:52 +0100341 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100342 }
343
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100344 std::vector<float> biasValues({0.f, 0.f, 0.f});
345 if (biasEnabled)
346 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100347 biasValues = std::vector<float>({10.f, 20.f, 30.f});
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100348 }
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100349
350 result = SimpleFullyConnectedTestImpl<float>(
351 workloadFactory,
352 memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100353 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100354 inputTensorInfo, outputTensorInfo,
355 weightsDesc, biasesDesc,
Sadik Armagan483c8112021-06-01 09:24:52 +0100356 weights, biasValues, input,
Matthew Sloyan81beae32021-07-13 19:46:11 +0100357 biasEnabled, transposeWeights, true
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100358 );
359
Sadik Armagan483c8112021-06-01 09:24:52 +0100360 std::vector<float> expectedOutput =
361 {
362 0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
363 2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
364 0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100365
Sadik Armagan483c8112021-06-01 09:24:52 +0100366 2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
367 10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
368 2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
369 };
370 result.m_ExpectedData = expectedOutput;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100371
372 return result;
373}
374
375LayerTestResult<float, 2> FullyConnectedLargeTest(
376 armnn::IWorkloadFactory& workloadFactory,
377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100378 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100379 bool transposeWeights)
380{
Finn Williams7faf9a82020-08-27 10:37:36 +0100381 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
382 memoryManager,
383 tensorHandleFactory,
384 transposeWeights);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100385}