blob: dcf87fe92be4a39414d186ae544b4ffdbe10a7c6 [file] [log] [blame]
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001//
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003// SPDX-License-Identifier: MIT
4//
5
6#include "FullyConnectedTestImpl.hpp"
7
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01008
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01009#include <QuantizeHelper.hpp>
10
James Conroy1f58f032021-04-27 17:13:27 +010011#include <backendsCommon/TensorHandle.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010012
13#include <backendsCommon/test/DataTypeUtils.hpp>
14#include <backendsCommon/test/TensorCopyUtils.hpp>
15#include <backendsCommon/test/WorkloadTestUtils.hpp>
16
17#include <test/TensorHelpers.hpp>
18
19//
20// Implementation templates
21//
22
23template<typename T, typename B>
24LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000025 armnn::IWorkloadFactory& workloadFactory,
26 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
27 const armnn::ITensorHandleFactory& tensorHandleFactory,
28 armnn::TensorInfo inputTensorInfo,
29 armnn::TensorInfo outputTensorInfo,
30 armnn::TensorInfo weightsTensorInfo,
31 armnn::TensorInfo biasesTensorInfo,
Sadik Armagan483c8112021-06-01 09:24:52 +010032 std::vector<T>& weights,
33 std::vector<B>& bias,
34 std::vector<T>& input,
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000035 bool biasEnabled,
Matthew Sloyan81beae32021-07-13 19:46:11 +010036 bool transposeWeights,
37 bool constantWeights)
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000038{
39 std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
40 std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo);
41 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
42
Sadik Armagan483c8112021-06-01 09:24:52 +010043 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
44
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000045 armnn::FullyConnectedQueueDescriptor data;
46 armnn::WorkloadInfo info;
Matthew Sloyan81beae32021-07-13 19:46:11 +010047 armnn::ScopedTensorHandle weightsTensor(weightsTensorInfo);
48 armnn::ScopedTensorHandle biasTensor(biasesTensorInfo);
49
50 AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data());
51 AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000052
53 AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
54 AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
55 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Matthew Sloyan81beae32021-07-13 19:46:11 +010056
57 // Need to set as layer members will be null when creating the workload because the optimization hasn't been run.
58 data.m_Weight = &weightsTensor;
59 data.m_Bias = &biasTensor;
60
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000061 data.m_Parameters.m_BiasEnabled = biasEnabled;
62 data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
Matthew Sloyan81beae32021-07-13 19:46:11 +010063 data.m_Parameters.m_ConstantWeights = constantWeights;
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000064
65 std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
66 if (biasEnabled)
67 {
68 input2Handle = tensorHandleFactory.CreateTensorHandle(biasesTensorInfo);
69 AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());
70 }
71
72 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
73 LayerTestResult<T, 2> result(outputTensorInfo);
74
75 input0Handle->Allocate();
76 input1Handle->Allocate();
77 outputHandle->Allocate();
Sadik Armagan483c8112021-06-01 09:24:52 +010078 CopyDataToITensorHandle(input0Handle.get(), input.data());
79 CopyDataToITensorHandle(input1Handle.get(), weights.data());
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000080 if (biasEnabled)
81 {
82 input2Handle->Allocate();
Sadik Armagan483c8112021-06-01 09:24:52 +010083 CopyDataToITensorHandle(input2Handle.get(), bias.data());
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000084 }
85
86 ExecuteWorkload(*workload, memoryManager);
87
Sadik Armagan483c8112021-06-01 09:24:52 +010088 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
89 result.m_ActualData = actualOutput;
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000090
91 return result;
92}
93
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010094template<armnn::DataType ArmnnType, typename T>
95LayerTestResult<T, 2> FullyConnectedTest(
96 armnn::IWorkloadFactory& workloadFactory,
97 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +010098 const armnn::ITensorHandleFactory& tensorHandleFactory,
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000099 bool biasEnabled,
100 bool constantWeights)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100101{
102 constexpr static unsigned int inputWidth = 3u;
103 constexpr static unsigned int inputHeight = 2u;
104 constexpr static unsigned int inputChannels = 1u;
105
106 constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
107
108 constexpr static unsigned int outputChannels = 2u;
109
110 armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
111 inputTensorInfo.SetQuantizationScale(0.1f);
112 inputTensorInfo.SetQuantizationOffset(63);
113
114 armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
115 outputTensorInfo.SetQuantizationScale(5.f);
116 outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
117
118 armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
119 weightsDesc.SetQuantizationScale(0.2f);
120 weightsDesc.SetQuantizationOffset(93);
121
122 armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
123 biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
124 biasesDesc.SetQuantizationOffset(0);
125
126 LayerTestResult<T, 2> result(outputTensorInfo);
127
Sadik Armagan483c8112021-06-01 09:24:52 +0100128 std::vector<T> input = ConvertToDataType<ArmnnType>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100129 {
130 -1.2f, 6.1f, -3.5f,
131 18.8f, -5.5f, 2.9f
132 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100133 inputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100134
Sadik Armagan483c8112021-06-01 09:24:52 +0100135 std::vector<T> weights = ConvertToDataType<ArmnnType>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100136 {
137 -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
138 23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
139 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100140 weightsDesc);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100141
Sadik Armagan483c8112021-06-01 09:24:52 +0100142 std::vector<int32_t> bias = {9250, 67500};
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100143
Matthew Sloyan81beae32021-07-13 19:46:11 +0100144 result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
145 memoryManager,
146 tensorHandleFactory,
147 inputTensorInfo,
148 outputTensorInfo,
149 weightsDesc,
150 biasesDesc,
151 weights,
152 bias,
153 input,
154 biasEnabled,
155 true,
156 constantWeights);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100157
158 if (biasEnabled)
159 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100160 result.m_ExpectedData = ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100161 }
162 else
163 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100164 result.m_ExpectedData = ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100165 }
166
167 return result;
168}
169
170//
171// ArmNN variant of the AndroidNN fully_connected_float_large test.
172//
173// Tests the fully connected layer with large values, optionally transposing weights.
174// Note this is templated for consistency, but the nature of this tests makes it unlikely to be useful in Uint8 mode.
175//
176template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
177LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
178 armnn::IWorkloadFactory& workloadFactory,
179 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100180 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100181 bool transposeWeights,
182 float qScale = 0.0f,
183 int32_t qOffset = 0)
184{
185 unsigned int inputWidth = 1;
186 unsigned int inputHeight = 1;
187 unsigned int inputChannels = 5;
188 unsigned int inputNum = 1;
189
190 unsigned int outputChannels = 1;
191 unsigned int outputNum = 1;
192
193 // Define the tensor descriptors.
194 armnn::TensorInfo inputTensorInfo;
195 armnn::TensorInfo outputTensorInfo;
196 armnn::TensorInfo weightsDesc;
197 armnn::TensorInfo biasesDesc;
198
199 unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
200 unsigned int outputShape[] = { outputNum, outputChannels };
201 unsigned int weightsShape[] = { inputChannels, outputChannels };
202 if (transposeWeights)
203 {
204 std::swap(weightsShape[0], weightsShape[1]);
205 }
206
207 unsigned int biasShape[] = { outputChannels };
208
209 inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
210 outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
211 weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
212 biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
213
214 // Set quantization parameters if the requested type is a quantized type.
215 if(armnn::IsQuantizedType<T>())
216 {
217 inputTensorInfo.SetQuantizationScale(qScale);
218 inputTensorInfo.SetQuantizationOffset(qOffset);
219 outputTensorInfo.SetQuantizationScale(qScale);
220 outputTensorInfo.SetQuantizationOffset(qOffset);
221 }
222
223 LayerTestResult<T, 2> result(outputTensorInfo);
224
Sadik Armagan483c8112021-06-01 09:24:52 +0100225 std::vector<T> input = armnnUtils::QuantizedVector<T>(
226 {
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100227 1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100228 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100229 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100230
Sadik Armagan483c8112021-06-01 09:24:52 +0100231 std::vector<T> weights = armnnUtils::QuantizedVector<T>(
232 {
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100233 2.0f, 3.0f, 4.0f, 5.0f, 6.0f
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100234 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100235 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100236
237 std::vector<T> biasValues({900000.f});
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100238
239 result = SimpleFullyConnectedTestImpl<T>(
240 workloadFactory,
241 memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100242 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100243 inputTensorInfo, outputTensorInfo,
244 weightsDesc, biasesDesc,
Sadik Armagan483c8112021-06-01 09:24:52 +0100245 weights, biasValues, input,
Matthew Sloyan81beae32021-07-13 19:46:11 +0100246 true, transposeWeights, true
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100247 );
248
Sadik Armagan483c8112021-06-01 09:24:52 +0100249 result.m_ExpectedData = armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100250
251 return result;
252}
253
//
// Explicit template specializations
//

// Explicit instantiations of FullyConnectedTest for the quantized data types
// exercised by the backend unit tests (8-bit asymmetric and 16-bit symmetric).
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
FullyConnectedTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool constWeights);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
FullyConnectedTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool constWeights);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100273
274//
275// Implementation functions
276//
277
278LayerTestResult<float, 2> FullyConnectedFloat32Test(
279 armnn::IWorkloadFactory& workloadFactory,
280 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100281 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100282 bool biasEnabled,
283 bool transposeWeights)
284{
285 unsigned int inputWidth = 1;
286 unsigned int inputHeight = 1;
287 unsigned int inputChannels = 5;
288 unsigned int inputNum = 2;
289
290 unsigned int outputChannels = 3;
291 unsigned int outputNum = 2;
292
293 // Define the tensor descriptors.
294 armnn::TensorInfo inputTensorInfo;
295 armnn::TensorInfo outputTensorInfo;
296 armnn::TensorInfo weightsDesc;
297 armnn::TensorInfo biasesDesc;
298
299 unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
300 unsigned int outputShape[] = { outputNum, outputChannels };
301 unsigned int weightsShape[] = { inputChannels, outputChannels };
302
303 if (transposeWeights)
304 {
305 std::swap(weightsShape[0], weightsShape[1]);
306 }
307
308 unsigned int biasShape[] = { outputChannels };
309
310 inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
311 outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
312 weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
313 biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
314
315 LayerTestResult<float, 2> result(outputTensorInfo);
316
Sadik Armagan483c8112021-06-01 09:24:52 +0100317 std::vector<float> input =
318 {
319 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
320 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
321 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100322
Sadik Armagan483c8112021-06-01 09:24:52 +0100323 std::vector<float> weights =
324 {
325 .5f, 2.f, .5f,
326 .5f, 2.f, 1.f,
327 .5f, 2.f, 2.f,
328 .5f, 2.f, 3.f,
329 .5f, 2.f, 4.f
330 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100331
332 if (transposeWeights)
333 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100334 weights =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100335 {
336 .5f, .5f, .5f, .5f, .5f,
337 2.f, 2.f, 2.f, 2.f, 2.f,
338 .5f, 1.f, 2.f, 3.f, 4.f
Sadik Armagan483c8112021-06-01 09:24:52 +0100339 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100340 }
341
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100342 std::vector<float> biasValues({0.f, 0.f, 0.f});
343 if (biasEnabled)
344 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100345 biasValues = std::vector<float>({10.f, 20.f, 30.f});
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100346 }
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100347
348 result = SimpleFullyConnectedTestImpl<float>(
349 workloadFactory,
350 memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100351 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100352 inputTensorInfo, outputTensorInfo,
353 weightsDesc, biasesDesc,
Sadik Armagan483c8112021-06-01 09:24:52 +0100354 weights, biasValues, input,
Matthew Sloyan81beae32021-07-13 19:46:11 +0100355 biasEnabled, transposeWeights, true
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100356 );
357
Sadik Armagan483c8112021-06-01 09:24:52 +0100358 std::vector<float> expectedOutput =
359 {
360 0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
361 2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
362 0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100363
Sadik Armagan483c8112021-06-01 09:24:52 +0100364 2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
365 10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
366 2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
367 };
368 result.m_ExpectedData = expectedOutput;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100369
370 return result;
371}
372
373LayerTestResult<float, 2> FullyConnectedLargeTest(
374 armnn::IWorkloadFactory& workloadFactory,
375 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams7faf9a82020-08-27 10:37:36 +0100376 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100377 bool transposeWeights)
378{
Finn Williams7faf9a82020-08-27 10:37:36 +0100379 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
380 memoryManager,
381 tensorHandleFactory,
382 transposeWeights);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100383}