//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "WorkloadTestUtils.hpp"

#include <backendsCommon/IBackendInternal.hpp>
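
// Shared helper: creates the input/output tensor handles, builds a FullyConnected queue
// descriptor, runs the resulting workload on the given backend and copies the raw output back.
// Callers are responsible for filling in result.outputExpected.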
template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsDesc,
    armnn::TensorInfo biasesDesc,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
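    // When transposeWeights is set, the weights tensor is supplied as [outputChannels, inputChannels]
    // (the transpose of the matrix below) and m_TransposeWeightMatrix tells the workload to handle it.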
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }
    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );
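
    // Expected values: for each batch n and output channel c,
    // output[n][c] = sum_i(input[n][i] * weights[i][c]) + bias[c].
    // Each term of the dot product is written out explicitly below.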
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}

LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;
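
    // QuantisedAsymm8 tensors map quantized values to real values as real = scale * (quantized - offset).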
    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, armnn::DataType::QuantisedAsymm8);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

    armnn::TensorInfo biasesDesc({ outputChannels }, armnn::DataType::Signed32);
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<uint8_t, 2> result(outputTensorInfo);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>{51, 124, 28,
                                                                              251, 8, 92});

    auto weights = MakeTensor<uint8_t, 2>(weightsDesc, std::vector<uint8_t>{51, 193, 42, 53, 175, 34,
                                                                            210, 145, 23, 74, 34, 150});

    // scale = 0.02
    // offset = 0
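    // (The bias scale is inputScale * weightScale = 0.1 * 0.2 = 0.02, so 9250 and 67500
    // dequantize to 185.0 and 1350.0 respectively.)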
    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});

    result = SimpleFullyConnectedTestImpl<uint8_t>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, true
    );

    // Manually calculated.
    // Note one of these values has been clamped to 0.
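    // For example, for the second output channel without bias: dequantizing the inputs and weights
    // and accumulating gives roughly 110.88, which requantizes to round(110.88 / 5) + 10 = 32.
    // The first channel falls below the representable range after requantization, so it clamps to 0
    // in both cases.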
    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 242});
    }
    else
    {
        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 32});
    }

    return result;
}


//
// ArmNN variant of the AndroidNN fully_connected_float_large test.
//
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
template<typename T>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::GetDataType<T>());
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        })
    );

    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        })
    );

    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights
    );
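
    // 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 = 65432, plus the bias of 900000, gives 965432.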
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            965432.0f,
        })
    );

    return result;
}