//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <ResolveType.hpp>
#include "WorkloadTestUtils.hpp"

#include <backendsCommon/IBackendInternal.hpp>

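// Shared helper: builds a FullyConnected workload from the supplied tensor infos and data,
// runs it through the given workload factory, and returns the raw output in a
// LayerTestResult (the caller fills in outputExpected).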
template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsDesc,
    armnn::TensorInfo biasesDesc,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

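// The tests below are typically registered from the per-backend unit test suites
// (for example, something along the lines of
// ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false);
// the exact registration mechanism lives outside this file).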
LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }
    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

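    // Each expected entry is the dot product of one input batch with one column of the
    // (non-transposed) weights, written out term by term, plus the corresponding bias.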
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}

LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, armnn::DataType::QuantisedAsymm8);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

    armnn::TensorInfo biasesDesc({ outputChannels }, armnn::DataType::Signed32);
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<uint8_t, 2> result(outputTensorInfo);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>{51, 124, 28,
                                                                              251, 8, 92});

    auto weights = MakeTensor<uint8_t, 2>(weightsDesc, std::vector<uint8_t>{51, 193, 42, 53, 175, 34,
                                                                            210, 145, 23, 74, 34, 150});

    // Bias is quantized with scale = input scale * weights scale = 0.1 * 0.2 = 0.02 and offset = 0.
    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});

    result = SimpleFullyConnectedTestImpl<uint8_t>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, true
    );

    // Manually calculated.
    // Note one of these values has been clamped to 0.
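    //
    // Rough derivation of the first (clamped) entry in the bias-enabled case:
    //   dequantised input:      (q - 63) * 0.1  -> { -1.2, 6.1, -3.5, 18.8, -5.5, 2.9 }
    //   dequantised weights[0]: (q - 93) * 0.2  -> { -8.4, 20.0, -10.2, -8.0, 16.4, -11.8 }
    //   dot product + bias (9250 * 0.02 = 185)  -> approx. 77.96
    //   requantised: 77.96 / 5 + (-50) = approx. -34.4, which clamps to 0 in QAsymm8.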
    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 242});
    }
    else
    {
        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 32});
    }

    return result;
}

//
// ArmNN variant of the AndroidNN fully_connected_float_large test.
//
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
    weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
    biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        })
    );

    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        })
    );

    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights
    );

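    // Expected output: 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.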
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            965432.0f,
        })
    );

    return result;
}