//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

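// Helper that builds and runs a single FullyConnected workload through the given
// IWorkloadFactory, copying the supplied weights, bias and input into tensor handles
// and returning the computed output in a LayerTestResult.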
template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsDesc,
    armnn::TensorInfo biasesDesc,
    boost::multi_array<T, 2>& weights,
    boost::multi_array<B, 1>& bias,
    boost::multi_array<T, 4>& input,
    bool biasEnabled,
    bool transposeWeights)
{
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Acquire();
    workload->Execute();
    workloadFactory.Release();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

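// Runs a 2-batch, 5-input-channel, 3-output-channel Float32 fully connected layer,
// optionally with a bias and/or a transposed weight matrix, and checks the result
// against values written out by hand below.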
LayerTestResult<float, 2> FullyConnectedFloat32Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled,
                                                    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
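    // When the weight matrix is stored transposed, its shape swaps from
    // [inputChannels, outputChannels] to [outputChannels, inputChannels].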
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }
    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

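    // Expected outputs, written out as the per-channel dot products so they are easy to
    // check by hand. For example, batch 0, channel 0 uses weight column {0.5, 0.5, 0.5, 0.5, 0.5}:
    // 1*0.5 + 2*0.5 + 3*0.5 + 4*0.5 + 5*0.5 = 0.5 + 1.0 + 1.5 + 2.0 + 2.5 = 7.5, plus the bias if enabled.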
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}

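// Runs a single-batch QuantisedAsymm8 fully connected layer (6 input values, 2 output channels)
// with asymmetric quantization parameters on the input, weights and output, and a Signed32
// bias whose scale is the product of the input and weight scales.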
LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, armnn::DataType::QuantisedAsymm8);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

    armnn::TensorInfo biasesDesc({ outputChannels }, armnn::DataType::Signed32);
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<uint8_t, 2> result(outputTensorInfo);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>{51, 124, 28,
                                                                              251, 8, 92});

    auto weights = MakeTensor<uint8_t, 2>(weightsDesc, std::vector<uint8_t>{51, 193, 42, 53, 175, 34,
                                                                            210, 145, 23, 74, 34, 150});

    // The bias is quantized with scale = inputScale * weightsScale = 0.1 * 0.2 = 0.02 and offset = 0,
    // matching biasesDesc above.
    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});

    result = SimpleFullyConnectedTestImpl<uint8_t>(
        workloadFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, true
    );

    // Manually calculated.
    // Note one of these values has been clamped to 0.
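    // Rough re-derivation of these values, assuming the usual asymmetric quantization
    // convention real = scale * (quantized - offset):
    //   channel 0 accumulates to roughly -107.0 (77.96 once the dequantized bias of 185.0 is added),
    //   channel 1 accumulates to roughly  110.9 (1460.9 once the dequantized bias of 1350.0 is added).
    // Requantizing with the output scale of 5 and offset of 10 (or -50 when the bias is enabled)
    // gives roughly {-11, 32} and {-34, 242}; the negative channel is clamped to the uint8
    // minimum of 0 in both cases.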
    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 242});
    }
    else
    {
        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 32});
    }

    return result;
}


//
// ArmNN variant of the AndroidNN fully_connected_float_large test.
//
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
template<typename T>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                    bool transposeWeights,
                                                    float qScale = 0.0f,
                                                    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::GetDataType<T>());
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        })
    );

    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        })
    );

    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights
    );

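    // Expected value: 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.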
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            965432.0f,
        })
    );

    return result;
}