//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvolutionTestHelper.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace armnnDelegate
{

void Conv2DWithBiasesFp32Test(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> inputShape { 1, 5, 5, 1 };
    std::vector<int32_t> filterShape { 1, 3, 3, 1 };
    std::vector<int32_t> biasShape { 1 };
    std::vector<int32_t> outputShape { 1, 3, 3, 1 };

    static std::vector<float> inputValues =
    {
        1, 5, 2, 3, 5,
        8, 7, 3, 6, 3,
        3, 3, 9, 1, 9,
        4, 1, 8, 1, 3,
        6, 8, 1, 9, 2
    };

    std::vector<float> filterValues =
    {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    };

    std::vector<float> biasValues = { 0 };

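    // With SAME padding and a stride of 2, the 5x5 input produces a 3x3 output
    // (one row/column of zero padding on each side). For example, the centre
    // output element is 4*7 + 5*3 + 6*6 + 0*3 + 0*9 + 0*1 + 3*1 + 2*8 + 1*1 = 99.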
    std::vector<float> expectedOutputValues =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    tflite::Padding padding = tflite::Padding_SAME;

    ConvolutionTest<float>(tflite::BuiltinOperator_CONV_2D,
                           ::tflite::TensorType_FLOAT32,
                           2, // strideX
                           2, // strideY
                           1, // dilationX
                           1, // dilationY
                           padding,
                           tflite::ActivationFunctionType_NONE,
                           backends,
                           inputShape,
                           filterShape,
                           outputShape,
                           inputValues,
                           filterValues,
                           expectedOutputValues,
                           biasShape,
                           biasValues);
}

void Conv2DWithBiasesUint8Test(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
    std::vector<int32_t> biasShape { 1 };
    std::vector<int32_t> outputShape { 1, 2, 2, 1 };

    static std::vector<uint8_t> inputValues = { 1, 2, 3, 4 };

    std::vector<uint8_t> filterValues = { 2, 1, 0, 6 };

    std::vector<int32_t> biasValues = { 10 };

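    // For a quantized convolution the bias tensor holds int32 values.
    // The /2 in the expected values below reflects the output scale of 2 that the
    // test helper appears to use by default (see the explicit quantization
    // parameters passed in Conv2DWithBiasesReluUint8Test).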
    std::vector<uint8_t> expectedOutputValues =
    {
        (1 * 2 + 2 * 1 + 3 * 0 + 4 * 6 + 10) / 2, // 19
        (2 * 2 + 0 * 1 + 4 * 0 + 0 * 6 + 10) / 2, // 7
        (3 * 2 + 4 * 1 + 0 * 0 + 0 * 6 + 10) / 2, // 10
        (4 * 2 + 0 * 1 + 0 * 0 + 0 * 6 + 10) / 2, // 9
    };

    tflite::Padding padding = tflite::Padding_SAME;

    ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
                                      ::tflite::TensorType_UINT8,
                                      1, // strideX
                                      1, // strideY
                                      1, // dilationX
                                      1, // dilationY
                                      padding,
                                      tflite::ActivationFunctionType_NONE,
                                      backends,
                                      inputShape,
                                      filterShape,
                                      outputShape,
                                      inputValues,
                                      filterValues,
                                      expectedOutputValues,
                                      biasShape,
                                      biasValues);
}

void Conv2DWithBiasesReluUint8Test(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
    std::vector<int32_t> biasShape { 1 };
    std::vector<int32_t> outputShape { 1, 2, 2, 1 };

    static std::vector<uint8_t> inputValues = { 1, 2, 4, 8 };

    std::vector<uint8_t> filterValues = { 2, 1, 0, 6 };

    std::vector<int32_t> biasValues = { 16 };

    // Factors to consider when computing the expected output:
    // - the filter zero point is non-zero, hence the (x - fz) terms
    // - the output scale is 2, hence the division by 2
    // - the output zero point is non-zero, hence the + outZero
    // - RELU clamps negative results to zero, which in quantized space means clamping to outZero
    uint8_t bias = 16;
    uint8_t outZero = 20;
    uint8_t fz = 4; // filter zero point

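    // For example the top-left element: (1*(2-4) + 2*(1-4) + 4*(0-4) + 8*(6-4) + 16)/2 + 20 = 24,
    // which is above the output zero point, so the RELU clamp leaves it unchanged.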
    std::vector<uint8_t> expectedOutputValues =
    {
        std::max(outZero, static_cast<uint8_t>((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)),
        std::max(outZero, static_cast<uint8_t>((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
        std::max(outZero, static_cast<uint8_t>((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
        std::max(outZero, static_cast<uint8_t>((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero))
    };

    tflite::Padding padding = tflite::Padding_SAME;

    ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
                                      ::tflite::TensorType_UINT8,
                                      1, // strideX
                                      1, // strideY
                                      1, // dilationX
                                      1, // dilationY
                                      padding,
                                      tflite::ActivationFunctionType_RELU,
                                      backends,
                                      inputShape,
                                      filterShape,
                                      outputShape,
                                      inputValues,
                                      filterValues,
                                      expectedOutputValues,
                                      biasShape,
                                      biasValues,
                                      1,   // filter scale
                                      4,   // filter offset
                                      2,   // output scale
                                      20); // output offset
}

void Conv2DWithBiasesRelu6Uint8Test(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
    std::vector<int32_t> biasShape { 1 };
    std::vector<int32_t> outputShape { 1, 2, 2, 1 };

    static std::vector<uint8_t> inputValues = { 1, 2, 4, 1 };

    std::vector<uint8_t> filterValues = { 2, 1, 0, 6 };

    std::vector<int32_t> biasValues = { 0 };

    // Factors to consider when computing the expected output:
    // - the output scale is 2, hence the division by 2
    // - RELU6 caps output values at +6, which in quantized space is 6 divided by the output scale
    uint8_t relu6Min = 6 / 2; // quantized upper bound: 6 / output scale

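    // For example the top-left element: (1*2 + 2*1 + 4*0 + 1*6)/2 = 5, which RELU6 caps at relu6Min = 3.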
    std::vector<uint8_t> expectedOutputValues =
    {
        std::min(relu6Min, static_cast<uint8_t>((1 * 2 + 2 * 1 + 4 * 0 + 1 * 6) / 2)),
        std::min(relu6Min, static_cast<uint8_t>((2 * 2 + 0 * 1 + 1 * 0 + 0 * 6) / 2)),
        std::min(relu6Min, static_cast<uint8_t>((4 * 2 + 1 * 1 + 0 * 0 + 0 * 6) / 2)),
        std::min(relu6Min, static_cast<uint8_t>((1 * 2 + 0 * 1 + 0 * 0 + 0 * 6) / 2))
    };

    tflite::Padding padding = tflite::Padding_SAME;

    ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
                                      ::tflite::TensorType_UINT8,
                                      1, // strideX
                                      1, // strideY
                                      1, // dilationX
                                      1, // dilationY
                                      padding,
                                      tflite::ActivationFunctionType_RELU6,
                                      backends,
                                      inputShape,
                                      filterShape,
                                      outputShape,
                                      inputValues,
                                      filterValues,
                                      expectedOutputValues,
                                      biasShape,
                                      biasValues);
}

TEST_SUITE("Convolution2dTest_CpuRefTests")
{

TEST_CASE ("Conv2DWithBiases_Fp32_CpuRef_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    Conv2DWithBiasesFp32Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Uint8_CpuRef_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    Conv2DWithBiasesUint8Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Relu_Uint8_CpuRef_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    Conv2DWithBiasesReluUint8Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Relu6_Uint8_CpuRef_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    Conv2DWithBiasesRelu6Uint8Test(backends);
}

} //End of TEST_SUITE("Convolution2dTest_CpuRefTests")

TEST_SUITE("Convolution2dTest_CpuAccTests")
{

TEST_CASE ("Conv2DWithBiases_Fp32_CpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    Conv2DWithBiasesFp32Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Uint8_CpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    Conv2DWithBiasesUint8Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Relu_Uint8_CpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    Conv2DWithBiasesReluUint8Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Relu6_Uint8_CpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    Conv2DWithBiasesRelu6Uint8Test(backends);
}

} //End of TEST_SUITE("Convolution2dTest_CpuAccTests")

TEST_SUITE("Convolution2dTest_GpuAccTests")
{

TEST_CASE ("Conv2DWithBiases_Fp32_GpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    Conv2DWithBiasesFp32Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Uint8_GpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    Conv2DWithBiasesUint8Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Relu_Uint8_GpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    Conv2DWithBiasesReluUint8Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Relu6_Uint8_GpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    Conv2DWithBiasesRelu6Uint8Test(backends);
}

} //End of TEST_SUITE("Convolution2dTest_GpuAccTests")

void TransposeConvUint8Test(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> transposeTensorShape { 4 };
    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
    std::vector<int32_t> outputShape { 1, 3, 3, 1 };

    std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
    static std::vector<uint8_t> inputValues = { 1, 2, 3, 4 };
    std::vector<uint8_t> filterValues = { 0, 1, 2, 4 };
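    // Transpose convolution scatters each input element across the output through the
    // filter: output[y + i][x + j] += input[y][x] * filter[i][j]. With VALID padding and
    // stride 1 the output is (2 + 2 - 1) = 3 in each dimension, and the centre element is
    // 1*4 + 2*2 + 3*1 + 4*0 = 11.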
    std::vector<uint8_t> expectedOutputValues =
    {
        0, 1, 2,
        2, 11, 12,
        6, 20, 16
    };

    tflite::Padding padding = tflite::Padding_VALID;
    TransposeConvTest<uint8_t>(backends,
                               ::tflite::TensorType_UINT8,
                               1, // strideX
                               1, // strideY
                               padding,
                               transposeTensorShape,
                               filterShape,
                               inputShape,
                               outputShape,
                               transposeData,
                               filterValues,
                               inputValues,
                               expectedOutputValues);
}

void TransposeConvFp32Test(std::vector<armnn::BackendId>& backends)
{
    std::vector<int32_t> transposeTensorShape { 4 };
    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
    std::vector<int32_t> outputShape { 1, 3, 3, 1 };

    std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
    static std::vector<float> inputValues = { 1, 2, 3, 4 };
    std::vector<float> filterValues = { 0, 1, 2, 4 };
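    // Same values as TransposeConvUint8Test above, so the same scatter-add derivation applies.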
    std::vector<float> expectedOutputValues =
    {
        0, 1, 2,
        2, 11, 12,
        6, 20, 16
    };

    tflite::Padding padding = tflite::Padding_VALID;
    TransposeConvTest<float>(backends,
                             ::tflite::TensorType_FLOAT32,
                             1, // strideX
                             1, // strideY
                             padding,
                             transposeTensorShape,
                             filterShape,
                             inputShape,
                             outputShape,
                             transposeData,
                             filterValues,
                             inputValues,
                             expectedOutputValues);
}

TEST_SUITE("TransposeConv_CpuRef_Test")
{

TEST_CASE ("TransposeConv_Fp32_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    TransposeConvFp32Test(backends);
}

TEST_CASE ("TransposeConv_Uint8_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    TransposeConvUint8Test(backends);
}

} // End of TEST_SUITE(TransposeConv_CpuRef_Test)

TEST_SUITE("TransposeConv_CpuAcc_Test")
{

TEST_CASE ("TransposeConv_Fp32_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    TransposeConvFp32Test(backends);
}

TEST_CASE ("TransposeConv_Uint8_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    TransposeConvUint8Test(backends);
}

} // End of TEST_SUITE(TransposeConv_CpuAcc_Test)

TEST_SUITE("TransposeConv_GpuAcc_Test")
{

TEST_CASE ("TransposeConv_Fp32_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    TransposeConvFp32Test(backends);
}

TEST_CASE ("TransposeConv_Uint8_Test")
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    TransposeConvUint8Test(backends);
}

} // End of TEST_SUITE(TransposeConv_GpuAcc_Test)

} // namespace armnnDelegate