blob: 309c8a3a964faa07da475ed65a0748403901aa69 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Finn Williamsb49ed182021-06-29 15:50:08 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
5
telsoa01c577f2c2018-08-31 09:22:23 +01006#include "ParserFlatbuffersFixture.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +01007
telsoa01c577f2c2018-08-31 09:22:23 +01008
Sadik Armagan1625efc2021-06-10 18:24:34 +01009TEST_SUITE("TensorflowLiteParser_DepthwiseConvolution2D")
10{
telsoa01c577f2c2018-08-31 09:22:23 +010011struct DepthwiseConvolution2dFixture : public ParserFlatbuffersFixture
12{
13 explicit DepthwiseConvolution2dFixture(const std::string& inputShape,
14 const std::string& outputShape,
15 const std::string& filterShape,
16 const std::string& filterData,
17 const std::string& strides,
18 const std::string& paddingType,
19 const std::string biasShape = "",
20 const std::string biasData = "")
21 {
22 std::string inputTensors = "[ 0, 2 ]";
23 std::string biasTensor = "";
24 std::string biasBuffer = "";
25 if (biasShape.size() > 0 && biasData.size() > 0)
26 {
27 inputTensors = "[ 0, 2, 3 ]";
28 biasTensor = R"(
29 {
30 "shape": )" + biasShape + R"( ,
31 "type": "INT32",
32 "buffer": 3,
33 "name": "biasTensor",
34 "quantization": {
35 "min": [ 0.0 ],
36 "max": [ 255.0 ],
37 "scale": [ 1.0 ],
38 "zero_point": [ 0 ],
39 }
40 } )";
41 biasBuffer = R"(
42 { "data": )" + biasData + R"(, }, )";
43 }
44 m_JsonString = R"(
45 {
46 "version": 3,
47 "operator_codes": [ { "builtin_code": "DEPTHWISE_CONV_2D" } ],
48 "subgraphs": [ {
49 "tensors": [
50 {
51 "shape": )" + inputShape + R"(,
52 "type": "UINT8",
53 "buffer": 0,
54 "name": "inputTensor",
55 "quantization": {
56 "min": [ 0.0 ],
57 "max": [ 255.0 ],
58 "scale": [ 1.0 ],
59 "zero_point": [ 0 ],
60 }
61 },
62 {
63 "shape": )" + outputShape + R"(,
64 "type": "UINT8",
65 "buffer": 1,
66 "name": "outputTensor",
67 "quantization": {
68 "min": [ 0.0 ],
69 "max": [ 511.0 ],
70 "scale": [ 2.0 ],
71 "zero_point": [ 0 ],
72 }
73 },
74 {
75 "shape": )" + filterShape + R"(,
76 "type": "UINT8",
77 "buffer": 2,
78 "name": "filterTensor",
79 "quantization": {
80 "min": [ 0.0 ],
81 "max": [ 255.0 ],
82 "scale": [ 1.0 ],
83 "zero_point": [ 0 ],
84 }
85 }, )" + biasTensor + R"(
86 ],
87 "inputs": [ 0 ],
88 "outputs": [ 1 ],
89 "operators": [
90 {
91 "opcode_index": 0,
92 "inputs": )" + inputTensors + R"(,
93 "outputs": [ 1 ],
94 "builtin_options_type": "DepthwiseConv2DOptions",
95 "builtin_options": {
96 "padding": ")" + paddingType + R"(",
97 "stride_w": )" + strides+ R"(,
98 "stride_h": )" + strides+ R"(,
99 "depth_multiplier": 1,
100 "fused_activation_function": "NONE"
101 },
102 "custom_options_format": "FLEXBUFFERS"
103 }
104 ],
105 } ],
106 "buffers" : [
107 { },
108 { },
109 { "data": )" + filterData + R"(, }, )"
110 + biasBuffer + R"(
111 ]
112 }
113 )";
114 SetupSingleInputSingleOutput("inputTensor", "outputTensor");
115 }
116};
117
// 3x3x1 input, 3x3 filter, stride 1, SAME padding -> output keeps the 3x3x1 shape.
struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture
{
    DepthwiseConvolution2dSameFixture()
    : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]",           // inputShape
                                    "[ 1, 3, 3, 1 ]",           // outputShape
                                    "[ 1, 3, 3, 1 ]",           // filterShape
                                    "[ 9,8,7, 6,5,4, 3,2,1 ]",  // filterData
                                    "1",                        // stride w and h
                                    "SAME")                     // padding type
    {}
};
129
// SAME padding: each output element is checked against a reference result.
TEST_CASE_FIXTURE(DepthwiseConvolution2dSameFixture, "ParseDepthwiseConv2DSame")
{
    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        { 0, 1, 2,
          3, 4, 5,
          6, 7, 8 },
        // the expected values were generated using the example python implementation at
        // https://eli.thegreenplace.net/2018/depthwise-separable-convolutions-for-machine-learning/
        // divide the expected values by the output scale, as it is not 1.0
        {  14/2,  35/2,  38/2,
           57/2, 120/2, 111/2,
          110/2, 197/2, 158/2 });
}
144
// 3x3x1 input, 3x3 filter, stride 1, VALID padding -> output collapses to 1x1x1.
struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture
{
    DepthwiseConvolution2dValidFixture ()
    : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]",           // inputShape
                                    "[ 1, 1, 1, 1 ]",           // outputShape
                                    "[ 1, 3, 3, 1 ]",           // filterShape
                                    "[ 9,8,7, 6,5,4, 3,2,1 ]",  // filterData
                                    "1",                        // stride w and h
                                    "VALID")                    // padding type
    {}
};
156
// VALID padding: only the single fully-overlapping position produces an output.
TEST_CASE_FIXTURE(DepthwiseConvolution2dValidFixture, "ParseDepthwiseConv2DValid")
{
    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        { 0, 1, 2,
          3, 4, 5,
          6, 7, 8 },
        // divide the expected values by the output scale, as it is not 1.0
        { 120/2 });
}
167
// Same as DepthwiseConvolution2dSameFixture but with a one-element INT32 bias
// (little-endian bytes "[ 10, 0, 0, 0 ]" encode the value 10).
struct DepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
{
    DepthwiseConvolution2dSameBiasFixture()
    : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]",           // inputShape
                                    "[ 1, 3, 3, 1 ]",           // outputShape
                                    "[ 1, 3, 3, 1 ]",           // filterShape
                                    "[ 9,8,7, 6,5,4, 3,2,1 ]",  // filterData
                                    "1",                        // stride w and h
                                    "SAME",                     // padding type
                                    "[ 1 ]",                    // biasShape
                                    "[ 10, 0, 0, 0 ]")          // biasData
    {}
};
181
// SAME padding with bias: expected values are the SAME-padding reference
// results plus the bias of 10, then divided by the output scale.
TEST_CASE_FIXTURE(DepthwiseConvolution2dSameBiasFixture, "ParseDepthwiseConv2DSameBias")
{
    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        { 0, 1, 2,
          3, 4, 5,
          6, 7, 8 },
        // divide the expected values by the output scale, as it is not 1.0
        { ( 14+10)/2, ( 35+10)/2, ( 38+10)/2,
          ( 57+10)/2, (120+10)/2, (111+10)/2,
          (110+10)/2, (197+10)/2, (158+10)/2 });
}
194
// As DepthwiseConvolution2dSameBiasFixture but with an empty output shape
// ("[ ]"), so the parser has to infer the output dimensions.
struct DynamicDepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
{
    DynamicDepthwiseConvolution2dSameBiasFixture()
    : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]",           // inputShape
                                    "[ ]",                      // outputShape
                                    "[ 1, 3, 3, 1 ]",           // filterShape
                                    "[ 9,8,7, 6,5,4, 3,2,1 ]",  // filterData
                                    "1",                        // stride w and h
                                    "SAME",                     // padding type
                                    "[ 1 ]",                    // biasShape
                                    "[ 10, 0, 0, 0 ]")          // biasData
    {}
};
208
// Dynamic-output variant: uses the named-tensor RunTest overload; the trailing
// 'true' presumably selects the dynamic output-shape path — see RunTest.
TEST_CASE_FIXTURE(DynamicDepthwiseConvolution2dSameBiasFixture, "ParseDynamicDepthwiseConv2DSameBias")
{
    RunTest<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(0,
        { { "inputTensor", { 0, 1, 2,
                             3, 4, 5,
                             6, 7, 8 } } },
        { { "outputTensor", { ( 14+10)/2, ( 35+10)/2, ( 38+10)/2,
                              ( 57+10)/2, (120+10)/2, (111+10)/2,
                              (110+10)/2, (197+10)/2, (158+10)/2 } } },
        true);
}
220
Jan Eilersf6491492021-04-02 13:06:15 +0100221struct DepthwiseConvolution2dFixture2 : public ParserFlatbuffersFixture
222{
223 explicit DepthwiseConvolution2dFixture2(const std::string& inputShape,
Jan Eilers7612bd62021-04-06 17:29:03 +0100224 const std::string& outputShape,
225 const std::string& filterShape,
226 const std::string& filterData,
227 const std::string& strides,
228 const std::string& paddingType,
229 const std::string biasShape = "",
230 const std::string biasData = "",
231 const std::string filter_quant_min = "[ 0.0 ]",
232 const std::string filter_quant_max = "[ 255.0 ]",
233 const std::string filter_quant_scale = "[ 1.0 ]",
234 const std::string filter_quant_zero_point = "[ 0 ]",
235 const std::string filter_quant_axis = "",
236 const std::string output_scale = "[ 1.0 ]")
Jan Eilersf6491492021-04-02 13:06:15 +0100237 {
238 std::string inputTensors = "[ 0, 2 ]";
239 std::string biasTensor = "";
240 std::string biasBuffer = "";
241 if (biasShape.size() > 0 && biasData.size() > 0)
242 {
243 inputTensors = "[ 0, 2, 3 ]";
244 biasTensor = R"(
245 {
246 "shape": )" + biasShape + R"( ,
247 "type": "INT32",
248 "buffer": 3,
249 "name": "biasTensor",
250 "quantization": {
251 "min": [ 0.0 ],
252 "max": [ 255.0 ],
253 "scale": [ 1.0 ],
254 "zero_point": [ 0 ],
255 }
256 } )";
257 biasBuffer = R"(
258 { "data": )" + biasData + R"(, }, )";
259 }
260
261 std::string filter_qantization =
262 R"(
263 "min": )" + filter_quant_min + R"(,
264 "max": )" + filter_quant_max + R"(,
265 "scale": )" + filter_quant_scale + R"(,
266 "zero_point": )" + filter_quant_zero_point;
267 // A given quantization axis indicates if per channel quantization is used for filters
268 if (filter_quant_axis.size() > 0)
269 {
270 filter_qantization +=
271 R"(,
272 "quantized_dimension": )" + filter_quant_axis;
273 }
274 m_JsonString = R"(
275 {
276 "version": 3,
277 "operator_codes": [ { "builtin_code": "DEPTHWISE_CONV_2D" } ],
278 "subgraphs": [ {
279 "tensors": [
280 {
281 "shape": )" + inputShape + R"(,
282 "type": "INT8",
283 "buffer": 0,
284 "name": "inputTensor",
285 "quantization": {
286 "min": [ 0.0 ],
287 "max": [ 255.0 ],
288 "scale": [ 1.0 ],
289 "zero_point": [ 0 ],
290 }
291 },
292 {
293 "shape": )" + outputShape + R"(,
294 "type": "INT8",
295 "buffer": 1,
296 "name": "outputTensor",
297 "quantization": {
298 "min": [ 0.0 ],
299 "max": [ 511.0 ],
Jan Eilers7612bd62021-04-06 17:29:03 +0100300 "scale": )" + output_scale + R"(,
Jan Eilersf6491492021-04-02 13:06:15 +0100301 "zero_point": [ 0 ],
302 }
303 },
304 {
305 "shape": )" + filterShape + R"(,
306 "type": "INT8",
307 "buffer": 2,
308 "name": "filterTensor",
309 "quantization": {)" + filter_qantization + R"(
310 }
311 }, )" + biasTensor + R"(
312 ],
313 "inputs": [ 0 ],
314 "outputs": [ 1 ],
315 "operators": [
316 {
317 "opcode_index": 0,
318 "inputs": )" + inputTensors + R"(,
319 "outputs": [ 1 ],
320 "builtin_options_type": "DepthwiseConv2DOptions",
321 "builtin_options": {
322 "padding": ")" + paddingType + R"(",
323 "stride_w": )" + strides+ R"(,
324 "stride_h": )" + strides+ R"(,
325 "depth_multiplier": 1,
326 "fused_activation_function": "NONE"
327 },
328 "custom_options_format": "FLEXBUFFERS"
329 }
330 ],
331 } ],
332 "buffers" : [
333 { },
334 { },
335 { "data": )" + filterData + R"(, }, )"
336 + biasBuffer + R"(
337 ]
338 }
339 )";
340 SetupSingleInputSingleOutput("inputTensor", "outputTensor");
341 }
342};
343
344
345// No quantization meaning scale=1.0 and offset=0.0 and tensor quantization
struct DepthwiseConvolution2dNoQuantFixture : DepthwiseConvolution2dFixture2
{
    // Relies on the fixture's default filter quantization (scale 1.0, offset 0).
    DepthwiseConvolution2dNoQuantFixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     "[ 9,8,7, 6,5,4, 3,2,1, "
                                     "9,8,7, 6,5,4, 3,2,1, "
                                     "9,8,7, 6,5,4, 3,2,1 ]",    // filterData
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     ""                          // bias data
                                     )
    {}
};
362
363// No quantization meaning scale=1.0 and offset=0.0 and tensor quantization
TEST_CASE_FIXTURE(DepthwiseConvolution2dNoQuantFixture, "ParseDepthwiseConv2DNoQuant")
{
    // All-ones input; expected values are the reference conv results.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
        { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
          36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
}
372
373// Uses per channel quantization on weights but with scales = 1.0 and offsets = 0.0
struct DepthwiseConvolution2dNoChannelQuantFixture : DepthwiseConvolution2dFixture2
{
    // Per-channel quantization path (axis 3) but with neutral scales/offsets,
    // so results must match the non-per-channel case.
    DepthwiseConvolution2dNoChannelQuantFixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     "[ 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1 ]", //filterData
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 1.0, 1.0, 1.0]",         // filter quantization scales
                                     "[ 0, 0, 0]",               // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
394
395// Uses per channel quantization on weights but with scales = 1.0 and offsets = 0.0
TEST_CASE_FIXTURE(DepthwiseConvolution2dNoChannelQuantFixture, "ParseDepthwiseConv2DFilterNoChannelQuant")
{
    // Same expected values as ParseDepthwiseConv2DNoQuant: neutral per-channel
    // scales must not change the result.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
        { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
          36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
}
404
405// Uses per channel quantization on weights but all scales are set to the same value
struct DepthwiseConvolution2dWeightsPerChannelQuantFixture : DepthwiseConvolution2dFixture2
{
    // Per-channel quantization with a uniform scale of 0.25 on every channel;
    // the stored weights are the real weights divided by that scale.
    DepthwiseConvolution2dWeightsPerChannelQuantFixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     // filterData is [ 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1 ]
                                     // quantized per channel with q_dim=3
                                     "[36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, "
                                     "20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12, 8, 4]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.25, 0.25]",      // filter quantization scales
                                     "[ 0, 0, 0]",               // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
429
430// Weights are per channel quantized but all scales are set to the same value
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuantFixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant")
{
    // Expected values match the unquantized reference: a uniform per-channel
    // scale must dequantize back to the same effective weights.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
        { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
          36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
}
440
441// Uses per channel quantization on weights all scales are different in this test
struct DepthwiseConvolution2dWeightsPerChannelQuant1Fixture : DepthwiseConvolution2dFixture2
{
    // Per-channel quantization with a different scale per channel
    // (0.25 / 0.2 / 0.1); stored weights are pre-divided by each scale.
    DepthwiseConvolution2dWeightsPerChannelQuant1Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     // filterData is [ 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1 ]
                                     // quantized per channel with q_dim=3
                                     "[36, 40, 70, 24, 25, 40, 12, 10, 10, 36, 40, 70, 24, "
                                     "25, 40, 12, 10, 10, 36, 40, 70, 24, 25, 40, 12, 10, 10]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1]",        // filter quantization scales
                                     "[ 0, 0, 0]",               // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
465
466// Uses per channel quantization on weights all scales are different in this test
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1")
{
    // Distinct per-channel scales must still dequantize to the reference result.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
        { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
          36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
}
476
477
478// Uses per channel quantization on weights all scales are different in this test
479// Uses different shape for weights and input compared to the other tests above
struct DepthwiseConvolution2dWeightsPerChannelQuant2Fixture : DepthwiseConvolution2dFixture2
{
    // 4x4x4 input with a 2x2 filter — different shapes from the tests above,
    // four per-channel scales.
    DepthwiseConvolution2dWeightsPerChannelQuant2Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",           // inputShape
                                     "[ 1, 4, 4, 4 ]",           // outputShape
                                     "[ 1, 2, 2, 4 ]",           // filterShape
                                     // filterData is [ 9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3 ]
                                     // quantized per channel with q_dim=3
                                     "[36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1, 0.3]",   // filter quantization scales
                                     "[ 0, 0, 0, 0]",            // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
502
503// Uses per channel quantization on weights all scales are different in this test
504// Uses different shape for weights and input compared to the other tests above
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant2Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant2")
{
    // All-ones 4x4x4 input; expected values are the reference results for the
    // dequantized 2x2 filter with SAME padding.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
          1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
          1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
          1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1},
        { 21, 26, 22, 18, 21, 26, 22, 18, 21, 26, 22, 18, 10, 17, 15, 13,
          21, 26, 22, 18, 21, 26, 22, 18, 21, 26, 22, 18, 10, 17, 15, 13,
          21, 26, 22, 18, 21, 26, 22, 18, 21, 26, 22, 18, 10, 17, 15, 13,
          14, 12, 10,  8, 14, 12, 10,  8, 14, 12, 10,  8,  9,  8,  7,  6});
}
519
520// Test for depthwise_multiplier different to one (M > 1)
// Test for depthwise_multiplier different to one (M > 1): 4 input channels
// produce 16 output channels, with 16 per-channel filter scales.
struct DepthwiseConvolution2dWeightsPerChannelQuant4Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant4Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",           // inputShape
                                     "[ 1, 4, 4, 16 ]",          // outputShape
                                     "[ 1, 2, 2, 16 ]",          // filterShape
                                     // filter data is [ 9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3,
                                     //                  9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3,
                                     //                  9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3,
                                     //                  9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3 ]
                                     // quantized per channel with q_dim=3
                                     "[36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10, "
                                     "36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10, "
                                     "36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10, "
                                     "36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3]",     // filter quantization scales
                                     "[ 0, 0, 0, 0]",            // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
552
553// Test for depthwise_multiplier different to one (M > 1)
// Test for depthwise_multiplier different to one (M > 1)
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4")
{
    // All-ones input; 16 output channels per spatial position.
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
          1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
          1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
          1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1},
        { 36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          36, 32, 28, 24, 20, 16, 12,  8,  4, 36, 32, 28, 24, 20, 16, 12,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
          18, 16, 14, 12, 10,  8,  6,  4,  2, 18, 16, 14, 12, 10,  8,  6,
           9,  8,  7,  6,  5,  4,  3,  2,  1,  9,  8,  7,  6,  5,  4,  3});
}
580
Jan Eilers7612bd62021-04-06 17:29:03 +0100581
// M > 1 with non-uniform filter data and a non-exact scale (1/3 stored as
// 0.333333333) to exercise rounding in the per-channel dequantization.
struct DepthwiseConvolution2dWeightsPerChannelQuant6Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant6Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",           // inputShape
                                     "[ 1, 4, 4, 16 ]",          // outputShape
                                     "[ 1, 2, 2, 16 ]",          // filterShape
                                     // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
                                     //                  2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
                                     //                  3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
                                     //                  1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0]
                                     // quantized per channel with q_dim=3
                                     "[12,20,10, 3, 4,15,30, 6, 4,20,30,12, 4,10,20,12,"
                                     " 8, 0,30, 3, 0,10,40, 9,16,15, 0, 3,12,20,40, 3,"
                                     " 12,15,20, 0, 0, 0,10, 9,12,10,40,12,12, 5,10, 9,"
                                     " 4, 0, 0, 6,12, 0,10, 3,16,10,20, 3, 8,15,20, 0]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1, 0.333333333,"
                                     "0.25, 0.2, 0.1, 0.333333333,"
                                     "0.25, 0.2, 0.1, 0.333333333,"
                                     "0.25, 0.2, 0.1, 0.333333333]", // filter quantization scales
                                     "[ 0, 0, 0, 0]",            // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
613
614
// Non-uniform input and filter values, per-channel scales including 1/3.
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant6Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant6")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1,0,1,2,0,4,4,0,2,1,2,0,1,3,3,0,
          1,2,2,3,3,4,1,1,2,4,1,3,4,2,0,2,
          0,3,1,3,4,3,2,0,1,2,3,3,0,2,4,2,
          1,2,1,4,3,4,1,3,1,0,2,3,1,3,2,0},
        {  9, 7, 3, 7,12, 8,22,22,27,22,13,17,13,10, 9,17,
          15, 9,12, 6,16,14,24,27,19,26,18,23, 9,10, 7, 3,
          18,14, 9,11, 7, 9,21,25,17,19,10,15,13, 9, 7, 9,
          15,16, 9, 1, 3, 9,11,12, 3,12, 9,12, 6, 2, 2, 6,
          13, 4,10,12,11,14,28,28,17,17,14,15,15,13,13,22,
          26,24,17, 7,10,20,33,31,23,17,17,16,16,23,20, 7,
          17,11,16, 6,10,16,24,22,26,18,23,20,22,23,21,23,
          12,16, 4, 4, 2, 6, 8,10,12, 8,16,16, 8, 6, 6,14,
          14, 3,14,10,15,15,27,25,16,14, 9,11,21,19,16,24,
          24,25,13, 7, 3,13,21,24,25,23,14,17,24,24,21,12,
           7, 7, 3, 3,11,10,17,13,33,32,21,26,18,17,17,23,
           3, 3, 2, 0, 2, 6, 9,13,10,20,20,24, 2, 4, 4, 8,
           9, 4,10, 4, 2,14,22,16, 5, 7, 3, 5,13,20,20,19,
          11,12, 6, 4, 4,12,12, 8, 9,10, 3, 6,12,18,18,15,
           5, 4, 4, 2, 0, 6,12, 9,10,14, 6,10, 3, 6, 6,12,
           3, 4, 1, 1, 3, 9, 9, 6, 2, 8, 6, 8, 0, 0, 0, 0});
}
641
642
// 3x3x3 per-channel quantization with non-uniform filter data
// (scales 0.25 / 0.2 / 0.1 on axis 3).
struct DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     // filterData is [ 1,4,0,2,4,3,1,0,1,
                                     //                 3,0,4,0,1,3,4,2,4,
                                     //                 3,0,3,4,4,0,3,4,2]
                                     // quantized per channel with q_dim=3
                                     "[ 4,20, 0, 8,20,30, 4, 0,10,12,"
                                     " 0,40, 0, 5,30,16,10,40,12, 0,"
                                     "30,16,20, 0,12,20,20]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1]",        // filter quantization scales
                                     "[ 0, 0, 0]",               // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
669
670
// All-ones input against the non-uniform per-channel-quantized filter.
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_1")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
        { 11,11, 9,17,11,16,10, 5,10,
          14,15,13,21,19,20,13,13,13,
           7, 7,11,11,11,15, 6, 9,10});
}
681
682// Same with input different to 1
// Same filter/quantization as Quant1_1 — the companion test feeds a
// non-constant input instead of all ones.
struct DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]",           // inputShape
                                     "[ 1, 3, 3, 3 ]",           // outputShape
                                     "[ 1, 3, 3, 3 ]",           // filterShape
                                     // filterData is [ 1,4,0,2,4,3,1,0,1,
                                     //                 3,0,4,0,1,3,4,2,4,
                                     //                 3,0,3,4,4,0,3,4,2]
                                     // quantized per channel with q_dim=3
                                     "[ 4,20, 0, 8,20,30, 4, 0,10,12,"
                                     " 0,40, 0, 5,30,16,10,40,12, 0,"
                                     "30,16,20, 0,12,20,20]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1]",        // filter quantization scales
                                     "[ 0, 0, 0]",               // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
709
710
// As Quant1_1 but with a varying (non-constant) input pattern.
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_2")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 3,2,0,0,4,3,0,1,2,
          0,1,3,0,4,2,2,2,3,
          2,4,3,2,0,4,3,4,0},
        {  0,30,16,15,30,32, 8, 9,24,
          20,33,28,34,48,50,18,38,35,
           8, 8,36,20,28,33,10,28,25});
}
723
724
// M > 1 (16 output channels from 4 input channels) with non-uniform filter
// data and 16 per-channel scales.
struct DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",           // inputShape
                                     "[ 1, 4, 4, 16 ]",          // outputShape
                                     "[ 1, 2, 2, 16 ]",          // filterShape
                                     // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
                                     //                  2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
                                     //                  3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
                                     //                  1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0 ]
                                     // quantized per channel with q_dim=3
                                     "[12,20,10, 3, 4,15,30, 6, 4,20,30,13, 4,10,20,13,"
                                     " 8, 0,30, 3, 0,10,40,10,16,15, 0, 3,12,20,40, 3,"
                                     " 12,15,20, 0, 0, 0,10,10,12,10,40,13,12, 5,10,10,"
                                     " 4, 0, 0, 6,12, 0,10, 3,16,10,20, 3, 8,15,20, 0]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3]",     // filter quantization scales
                                     "[ 0, 0, 0, 0]",            // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
756
757
// All-ones input against the M > 1 per-channel-quantized filter.
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_1")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
          1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
          1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
          1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1},
        {  9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           6, 7, 3, 1, 1, 3, 4, 5, 4, 6, 7, 8, 4, 3, 3, 7,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           6, 7, 3, 1, 1, 3, 4, 5, 4, 6, 7, 8, 4, 3, 3, 7,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
           6, 7, 3, 1, 1, 3, 4, 5, 4, 6, 7, 8, 4, 3, 3, 7,
           5, 4, 4, 2, 1, 5, 7, 5, 5, 7, 3, 5, 4, 6, 6, 5,
           5, 4, 4, 2, 1, 5, 7, 5, 5, 7, 3, 5, 4, 6, 6, 5,
           5, 4, 4, 2, 1, 5, 7, 5, 5, 7, 3, 5, 4, 6, 6, 5,
           3, 4, 1, 1, 1, 3, 3, 2, 1, 4, 3, 4, 1, 2, 2, 4});
}
784
785
786
// Identical filter/quantization to Quant4_1 — the companion test uses a
// non-constant input instead of all ones.
struct DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",           // inputShape
                                     "[ 1, 4, 4, 16 ]",          // outputShape
                                     "[ 1, 2, 2, 16 ]",          // filterShape
                                     // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
                                     //                  2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
                                     //                  3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
                                     //                  1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0 ]
                                     // quantized per channel with q_dim=3
                                     "[12,20,10, 3, 4,15,30, 6, 4,20,30,13, 4,10,20,13,"
                                     " 8, 0,30, 3, 0,10,40,10,16,15, 0, 3,12,20,40, 3,"
                                     " 12,15,20, 0, 0, 0,10,10,12,10,40,13,12, 5,10,10,"
                                     " 4, 0, 0, 6,12, 0,10, 3,16,10,20, 3, 8,15,20, 0]",
                                     "1",                        // stride w and h
                                     "SAME",                     // padding type
                                     "",                         // bias shape
                                     "",                         // bias data
                                     "[ 0.0 ]",                  // filter quantization min values
                                     "[ 255.0 ]",                // filter quantization max values
                                     "[ 0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3,"
                                     "0.25, 0.2, 0.1, 0.3]",     // filter quantization scales
                                     "[ 0, 0, 0, 0]",            // filter quantization zero-points
                                     "3"                         // filter quantized axis
                                                                 // (in case of per channel quantization)
                                     )
    {}
};
818
819
// As Quant4_1 but with a varying (non-constant) input pattern.
TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture,
                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_2")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        { 3,3,3,4, 4,4,0,0, 0,3,4,3, 0,2,2,3,
          3,0,3,0, 0,3,2,1, 4,1,2,2, 0,0,0,4,
          3,2,2,2, 2,1,0,4, 4,3,2,4, 3,2,0,0,
          4,1,4,4, 1,0,4,3, 3,2,0,3, 1,1,0,2},
        { 26,21,21, 7,12,17,28,21,20,22,25,26, 6,11,10,16,
          16,16, 4,12, 7,18,28,27,30,20,12,14,16,19,17, 6,
          12,12, 8, 0, 3,13,18,15,18,26,20,26,26,32,28,21,
           0, 0, 0, 0, 2, 6, 6, 4, 2, 8, 6, 8,15,10,10,24,
          20,21, 9, 7, 3, 6,15,16,17,22,17,22,17,18,14, 7,
          18, 6,16,12,12,11,17,15,18,18,10,12,27,26,22,18,
          27,28,12,10, 7, 3, 8,13, 8,12,14,16,26,24,24,24,
           9, 9, 6, 0, 0, 0, 2, 6, 0, 0, 0, 0, 4, 8, 8,16,
          26,24,17, 7, 2, 8,11,10,30,24,30,28,32,33,30,24,
          20,11,16,12, 7, 9,17,13,20,14,16,18,31,36,33,29,
          28,25,19, 9, 6,13,20,19, 2, 8, 6, 8,17,17,15,25,
          12,15, 5, 3, 2, 6, 7, 7, 0, 0, 0, 0, 6, 2, 2, 6,
          14,16, 7, 5, 1, 3, 3, 2,20,28,12,20,13,20,20,19,
           9, 4,10, 4, 0, 4, 8, 6, 4,16,12,16,12,18,18,15,
          11,12, 6, 4, 2, 8,10, 7, 0, 0, 0, 0, 9,14,14,14,
           3, 4, 1, 1, 1, 3, 3, 2, 0, 0, 0, 0, 2, 4, 4, 8});
}
846
847
// Fixture exercising per-channel weight quantization (quantized axis = 3)
// together with a depth multiplier greater than one: the input has 4
// channels while the filter/output have 16. The explicit output scale of
// 100 keeps the expected results in a small integer range.
// Filter values are chosen so that quantized value * per-channel scale
// reproduces the real filter data listed in the comment below.
struct DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",  // inputShape
                                     "[ 1, 4, 4, 16 ]", // outputShape
                                     "[ 1, 2, 2, 16 ]", // filterShape
                                     // filter data is [ 1, 4, 9, 16, 25, 36,
                                     //                  49, 64, 81, 100, 121, 144,
                                     //                  169, 196, 225, 256, 17, 36,
                                     //                  57, 80, 105, 132, 161, 192,
                                     //                  225, 260, 297, 336, 377, 420,
                                     //                  465, 512, 33, 68, 105, 144,
                                     //                  185, 228, 273, 320, 369, 420,
                                     //                  473, 528, 585, 644, 705, 768,
                                     //                  49, 100, 153, 208, 265, 324,
                                     //                  385, 448, 513, 580, 649, 720,
                                     //                  793, 868, 945,1024 ]
                                     // quantized per channel with q_dim=3
                                     "[ 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,"
                                     " 17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,"
                                     " 33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,"
                                     "49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64]",
                                     "1",         // stride w and h
                                     "SAME",      // padding type
                                     "",          // bias shape (empty: no bias input)
                                     "",          // bias data
                                     "[ 0.0 ]",   // filter quantization min values
                                     "[ 255.0 ]", // filter quantization max values
                                     "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16]", // filter quantization scales
                                     "[ 0, 0, 0, 0]", // filter quantization zero-points
                                     "3",             // filter quantized axis
                                                      // (in case of per channel quantization)
                                     "[ 100.0 ]"      // output scale
                                     )
    {}
};
884
885// Test for depthwise_multiplier different to one (M > 1)
Sadik Armagan1625efc2021-06-10 18:24:34 +0100886TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture,
887 "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_5")
Jan Eilers7612bd62021-04-06 17:29:03 +0100888{
889 RunTest<4, armnn::DataType::QAsymmS8>(
890 0,
891 { 1,1,1,2,2,2,1,2,1,2,2,1,2,2,1,1,1,1,1,1,1,2,2,2,
892 1,2,2,2,1,1,1,2,1,1,1,1,2,1,2,1,2,1,1,2,1,2,1,1,
893 1,2,2,1,2,2,1,1,2,1,2,1,1,2,1,2},
894 { 1, 2, 3, 5, 9,11,14,16,17,19,21,24,32,36,39,43,
895 1, 2, 3, 4,11,14,17,20,22,26,29,33,34,38,42,46,
896 1, 2, 3, 5, 8,11,13,16,16,18,21,24,33,36,39,43,
897 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6,13,14,16,17,
898 1, 3, 4, 6, 6, 8,10,12,19,22,24,27,23,25,28,30,
899 1, 3, 5, 8, 7, 8,10,12,18,21,24,27,32,36,39,43,
900 1, 2, 4, 5, 8,10,13,15,12,14,16,18,30,33,37,40,
901 0, 0, 1, 1, 3, 4, 5, 7, 4, 5, 5, 6, 9,10,11,12,
902 1, 3, 5, 7,10,12,15,17,17,20,23,25,19,21,23,25,
903 2, 4, 6, 8, 7, 9,11,13,17,20,23,25,23,25,28,30,
904 1, 2, 4, 6, 9,11,14,16,15,17,20,22,28,31,35,38,
905 0, 0, 1, 1, 4, 5, 6, 7, 4, 5, 5, 6,13,14,16,17,
906 0, 0, 1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 5, 6, 6, 7,
907 0, 0, 1, 1, 1, 2, 2, 3, 5, 6, 7, 8, 5, 6, 6, 7,
908 0, 0, 0, 1, 2, 3, 3, 4, 3, 4, 5, 6, 9,10,11,12,
909 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 3, 3, 4, 5});
910}
911
912
// Fixture exercising per-channel weight quantization (quantized axis = 3)
// with a depth multiplier greater than one (4 input channels, 16 filter/
// output channels) and non-trivial per-channel scales, including
// repeating-decimal values such as 1/3 and 1/8.
// The quantized filter values below satisfy
// quantized value * per-channel scale = real filter value (listed in the
// comment inside the constructor).
struct DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]",  // inputShape
                                     "[ 1, 4, 4, 16 ]", // outputShape
                                     "[ 1, 2, 2, 16 ]", // filterShape
                                     // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
                                     //                  2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
                                     //                  3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
                                     //                  1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0 ]
                                     // quantized per channel with q_dim=3
                                     "[12,20,10, 3, 2,24, 9,10, 5,16,30,12, 3,10, 4,32,"
                                     " 8, 0,30, 3, 0,16,12,15,20,12, 0, 3, 9,20, 8, 8,"
                                     " 12,15,20, 0, 0, 0, 3,15,15, 8,40,12, 9, 5, 2,24,"
                                     " 4, 0, 0, 6, 6, 0, 3, 5,20, 8,20, 3, 6,15, 4, 0]",
                                     "1",         // stride w and h
                                     "SAME",      // padding type
                                     "",          // bias shape (empty: no bias input)
                                     "",          // bias data
                                     "[ 0.0 ]",   // filter quantization min values
                                     "[ 255.0 ]", // filter quantization max values
                                     "[0.25, 0.2, 0.1, 0.3333333333, "
                                     "0.5, 0.125, 0.33333333, 0.2, "
                                     "0.2, 0.25, 0.1, 0.333333333, "
                                     "0.3333333333, 0.2, 0.5, 0.125]", // filter quantization scales
                                     "[ 0, 0, 0, 0]", // filter quantization zero-points
                                     "3"              // filter quantized axis
                                                      // (in case of per channel quantization)
                                     )
    {}
};
944
945// Test for depthwise_multiplier different to one (M > 1)
Sadik Armagan1625efc2021-06-10 18:24:34 +0100946TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture,
947 "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_3_1")
Jan Eilers7612bd62021-04-06 17:29:03 +0100948{
949 RunTest<4, armnn::DataType::QAsymmS8>(
950 0,
951 { 3,3,3,4, 4,4,0,0, 0,3,4,3, 0,2,2,3,
952 3,0,3,0, 0,3,2,1, 4,1,2,2, 0,0,0,4,
953 3,2,2,2, 2,1,0,4, 4,3,2,4, 3,2,0,0,
954 4,1,4,4, 1,0,4,3, 3,2,0,3, 1,1,0,2},
955 { 26,21,21, 7,12,17,28,21,20,22,25,26, 6,11,10,16,
956 16,16, 4,12, 7,18,28,27,30,20,12,14,16,19,17, 6,
957 12,12, 8, 0, 3,13,18,15,18,26,20,26,26,32,28,21,
958 0, 0, 0, 0, 2, 6, 6, 4, 2, 8, 6, 8,15,10,10,24,
959 20,21, 9, 7, 3, 6,15,16,17,22,17,22,17,18,14, 7,
960 18, 6,16,12,12,11,17,15,18,18,10,12,27,26,22,18,
961 27,28,12,10, 7, 3, 8,13, 8,12,14,16,26,24,24,24,
962 9, 9, 6, 0, 0, 0, 2, 6, 0, 0, 0, 0, 4, 8, 8,16,
963 26,24,17, 7, 2, 8,11,10,30,24,30,28,32,33,30,24,
964 20,11,16,12, 7, 9,17,13,20,14,16,18,31,36,33,29,
965 28,25,19, 9, 6,13,20,19, 2, 8, 6, 8,17,17,15,25,
966 12,15, 5, 3, 2, 6, 7, 7, 0, 0, 0, 0, 6, 2, 2, 6,
967 14,16, 7, 5, 1, 3, 3, 2,20,28,12,20,13,20,20,19,
968 9, 4,10, 4, 0, 4, 8, 6, 4,16,12,16,12,18,18,15,
969 11,12, 6, 4, 2, 8,10, 7, 0, 0, 0, 0, 9,14,14,14,
970 3, 4, 1, 1, 1, 3, 3, 2, 0, 0, 0, 0, 2, 4, 4, 8});
971}
972
// Minimal per-channel quantization fixture with a depth multiplier greater
// than one (2 input channels, 4 filter/output channels) on a small 2x2
// input; intended as an easily hand-checkable case for debugging.
// The quantized filter values below satisfy
// quantized value * per-channel scale = real filter value (listed in the
// comment inside the constructor).
struct DepthwiseConvolution2dWeightsPerChannelQuant4_3_2Fixture : DepthwiseConvolution2dFixture2
{
    DepthwiseConvolution2dWeightsPerChannelQuant4_3_2Fixture()
    : DepthwiseConvolution2dFixture2("[ 1, 2, 2, 2 ]", // inputShape
                                     "[ 1, 2, 2, 4 ]", // outputShape
                                     "[ 1, 3, 3, 4 ]", // filterShape
                                     // filter data is [ 0,1,2,3,4,5,6,7,8,
                                     //                  0,1,2,3,4,5,6,7,8,
                                     //                  0,1,2,3,4,5,6,7,8,
                                     //                  0,1,2,3,4,5,6,7,8 ]
                                     // quantized per channel with q_dim=3
                                     "[0, 5,20, 9,16,25,60,21,32,"
                                     " 0,10, 6,12,20,50,18,28,40,"
                                     " 0, 3, 8,15,40,15,24,35,80,"
                                     " 0, 4,10,30,12,20,30,70,24]",
                                     "1",         // stride w and h
                                     "SAME",      // padding type
                                     "",          // bias shape (empty: no bias input)
                                     "",          // bias data
                                     "[ 0.0 ]",   // filter quantization min values
                                     "[ 255.0 ]", // filter quantization max values
                                     "[0.25, 0.2, 0.1, 0.3333333333]", // filter quantization scales
                                     "[ 0, 0, 0, 0]", // filter quantization zero-points
                                     "3"              // filter quantized axis
                                                      // (in case of per channel quantization)
                                     )
    {}
};
1001
1002// An easy test with M > 1 for debugging
1003TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_3_2Fixture,
1004 "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_3_2")
1005{
1006 RunTest<4, armnn::DataType::QAsymmS8>(
1007 0,
1008 { 0,1,2,3,4,5,6,7},
1009 { 38,50,76,92,44,56,66,37,56,50,37,53,62,74,45,61});
Sadik Armagan1625efc2021-06-10 18:24:34 +01001010}
Jan Eilers53ef7952021-06-02 12:01:25 +01001011
1012} // end of TEST_SUITE("TensorflowLiteParser_DepthwiseConvolution2D")